#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/base/mpi.templates.h>

#  include <deal.II/lac/scalapack.templates.h>

#  ifdef DEAL_II_WITH_HDF5
#    include <hdf5.h>
#  endif

DEAL_II_NAMESPACE_OPEN

#  ifdef DEAL_II_WITH_HDF5
// Helper function that returns the HDF5 datatype id matching the given
// number type; specializations for the supported types follow.
template <typename number>
inline hid_t
hdf5_type_id(const number *)
{
  DEAL_II_NOT_IMPLEMENTED();
  return -1;
}

// ... (specializations of hdf5_type_id() for double, float, int, and
//      unsigned int)

#  endif // DEAL_II_WITH_HDF5
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const size_type                                           n_rows_,
  const size_type                                           n_columns_,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           row_block_size_,
  const size_type                                           column_block_size_,
  const LAPACKSupport::Property                             property_)
  : uplo('L') // for non-symmetric matrices this is not needed
  , first_process_row(0)
  , first_process_column(0)
  , submatrix_row(1)
  , submatrix_column(1)
{
  reinit(n_rows_,
         n_columns_,
         process_grid,
         row_block_size_,
         column_block_size_,
         property_);
}
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const size_type                                           size,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           block_size,
  const LAPACKSupport::Property                             property)
  : ScaLAPACKMatrix<NumberType>(size,
                                size,
                                process_grid,
                                block_size,
                                block_size,
                                property)
{}
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const std::string                                        &filename,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           row_block_size,
  const size_type                                           column_block_size)
  : uplo('L')
  , first_process_row(0)
  , first_process_column(0)
  , submatrix_row(1)
  , submatrix_column(1)
{
#  ifndef DEAL_II_WITH_HDF5
  AssertThrow(
    false,
    ExcMessage(
      "This function is only available when deal.II is configured with HDF5"));
#  else
  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));

  // Before reading the content from disk, the root process determines the
  // dimensions of the matrix; afterwards storage is allocated via reinit()
  // and the content is read via load().
  if (this_mpi_process == 0)
    {
      // ... (open the HDF5 file and query the dataset dimensions)
    }
  // ...
#  endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(
  const size_type                                           n_rows_,
  const size_type                                           n_columns_,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           row_block_size_,
  const size_type                                           column_block_size_,
  const LAPACKSupport::Property                             property_)
{
  Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
  Assert(column_block_size_ > 0,
         ExcMessage("Column block size has to be positive."));
  Assert(
    row_block_size_ <= n_rows_,
    ExcMessage(
      "Row block size can not be greater than the number of rows of the matrix"));
  Assert(
    column_block_size_ <= n_columns_,
    ExcMessage(
      "Column block size can not be greater than the number of columns of the matrix"));

  // ... (store state, property, grid, sizes, and block sizes in the members)

  if (grid->mpi_process_is_active)
    {
      // determine the size of the process-local part via numroc_:
      n_local_rows    = numroc_(&n_rows,
                             &row_block_size,
                             &(grid->this_process_row),
                             &first_process_row,
                             &(grid->n_process_rows));
      n_local_columns = numroc_(&n_columns,
                                &column_block_size,
                                &(grid->this_process_column),
                                &first_process_column,
                                &(grid->n_process_columns));

      // ... (fill the ScaLAPACK descriptor via descinit_, passing
      //      &first_process_row, &first_process_column and
      //      &(grid->blacs_context), and allocate the local storage)
    }
  else
    {
      // set the process-local sizes to something telling:
      n_local_rows    = -1;
      n_local_columns = -1;
      std::fill(std::begin(descriptor), std::end(descriptor), -1);
    }
}
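// Usage sketch (not part of the library source; the grid and matrix sizes
// below are made-up): all matrices sharing a block-cyclic layout are built
// on top of a common ProcessGrid.
//
// @code
//   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//     MPI_COMM_WORLD, 1000, 1000, 32, 32);
//   ScaLAPACKMatrix<double> A(1000, 1000, grid, 32, 32);
// @endcode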
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(
  const size_type                                           size,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           block_size,
  const LAPACKSupport::Property                             property)
{
  reinit(size, size, process_grid, block_size, block_size, property);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::set_property(const LAPACKSupport::Property property_)
{
  property = property_;
}



template <typename NumberType>
LAPACKSupport::Property
ScaLAPACKMatrix<NumberType>::get_property() const
{
  return property;
}



template <typename NumberType>
LAPACKSupport::State
ScaLAPACKMatrix<NumberType>::get_state() const
{
  return state;
}



template <typename NumberType>
ScaLAPACKMatrix<NumberType> &
ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
{
  // ...
  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert(n_columns == int(matrix.n()),
         ExcDimensionMismatch(n_columns, matrix.n()));

  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              local_el(i, j)   = matrix(glob_i, glob_j);
            }
        }
    }
  state = LAPACKSupport::matrix;
  return *this;
}
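// Usage sketch (illustrative only): every process holds the same serial
// FullMatrix, and the assignment above scatters it into the block-cyclic
// layout entry by entry.
//
// @code
//   FullMatrix<double> serial(1000, 1000);
//   // ... fill `serial` identically on all processes ...
//   A = serial;
// @endcode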
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
                                       const unsigned int                  rank)
{
  // ...
  if (n_rows * n_columns == 0)
    return;

  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));

  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));
  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));

  // the serial matrix B lives entirely on process `rank`; it is spread over
  // the process grid with a single pgemr2d call below:
  if (this_mpi_process == rank)
    {
      // ... (set up a 1x1 process grid containing only `rank`)
    }
  // ...
  const char *order = "Col";
  // ... (create the union BLACS context and the descriptor of the serial
  //      source matrix)

  if (this->grid->mpi_process_is_active)
    {
      // ...
      NumberType *loc_vals_B =
        this->values.size() > 0 ? this->values.data() : nullptr;
      // ... (redistribute with pgemr2d, passing
      //      &(this->grid->blacs_context) as the context of the call)
    }
  // ...
  state = LAPACKSupport::matrix;
}
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
{
  Assert(n_local_rows >= 0 &&
           loc_row < static_cast<unsigned int>(n_local_rows),
         ExcIndexRange(loc_row, 0, n_local_rows));
  const int i = loc_row + 1;
  return indxl2g_(&i,
                  &row_block_size,
                  &(grid->this_process_row),
                  &first_process_row,
                  &(grid->n_process_rows)) -
         1;
}



template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
{
  Assert(n_local_columns >= 0 &&
           loc_column < static_cast<unsigned int>(n_local_columns),
         ExcIndexRange(loc_column, 0, n_local_columns));
  const int j = loc_column + 1;
  return indxl2g_(&j,
                  &column_block_size,
                  &(grid->this_process_column),
                  &first_process_column,
                  &(grid->n_process_columns)) -
         1;
}
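// With the two index maps above, a loop over the process-local entries that
// needs global coordinates looks like this (a sketch, assuming a matrix `A`
// that has already been reinit()ed):
//
// @code
//   for (unsigned int i = 0; i < A.local_m(); ++i)
//     for (unsigned int j = 0; j < A.local_n(); ++j)
//       A.local_el(i, j) =
//         (A.global_row(i) == A.global_column(j)) ? 1. : 0.;
// @endcode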
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
                                     const unsigned int            rank) const
{
  // ...
  if (n_rows * n_columns == 0)
    return;

  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));

  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));
  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));

  if (this_mpi_process == rank)
    {
      // ... (set up a 1x1 process grid containing only `rank`)
    }
  // ...
  const char *order = "Col";
  // ... (create the union BLACS context and the descriptor of the serial
  //      destination matrix)

  if (this->grid->mpi_process_is_active)
    {
      // ...
      const NumberType *loc_vals_A =
        this->values.size() > 0 ? this->values.data() : nullptr;
      // ... (redistribute with pgemr2d, passing
      //      &(this->grid->blacs_context) as the context of the call)
    }
  // ...
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
{
  // ...
  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert(n_columns == int(matrix.n()),
         ExcDimensionMismatch(n_columns, matrix.n()));

  matrix = 0.;
  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j       = global_column(j);
              matrix(glob_i, glob_j) = local_el(i, j);
            }
        }
    }
  Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);

  // if the matrix is in a factorized state only one triangle is meaningful:
  // zero out the other one, or mirror the stored triangle for symmetric
  // matrices:
  if (state == LAPACKSupport::cholesky)
    {
      if (uplo == 'L')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = i + 1; j < matrix.m(); ++j)
            matrix(i, j) = 0.;
      else if (uplo == 'U')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = 0; j < i; ++j)
            matrix(i, j) = 0.;
    }
  else if (property == LAPACKSupport::symmetric &&
           state == LAPACKSupport::inverse_matrix)
    {
      if (uplo == 'L')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = i + 1; j < matrix.m(); ++j)
            matrix(i, j) = matrix(j, i);
      else if (uplo == 'U')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = 0; j < i; ++j)
            matrix(i, j) = matrix(j, i);
    }
}
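// The inverse operation of operator=(const FullMatrix &): a sketch of
// gathering the distributed content on every process.
//
// @code
//   FullMatrix<double> gathered(A.m(), A.n());
//   A.copy_to(gathered);
// @endcode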
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(
  ScaLAPACKMatrix<NumberType>                 &B,
  const std::pair<unsigned int, unsigned int> &offset_A,
  const std::pair<unsigned int, unsigned int> &offset_B,
  const std::pair<unsigned int, unsigned int> &submatrix_size) const
{
  // ... (range checks on the offsets and the submatrix size)

  // Currently, copying of matrices is only supported if A and B share the
  // same MPI communicator:
  int ierr, comparison;
  ierr = MPI_Comm_compare(grid->mpi_communicator,
                          B.grid->mpi_communicator,
                          &comparison);
  AssertThrowMPI(ierr);
  Assert(comparison == MPI_IDENT,
         ExcMessage("Matrix A and B must have a common MPI Communicator"));

  // ...
  const char *order = "Col";
  // ... (create a union BLACS context covering the grids of A and B and set
  //      up the local value pointers loc_vals_A/loc_vals_B and the
  //      descriptors desc_A/desc_B of both matrices)
  if (this->values.size() != 0)
    loc_vals_A = this->values.data();
  for (unsigned int i = 0; i < desc_A.size(); ++i)
    desc_A[i] = this->descriptor[i];
  // ...
  for (unsigned int i = 0; i < desc_B.size(); ++i)
    desc_B[i] = B.descriptor[i];
  // ... (call pgemr2d on the union context and release it)
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
{
  // ...
  if (this->grid->mpi_process_is_active)
    AssertThrow(
      this->descriptor[0] == 1,
      ExcMessage(
        "Copying of ScaLAPACK matrices only implemented for dense matrices"));
  if (dest.grid->mpi_process_is_active)
    AssertThrow(
      dest.descriptor[0] == 1,
      ExcMessage(
        "Copying of ScaLAPACK matrices only implemented for dense matrices"));

  // in case of different process grids or block-cyclic distributions the
  // general redistribution routine pgemr2d has to be used:
  if ((this->grid != dest.grid) || (row_block_size != dest.row_block_size) ||
      (column_block_size != dest.column_block_size))
    {
      // ...
      const char *order = "Col";
      // ... (create a union BLACS context covering both grids)

      NumberType *loc_vals_source = nullptr;
      NumberType *loc_vals_dest   = nullptr;

      if (this->grid->mpi_process_is_active && (this->values.size() > 0))
        {
          Assert(this->values.size() > 0,
                 ExcMessage(
                   "source: process is active but local matrix empty"));
          loc_vals_source = this->values.data();
        }
      if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
        {
          Assert(dest.values.size() > 0,
                 ExcMessage(
                   "destination: process is active but local matrix empty"));
          loc_vals_dest = dest.values.data();
        }
      // ... (pgemr2d with &dest.submatrix_row, &dest.submatrix_column and
      //      both descriptors on the union context, then release it)
    }
  else
    // the process grids and block-cyclic layouts are identical, so the
    // local arrays can be copied directly:
    if (this->grid->mpi_process_is_active)
      dest.values = this->values;

  dest.state    = state;
  dest.property = property;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_transposed(const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 0, 1, true);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
                                 const NumberType                   alpha,
                                 const NumberType                   beta,
                                 const bool                         transpose_B)
{
  if (transpose_B)
    {
      Assert(n_rows == B.n_columns, ExcDimensionMismatch(n_rows, B.n_columns));
      Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
      Assert(column_block_size == B.row_block_size,
             ExcDimensionMismatch(column_block_size, B.row_block_size));
      Assert(row_block_size == B.column_block_size,
             ExcDimensionMismatch(row_block_size, B.column_block_size));
    }
  else
    {
      Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
      Assert(n_columns == B.n_columns,
             ExcDimensionMismatch(n_columns, B.n_columns));
      Assert(column_block_size == B.column_block_size,
             ExcDimensionMismatch(column_block_size, B.column_block_size));
      Assert(row_block_size == B.row_block_size,
             ExcDimensionMismatch(row_block_size, B.row_block_size));
    }
  Assert(this->grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));

  if (this->grid->mpi_process_is_active)
    {
      char trans_b = transpose_B ? 'T' : 'N';
      NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =
        (B.values.size() > 0) ? B.values.data() : nullptr;

      pgeadd(&trans_b,
             &n_rows,
             &n_columns,
             &beta,
             B_loc,
             &B.submatrix_row,
             &B.submatrix_column,
             B.descriptor,
             &alpha,
             A_loc,
             &submatrix_row,
             &submatrix_column,
             descriptor);
    }
  state = LAPACKSupport::matrix;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const NumberType                   a,
                                 const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 1, a, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tadd(const NumberType                   b,
                                  const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 1, b, true);
}
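// Sketch of the two convenience updates defined above (A and B are assumed
// to live on the same grid with compatible layouts):
//
// @code
//   A.add(0.5, B);  // A = A + 0.5 * B
//   A.Tadd(0.5, B); // A = A + 0.5 * B^T
// @endcode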
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mult(const NumberType                   b,
                                  const ScaLAPACKMatrix<NumberType> &B,
                                  const NumberType                   c,
                                  ScaLAPACKMatrix<NumberType>       &C,
                                  const bool                         transpose_A,
                                  const bool                         transpose_B) const
{
  Assert(this->grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));
  Assert(C.grid == B.grid,
         ExcMessage("The matrices B and C need to have the same process grid"));

  // the compatibility checks depend on which of the four products
  // C = b * op(A) * op(B) + c * C is requested:
  if (!transpose_A && !transpose_B)
    {
      Assert(this->n_columns == B.n_rows,
             ExcDimensionMismatch(this->n_columns, B.n_rows));
      Assert(this->n_rows == C.n_rows,
             ExcDimensionMismatch(this->n_rows, C.n_rows));
      Assert(B.n_columns == C.n_columns,
             ExcDimensionMismatch(B.n_columns, C.n_columns));
      Assert(this->row_block_size == C.row_block_size,
             ExcDimensionMismatch(this->row_block_size, C.row_block_size));
      Assert(this->column_block_size == B.row_block_size,
             ExcDimensionMismatch(this->column_block_size, B.row_block_size));
      Assert(B.column_block_size == C.column_block_size,
             ExcDimensionMismatch(B.column_block_size, C.column_block_size));
    }
  else if (transpose_A && !transpose_B)
    {
      Assert(this->n_rows == B.n_rows,
             ExcDimensionMismatch(this->n_rows, B.n_rows));
      Assert(this->n_columns == C.n_rows,
             ExcDimensionMismatch(this->n_columns, C.n_rows));
      Assert(B.n_columns == C.n_columns,
             ExcDimensionMismatch(B.n_columns, C.n_columns));
      Assert(this->column_block_size == C.row_block_size,
             ExcDimensionMismatch(this->column_block_size, C.row_block_size));
      Assert(this->row_block_size == B.row_block_size,
             ExcDimensionMismatch(this->row_block_size, B.row_block_size));
      Assert(B.column_block_size == C.column_block_size,
             ExcDimensionMismatch(B.column_block_size, C.column_block_size));
    }
  else if (!transpose_A && transpose_B)
    {
      Assert(this->n_columns == B.n_columns,
             ExcDimensionMismatch(this->n_columns, B.n_columns));
      Assert(this->n_rows == C.n_rows,
             ExcDimensionMismatch(this->n_rows, C.n_rows));
      Assert(B.n_rows == C.n_columns,
             ExcDimensionMismatch(B.n_rows, C.n_columns));
      Assert(this->row_block_size == C.row_block_size,
             ExcDimensionMismatch(this->row_block_size, C.row_block_size));
      Assert(this->column_block_size == B.column_block_size,
             ExcDimensionMismatch(this->column_block_size,
                                  B.column_block_size));
      Assert(B.row_block_size == C.column_block_size,
             ExcDimensionMismatch(B.row_block_size, C.column_block_size));
    }
  else // transpose_A && transpose_B
    {
      Assert(this->n_rows == B.n_columns,
             ExcDimensionMismatch(this->n_rows, B.n_columns));
      Assert(this->n_columns == C.n_rows,
             ExcDimensionMismatch(this->n_columns, C.n_rows));
      Assert(B.n_rows == C.n_columns,
             ExcDimensionMismatch(B.n_rows, C.n_columns));
      Assert(this->column_block_size == C.row_block_size,
             ExcDimensionMismatch(this->column_block_size, C.row_block_size));
      Assert(this->row_block_size == B.column_block_size,
             ExcDimensionMismatch(this->row_block_size, B.column_block_size));
      Assert(B.row_block_size == C.column_block_size,
             ExcDimensionMismatch(B.row_block_size, C.column_block_size));
    }

  if (this->grid->mpi_process_is_active)
    {
      char trans_a = transpose_A ? 'T' : 'N';
      char trans_b = transpose_B ? 'T' : 'N';

      const NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =
        (B.values.size() > 0) ? B.values.data() : nullptr;
      NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
      int m = C.n_rows;
      int n = C.n_columns;
      int k = transpose_A ? this->n_rows : this->n_columns;

      pgemm(&trans_a,
            &trans_b,
            &m,
            &n,
            &k,
            &b,
            A_loc,
            &(this->submatrix_row),
            &(this->submatrix_column),
            this->descriptor,
            B_loc,
            &B.submatrix_row,
            &B.submatrix_column,
            B.descriptor,
            &c,
            C_loc,
            &C.submatrix_row,
            &C.submatrix_column,
            C.descriptor);
    }
  C.state = LAPACKSupport::matrix;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType>       &C,
                                   const ScaLAPACKMatrix<NumberType> &B,
                                   const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, false, false);
  else
    mult(1., B, 0, C, false, false);
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType>       &C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, true, false);
  else
    mult(1., B, 0, C, true, false);
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType>       &C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, false, true);
  else
    mult(1., B, 0, C, false, true);
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType>       &C,
                                     const ScaLAPACKMatrix<NumberType> &B,
                                     const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, true, true);
  else
    mult(1., B, 0, C, true, true);
}
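// The four wrappers map onto the generic mult() as follows (a sketch; C must
// be sized consistently and share the grid of A and B):
//
// @code
//   A.mmult(C, B);   // C = A * B
//   A.Tmmult(C, B);  // C = A^T * B
//   A.mTmult(C, B);  // C = A * B^T
//   A.TmTmult(C, B); // C = A^T * B^T
// @endcode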
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
{
  Assert(
    n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
    ExcMessage(
      "Cholesky factorization can be applied to symmetric matrices only."));
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      // ... (ppotrf on the stored triangle and error check)
    }
  state    = LAPACKSupport::cholesky;
  property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
                            LAPACKSupport::upper_triangular);
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      // the pivot array has to be sized according to the process-local
      // number of rows:
      const int iarow = indxg2p_(&submatrix_row,
                                 &row_block_size,
                                 &(grid->this_process_row),
                                 &first_process_row,
                                 &(grid->n_process_rows));
      const int mp    = numroc_(&n_rows,
                             &row_block_size,
                             &(grid->this_process_row),
                             &iarow,
                             &(grid->n_process_rows));
      ipiv.resize(mp + row_block_size);

      // ... (pgetrf and error check)
    }
  state    = LAPACKSupport::State::lu;
  property = LAPACKSupport::Property::general;
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::invert()
{
  // Matrices in a triangular state are inverted directly with ptrtri; all
  // other matrices are factorized first, unless a factorization is already
  // stored:
  if (property == LAPACKSupport::upper_triangular ||
      property == LAPACKSupport::lower_triangular)
    {
      if (grid->mpi_process_is_active)
        {
          const char diag = 'N';
          // ... (ptrtri call and error check)
        }
    }
  else
    {
      // a previously computed Cholesky factorization implies that the
      // original matrix was symmetric:
      const bool is_symmetric = (property == LAPACKSupport::symmetric ||
                                 state == LAPACKSupport::State::cholesky);
      if (state == LAPACKSupport::matrix)
        {
          if (is_symmetric)
            compute_cholesky_factorization();
          else
            compute_lu_factorization();
        }
      if (grid->mpi_process_is_active)
        {
          // ... (ppotri for the Cholesky case; the LU case first runs a
          //      workspace query for pgetri with lwork = -1:)
          int                     lwork = -1;
          std::vector<NumberType> work(1);
          // ...
          lwork = static_cast<int>(work[0]);
          work.resize(lwork);
          // ... (actual pgetri call and error check)
        }
    }
  state = LAPACKSupport::State::inverse_matrix;
}
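// A typical factorize-then-invert sequence (a sketch; `A` is assumed to be
// symmetric positive definite so that the Cholesky path is taken):
//
// @code
//   A.set_property(LAPACKSupport::Property::symmetric);
//   const double a_norm = A.l1_norm(); // while still in matrix state
//   A.compute_cholesky_factorization();
//   const double rcond = A.reciprocal_condition_number(a_norm);
//   A.invert(); // reuses the existing Cholesky factorization
// @endcode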
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)
{
  // check validity of index limits:
  AssertIndexRange(index_limits.first, n_rows);
  AssertIndexRange(index_limits.second, n_rows);

  std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  // compute all eigenvalues/eigenvectors:
  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric(compute_eigenvectors);
  else
    return eigenpairs_symmetric(compute_eigenvectors, idx);
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)
{
  Assert(!std::isnan(value_limits.first),
         ExcMessage("value_limits.first is NaN"));
  Assert(!std::isnan(value_limits.second),
         ExcMessage("value_limits.second is NaN"));

  std::pair<unsigned int, unsigned int> indices =
    std::make_pair(numbers::invalid_unsigned_int,
                   numbers::invalid_unsigned_int);

  return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType>     &eigenvalue_limits)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));

  std::lock_guard<std::mutex> lock(mutex);

  const bool use_values = !std::isnan(eigenvalue_limits.first) &&
                          !std::isnan(eigenvalue_limits.second);
  const bool use_indices =
    (eigenvalue_idx.first != numbers::invalid_unsigned_int) &&
    (eigenvalue_idx.second != numbers::invalid_unsigned_int);

  Assert(
    !(use_values && use_indices),
    ExcMessage(
      "Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // if computation of eigenvectors is not required, use a sufficiently
  // small distributed matrix as a placeholder:
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
      std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
                                                    grid,
                                                    row_block_size) :
      std::make_unique<ScaLAPACKMatrix<NumberType>>(
        grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
  eigenvectors->property = property;

  // number of eigenvalues to be returned from psyevx; upon successful exit
  // ev contains the m selected eigenvalues in ascending order:
  int                     m = n_rows;
  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      // ... (set jobz/range according to compute_eigenvectors and the
      //      requested index or value limits)
      NumberType vl = NumberType(), vu = NumberType();
      // absolute tolerance of the eigensolver:
      NumberType abstol = NumberType();
      // ...
      NumberType orfac = 0;
      std::vector<int>        ifail;
      std::vector<int>        iclustr;
      std::vector<NumberType> gap(n_local_rows * n_local_columns);

      // ... (workspace query with lwork = -1, then allocate:)
      int                     lwork = -1;
      std::vector<NumberType> work(1);
      ifail.resize(n_rows);
      iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
      gap.resize(grid->n_process_rows * grid->n_process_columns);
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      // ... (actual psyevx call and error handling)
    }
  // ...
  if (compute_eigenvectors)
    this->values.swap(eigenvectors->values);
  // only the first m entries of ev are meaningful:
  grid->send_to_inactive(&m, 1);
  if (!grid->mpi_process_is_active)
    ev.resize(m);
  grid->send_to_inactive(ev.data(), ev.size());
  // ... (the matrix content has been destroyed or replaced by the
  //      eigenvectors, so update state and property accordingly)
  return ev;
}
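// Sketch: computing all eigenpairs of a symmetric matrix via the by-index
// interface; the eigenvalues come back sorted in ascending order and the
// eigenvectors replace the matrix content.
//
// @code
//   A.set_property(LAPACKSupport::Property::symmetric);
//   const std::vector<double> evals = A.eigenpairs_symmetric_by_index(
//     std::make_pair(0, A.n() - 1), /*compute_eigenvectors=*/true);
// @endcode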
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)
{
  // check validity of index limits:
  AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
  AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));

  const std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  // compute all eigenvalues/eigenvectors:
  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric_MRRR(compute_eigenvectors);
  else
    return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
}



template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)
{
  AssertIsFinite(value_limits.first);
  AssertIsFinite(value_limits.second);

  const std::pair<unsigned int, unsigned int> indices =
    std::make_pair(numbers::invalid_unsigned_int,
                   numbers::invalid_unsigned_int);

  return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
}



template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType>     &eigenvalue_limits)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));

  std::lock_guard<std::mutex> lock(mutex);

  // ... (as in eigenpairs_symmetric(), prescribing both an index and a
  //      value range is rejected:)
  //   Assert(!(use_values && use_indices), ExcMessage(
  //     "Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // if eigenvectors are not requested, use a minimal placeholder matrix:
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
      std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
                                                    grid,
                                                    row_block_size) :
      std::make_unique<ScaLAPACKMatrix<NumberType>>(
        grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
  eigenvectors->property = property;

  // number of eigenvalues (m) and eigenvectors (nz) computed by psyevr:
  int                     m  = n_rows;
  int                     nz = 0;
  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      // ...
      NumberType vl = NumberType(), vu = NumberType();
      // ... (workspace query with lwork = -1, then:)
      int                     lwork = -1;
      std::vector<NumberType> work(1);
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      // ... (actual psyevr call)

      if (compute_eigenvectors)
        AssertThrow(
          m == nz,
          ExcMessage(
            "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
    }
  // ...
  if (compute_eigenvectors)
    this->values.swap(eigenvectors->values);
  // only the first m entries of ev are meaningful:
  grid->send_to_inactive(&m, 1);
  if (!grid->mpi_process_is_active)
    ev.resize(m);
  grid->send_to_inactive(ev.data(), ev.size());
  // ... (update state and property)
  return ev;
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
                                         ScaLAPACKMatrix<NumberType> *VT)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size == column_block_size,
         ExcDimensionMismatch(row_block_size, column_block_size));

  const bool left_singular_vectors  = (U != nullptr);
  const bool right_singular_vectors = (VT != nullptr);

  if (left_singular_vectors)
    {
      Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
      Assert(U->n_rows == U->n_columns,
             ExcDimensionMismatch(U->n_rows, U->n_columns));
      Assert(row_block_size == U->row_block_size,
             ExcDimensionMismatch(row_block_size, U->row_block_size));
      Assert(column_block_size == U->column_block_size,
             ExcDimensionMismatch(column_block_size, U->column_block_size));
      Assert(grid->blacs_context == U->grid->blacs_context,
             ExcDimensionMismatch(grid->blacs_context,
                                  U->grid->blacs_context));
    }
  if (right_singular_vectors)
    {
      // ... (the analogous dimension checks for VT:)
      Assert(row_block_size == VT->row_block_size,
             ExcDimensionMismatch(row_block_size, VT->row_block_size));
      Assert(column_block_size == VT->column_block_size,
             ExcDimensionMismatch(column_block_size, VT->column_block_size));
      Assert(grid->blacs_context == VT->grid->blacs_context,
             ExcDimensionMismatch(grid->blacs_context,
                                  VT->grid->blacs_context));
    }

  std::lock_guard<std::mutex> lock(mutex);

  std::vector<NumberType> sv(std::min(n_rows, n_columns));

  if (grid->mpi_process_is_active)
    {
      char jobu  = left_singular_vectors ? 'V' : 'N';
      char jobvt = right_singular_vectors ? 'V' : 'N';
      int                     info  = 0;
      int                     lwork = -1;
      std::vector<NumberType> work(1);
      // ... (workspace query: pgesvd with lwork = -1, passing
      //      &U->submatrix_row, &U->submatrix_column and
      //      &VT->submatrix_row, &VT->submatrix_column, then:)
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      // ... (actual pgesvd call with the same argument list and error check)
    }

  // send the singular values to processes not being part of the grid:
  grid->send_to_inactive(sv.data(), sv.size());

  property = LAPACKSupport::Property::general;
  state    = LAPACKSupport::State::unusable;

  return sv;
}
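// Sketch of a full SVD A = U * Sigma * VT; U and VT must be square, live on
// the same grid, and use the same block sizes as A:
//
// @code
//   ScaLAPACKMatrix<double> U(A.m(), A.m(), grid, 32, 32);
//   ScaLAPACKMatrix<double> VT(A.n(), A.n(), grid, 32, 32);
//   const std::vector<double> sigma = A.compute_SVD(&U, &VT);
// @endcode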
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
                                           const bool                   transpose)
{
  Assert(grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(B.state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix B has to be in Matrix state before calling this function."));

  // ... (consistency checks on the dimensions of A and B for the
  //      transposed and non-transposed cases)

  Assert(row_block_size == column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix A"));
  Assert(B.row_block_size == B.column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix B"));
  Assert(row_block_size == B.row_block_size,
         ExcMessage(
           "Use identical block-cyclic distribution for matrices A and B"));

  std::lock_guard<std::mutex> lock(mutex);

  if (grid->mpi_process_is_active)
    {
      char        trans = transpose ? 'T' : 'N';
      NumberType *A_loc = this->values.data();
      NumberType *B_loc = B.values.data();
      int                     info  = 0;
      int                     lwork = -1;
      std::vector<NumberType> work(1);

      // ... (workspace query: pgels with lwork = -1, passing
      //      &B.submatrix_row, &B.submatrix_column and both descriptors,
      //      then:)
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      // ... (actual pgels call with the same argument list)
    }
  state = LAPACKSupport::State::unusable;
}
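// Sketch of solving min ||A x - b||_2 with the driver above: the right-hand
// sides are stored column-wise in B and are overwritten with the solution.
//
// @code
//   // B has A.m() rows; after the call rows 0..A.n()-1 of each column of B
//   // contain the least-squares solution:
//   A.least_squares(B);
// @endcode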
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size == column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix A"));
  Assert(
    ratio > 0. && ratio < 1.,
    ExcMessage(
      "input parameter ratio has to be larger than zero and smaller than 1"));

  // compute the full SVD of this matrix:
  ScaLAPACKMatrix<NumberType> U(n_rows, n_rows, grid,
                                row_block_size, column_block_size);
  ScaLAPACKMatrix<NumberType> VT(n_columns, n_columns, grid,
                                 row_block_size, column_block_size);
  std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
  // ...

  // determine the number of singular values whose ratio to the largest one
  // is above the threshold, and invert them:
  unsigned int            n_sv = 1;
  std::vector<NumberType> inv_sigma;
  inv_sigma.push_back(1 / sv[0]);
  for (unsigned int i = 1; i < sv.size(); ++i)
    if (sv[i] > sv[0] * ratio)
      {
        ++n_sv;
        inv_sigma.push_back(1 / sv[i]);
      }
    else
      break;

  // copy the first n_sv columns of U and rows of VT into reduced matrices:
  ScaLAPACKMatrix<NumberType> U_R(n_rows, n_sv, grid,
                                  row_block_size, column_block_size);
  ScaLAPACKMatrix<NumberType> VT_R(n_sv, n_columns, grid,
                                   row_block_size, column_block_size);
  U.copy_to(U_R,
            std::make_pair(0, 0),
            std::make_pair(0, 0),
            std::make_pair(n_rows, n_sv));
  VT.copy_to(VT_R,
             std::make_pair(0, 0),
             std::make_pair(0, 0),
             std::make_pair(n_sv, n_columns));

  VT_R.scale_rows(inv_sigma);
  this->reinit(n_columns,
               n_rows,
               this->grid,
               row_block_size,
               column_block_size,
               LAPACKSupport::Property::general);
  VT_R.mult(1, U_R, 0, *this, true, true);
  state = LAPACKSupport::State::inverse_matrix;
  return n_sv;
}
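// Sketch: replace `A` by its Moore-Penrose pseudoinverse, truncating all
// singular values below ratio * sigma_max; the effective rank is returned.
//
// @code
//   const unsigned int effective_rank = A.pseudoinverse(1e-8);
// @endcode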
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
  const NumberType a_norm) const
{
  Assert(
    state == LAPACKSupport::cholesky,
    ExcMessage(
      "Matrix has to be in Cholesky state before calling this function."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType rcond = 0.;

  if (grid->mpi_process_is_active)
    {
      int liwork = n_local_rows;
      int                     lwork = -1;
      std::vector<NumberType> work(1);
      // ... (workspace query for ppocon with lwork = -1, then:)
      lwork = static_cast<int>(std::ceil(work[0]));
      work.resize(lwork);
      // ... (actual ppocon call using a_norm and error check)
    }
  grid->send_to_inactive(&rcond);
  return rcond;
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::l1_norm() const
{
  const char type('O');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}



template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::linfty_norm() const
{
  const char type('I');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}



template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::frobenius_norm() const
{
  const char type('F');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
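// Sketch: all three norms dispatch to norm_symmetric()/norm_general()
// depending on the stored property.
//
// @code
//   const double n1   = A.l1_norm();        // maximum column sum
//   const double ninf = A.linfty_norm();    // maximum row sum
//   const double nf   = A.frobenius_norm(); // sqrt of the sum of squares
// @endcode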
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
{
  Assert(state == LAPACKSupport::matrix ||
           state == LAPACKSupport::inverse_matrix,
         ExcMessage("norms can be called in matrix state only."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      const int iarow = indxg2p_(&submatrix_row,
                                 &row_block_size,
                                 &(grid->this_process_row),
                                 &first_process_row,
                                 &(grid->n_process_rows));
      const int iacol = indxg2p_(&submatrix_column,
                                 &column_block_size,
                                 &(grid->this_process_column),
                                 &first_process_column,
                                 &(grid->n_process_columns));
      const int mp0 = numroc_(&n_rows,
                              &row_block_size,
                              &(grid->this_process_row),
                              &iarow,
                              &(grid->n_process_rows));
      const int nq0 = numroc_(&n_columns,
                              &column_block_size,
                              &(grid->this_process_column),
                              &iacol,
                              &(grid->n_process_columns));

      // the work array size follows the plange documentation:
      std::vector<NumberType> work;
      if (type == 'O' || type == '1')
        work.resize(nq0);
      else if (type == 'I')
        work.resize(mp0);

      // ... (plange call returning the requested norm into res)
    }
  grid->send_to_inactive(&res);
  return res;
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
{
  Assert(state == LAPACKSupport::matrix ||
           state == LAPACKSupport::inverse_matrix,
         ExcMessage("norms can be called in matrix state only."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      const int lcm =
        ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
      const int v2 = lcm / (grid->n_process_rows);

      const int iarow = indxg2p_(&submatrix_row,
                                 &row_block_size,
                                 &(grid->this_process_row),
                                 &first_process_row,
                                 &(grid->n_process_rows));
      const int iacol = indxg2p_(&submatrix_column,
                                 &column_block_size,
                                 &(grid->this_process_column),
                                 &first_process_column,
                                 &(grid->n_process_columns));
      const int Np0 = numroc_(&n_rows,
                              &row_block_size,
                              &(grid->this_process_row),
                              &iarow,
                              &(grid->n_process_rows));
      const int Nq0 = numroc_(&n_columns,
                              &column_block_size,
                              &(grid->this_process_column),
                              &iacol,
                              &(grid->n_process_columns));

      // the work array size follows the plansy documentation; ldw only
      // enters for non-square local matrices (the elided term involves the
      // block size, Np0, and v2):
      const int ldw = (n_local_rows == n_local_columns) ?
                        0 :
                        /* ... */ row_block_size * v2;
      const int lwork =
        (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
      std::vector<NumberType> work(lwork);
      // ... (plansy call returning the requested norm into res)
    }
  grid->send_to_inactive(&res);
  return res;
}
#  ifdef DEAL_II_WITH_HDF5
namespace internal
{
  namespace
  {
    // ... (helpers that create the HDF5 enum types used to store the
    //      LAPACKSupport::State and LAPACKSupport::Property of a matrix)
  } // namespace
} // namespace internal
#  endif
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save(
  const std::string                           &filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
#  ifndef DEAL_II_WITH_HDF5
  AssertThrow(false, ExcNeedsHDF5());
#  else

  std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;

  if (chunks_size_.first == numbers::invalid_unsigned_int ||
      chunks_size_.second == numbers::invalid_unsigned_int)
    {
      // default: store the matrix in chunks of columns:
      chunks_size_.first  = n_rows;
      chunks_size_.second = 1;
    }
  Assert(chunks_size_.first > 0,
         ExcMessage("The row chunk size must be larger than 0."));
  AssertIndexRange(chunks_size_.first, n_rows + 1);
  Assert(chunks_size_.second > 0,
         ExcMessage("The column chunk size must be larger than 0."));
  AssertIndexRange(chunks_size_.second, n_columns + 1);

#    ifdef H5_HAVE_PARALLEL
  // implementation for configurations equipped with a parallel file system:
  save_parallel(filename, chunks_size_);
#    else
  // implementation for configurations with no parallel file system:
  save_serial(filename, chunks_size_);
#    endif
#  endif
}
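// Sketch of a save/load round trip (the file name is made up); without an
// explicit chunk size the matrix is stored in chunks of single columns.
//
// @code
//   A.save("matrix.h5");
//   ScaLAPACKMatrix<double> B(A.m(), A.n(), grid, 32, 32);
//   B.load("matrix.h5");
// @endcode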
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_serial(
  const std::string                           &filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
#  ifndef DEAL_II_WITH_HDF5
  AssertThrow(false, ExcNeedsHDF5());
#  else

  // The content of the distributed matrix is copied to a matrix using a
  // 1x1 process grid; the process owning it writes the data to disk:
  const auto one_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  1);
  const int MB = n_rows, NB = n_columns;
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);
  copy_to(tmp);

  // the 1x1 grid has only one active process, which does the writing:
  if (tmp.grid->mpi_process_is_active)
    {
      // ... (create the HDF5 file, dataspaces, and chunked dataset; note
      //      that the data is stored transposed, hence:)
      hsize_t dims[2];
      dims[0] = n_columns;
      dims[1] = n_rows;
      // ... (write the dataset plus the state and property enums)
    }
#  endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_parallel(
  const std::string                           &filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
#  ifndef DEAL_II_WITH_HDF5
  AssertThrow(false, ExcNeedsHDF5());
#  else

  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  // The content of the distributed matrix is copied to a matrix distributed
  // columnwise over a 1 x n_mpi_processes grid, so that all processes can
  // participate in the collective write:
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  n_mpi_processes);
  const int MB = n_rows;
  // make sure NB is at least one even for very many processes:
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),
                          1);
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
  copy_to(tmp);

  // ... (open the file collectively; the data is stored transposed, hence:)
  hsize_t dims[2];
  dims[0] = tmp.n_columns;
  dims[1] = tmp.n_rows;
  // ... (create the file/memory dataspaces and the chunked dataset)

  // ... (gather the local column/row counts of all processes into
  //      proc_n_local_columns/proc_n_local_rows via two MPI_Allgather calls
  //      on tmp.grid->mpi_communicator)

  hsize_t count[2];
  count[0] = tmp.n_local_columns;
  count[1] = tmp.n_rows;

  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // select the hyperslab and write; only processes that actually own a
  // part of the matrix take part:
  if (tmp.values.size() > 0)
    {
      // ... (H5Dwrite with the collective transfer property list)
    }
  // the state and property enums are appended by the root process:
  if (tmp.grid->this_mpi_process == 0)
    {
      // ... (reopen the file serially and write the two enum datasets)
    }
#  endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
  AssertThrow(false, ExcNeedsHDF5());
#  else
#    ifdef H5_HAVE_PARALLEL
  // implementation for configurations equipped with a parallel file system:
  load_parallel(filename);
#    else
  // implementation for configurations with no parallel file system:
  load_serial(filename);
#    endif
#  endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
  AssertThrow(false, ExcNeedsHDF5());
#  else

  // The content is read into a matrix using a 1x1 process grid and then
  // distributed to this matrix:
  const auto one_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  1);
  const int MB = n_rows, NB = n_columns;
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);

  int state_int    = -1;
  int property_int = -1;

  // the 1x1 grid has only one active process, which does the reading:
  if (tmp.grid->mpi_process_is_active)
    {
      // ... (open the file and the dataset, and check that the on-disk
      //      datatype matches NumberType:)
      //   AssertThrow(..., ExcMessage(
      //     "The data type of the matrix to be read does not match the archive"));

      // ... (query the dataset extent; the data is stored transposed:)
      hsize_t dims[2];
      // ...
      AssertThrow(
        static_cast<int>(dims[0]) == n_columns,
        ExcMessage(
          "The number of columns of the matrix does not match the content of the archive"));
      AssertThrow(
        static_cast<int>(dims[1]) == n_rows,
        ExcMessage(
          "The number of rows of the matrix does not match the content of the archive"));

      // ... (read the matrix entries as well as the state and property
      //      enums, then:)
      state_int = static_cast<int>(tmp.state);
      // ...
    }
  // distribute state and property to the processes outside the 1x1 grid:
  tmp.grid->send_to_inactive(&state_int, 1);
  // ... (same for property_int, then copy tmp into this matrix)
  tmp.copy_to(*this);
#  endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
  AssertThrow(false, ExcNeedsHDF5());
#  else
#    ifndef H5_HAVE_PARALLEL
  // serial fallback if HDF5 was built without MPI support:
  load_serial(filename);
#    else

  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  // The matrix is read into a temporary matrix distributed columnwise over
  // a 1 x n_mpi_processes grid, so that all processes can read concurrently:
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  n_mpi_processes);
  const int MB = n_rows;
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),
                          1);
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);

  // ... (open the file collectively, open the dataset, and check that the
  //      on-disk datatype matches NumberType:)
  //   AssertThrow(..., ExcMessage(
  //     "The data type of the matrix to be read does not match the archive"));

  // ... (query the dataset extent; the data is stored transposed:)
  hsize_t dims[2];
  // ...
  AssertThrow(
    static_cast<int>(dims[0]) == n_columns,
    ExcMessage(
      "The number of columns of the matrix does not match the content of the archive"));
  AssertThrow(
    static_cast<int>(dims[1]) == n_rows,
    ExcMessage(
      "The number of rows of the matrix does not match the content of the archive"));

  // ... (gather the local column/row counts of all processes into
  //      proc_n_local_columns/proc_n_local_rows via two MPI_Allgather calls
  //      on tmp.grid->mpi_communicator)

  hsize_t count[2];
  count[0] = tmp.n_local_columns;
  count[1] = tmp.n_local_rows;

  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // ... (select the hyperslab, read the local part as well as the state
  //      and property enums, distribute them, and copy tmp into this
  //      matrix)
#    endif
#  endif
}
namespace internal
{
  namespace
  {
    template <typename NumberType>
    void
    scale_columns(ScaLAPACKMatrix<NumberType>       &matrix,
                  const ArrayView<const NumberType> &factors)
    {
      Assert(matrix.n() == factors.size(),
             ExcDimensionMismatch(matrix.n(), factors.size()));

      for (unsigned int i = 0; i < matrix.local_n(); ++i)
        {
          const NumberType s = factors[matrix.global_column(i)];

          for (unsigned int j = 0; j < matrix.local_m(); ++j)
            matrix.local_el(j, i) *= s;
        }
    }

    template <typename NumberType>
    void
    scale_rows(ScaLAPACKMatrix<NumberType>       &matrix,
               const ArrayView<const NumberType> &factors)
    {
      Assert(matrix.m() == factors.size(),
             ExcDimensionMismatch(matrix.m(), factors.size()));

      for (unsigned int i = 0; i < matrix.local_m(); ++i)
        {
          const NumberType s = factors[matrix.global_row(i)];

          for (unsigned int j = 0; j < matrix.local_n(); ++j)
            matrix.local_el(i, j) *= s;
        }
    }
  } // namespace
} // namespace internal
template <typename NumberType>
template <class InputVector>
void
ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_columns(*this, make_array_view(factors));
}



template <typename NumberType>
template <class InputVector>
void
ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_rows(*this, make_array_view(factors));
}
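// Sketch: applying a diagonal matrix from the right (column scaling) or
// from the left (row scaling):
//
// @code
//   std::vector<double> d(A.n(), 2.);
//   A.scale_columns(d); // A = A * D
//   std::vector<double> r(A.m(), 0.5);
//   A.scale_rows(r);    // A = R * A
// @endcode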
// instantiations
#  include "lac/scalapack.inst"

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK