#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/base/mpi.templates.h>

#  include <deal.II/lac/scalapack.templates.h>

#  ifdef DEAL_II_WITH_HDF5
#    include <hdf5.h>
#  endif

DEAL_II_NAMESPACE_OPEN

#  ifdef DEAL_II_WITH_HDF5
// Map C++ number types to the matching native HDF5 type ids; one overload per
// supported type, the generic template must never be called.
template <typename number>
inline hid_t hdf5_type_id(const number *)
{
  DEAL_II_NOT_IMPLEMENTED();
  return -1;
}

inline hid_t hdf5_type_id(const double *)       { return H5T_NATIVE_DOUBLE; }
inline hid_t hdf5_type_id(const float *)        { return H5T_NATIVE_FLOAT; }
inline hid_t hdf5_type_id(const int *)          { return H5T_NATIVE_INT; }
inline hid_t hdf5_type_id(const unsigned int *) { return H5T_NATIVE_UINT; }
inline hid_t hdf5_type_id(const char *)         { return H5T_NATIVE_CHAR; }
#  endif // DEAL_II_WITH_HDF5


// Constructor of a distributed n_rows_ x n_columns_ matrix on the given
// process grid; the actual set-up is delegated to reinit().
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const size_type n_rows_, const size_type n_columns_,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type row_block_size_, const size_type column_block_size_,
  const LAPACKSupport::Property property_)
  : uplo('L')
  , first_process_row(0)
  , first_process_column(0)
  , submatrix_row(1)
  , submatrix_column(1)
{
  reinit(n_rows_, n_columns_, process_grid,
         row_block_size_, column_block_size_, property_);
}



// Constructor of a square size x size matrix, delegating to the general one.
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const size_type size,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type block_size, const LAPACKSupport::Property property)
  : ScaLAPACKMatrix<NumberType>(size, size, process_grid,
                                block_size, block_size, property)
{}



// Constructor that reads the matrix dimensions from an HDF5 archive on
// process 0, broadcasts them, allocates the matrix and loads its content.
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const std::string &filename,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type row_block_size_, const size_type column_block_size_)
  : uplo('L')
  , first_process_row(0)
  , first_process_column(0)
  , submatrix_row(1)
  , submatrix_column(1)
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)process_grid;
  (void)row_block_size_;
  (void)column_block_size_;
  Assert(
    false,
    ExcMessage(
      "This function is only available when deal.II is configured with HDF5"));
#  else
  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));

  // process 0 reads the dimensions of the stored matrix from the archive
  if (this_mpi_process == 0)
    {
      hid_t file      = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
      hid_t dataset   = H5Dopen2(file, "/matrix", H5P_DEFAULT);
      hid_t filespace = H5Dget_space(dataset);

      int     rank = H5Sget_simple_extent_ndims(filespace);
      hsize_t dims[2];
      herr_t  status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
      // the matrix is stored in transposed order, so the dimensions are swapped
      n_rows    = dims[1];
      n_columns = dims[0];

      status = H5Sclose(filespace);
      status = H5Dclose(dataset);
      status = H5Fclose(file);
      (void)rank;
      (void)status;
    }

  // broadcast the dimensions to all processes
  int ierr = MPI_Bcast(&n_rows,
                       1,
                       Utilities::MPI::mpi_type_id_for_type<decltype(n_rows)>,
                       0 /*root*/,
                       process_grid->mpi_communicator);
  AssertThrowMPI(ierr);
  ierr = MPI_Bcast(&n_columns,
                   1,
                   Utilities::MPI::mpi_type_id_for_type<decltype(n_columns)>,
                   0 /*root*/,
                   process_grid->mpi_communicator);
  AssertThrowMPI(ierr);

  reinit(n_rows, n_columns, process_grid,
         row_block_size_, column_block_size_, LAPACKSupport::Property::general);

  load(filename.c_str());
#  endif
}


// Set up the block-cyclic distribution of an n_rows_ x n_columns_ matrix on
// the given process grid and allocate the local storage.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(
  const size_type n_rows_, const size_type n_columns_,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type row_block_size_, const size_type column_block_size_,
  const LAPACKSupport::Property property_)
{
  Assert(row_block_size_ > 0,
         ExcMessage("Row block size has to be positive."));
  Assert(column_block_size_ > 0,
         ExcMessage("Column block size has to be positive."));
  Assert(
    row_block_size_ <= n_rows_,
    ExcMessage(
      "Row block size can not be greater than the number of rows of the matrix"));
  Assert(
    column_block_size_ <= n_columns_,
    ExcMessage(
      "Column block size can not be greater than the number of columns of the matrix"));

  state             = LAPACKSupport::matrix;
  property          = property_;
  grid              = process_grid;
  n_rows            = n_rows_;
  n_columns         = n_columns_;
  row_block_size    = row_block_size_;
  column_block_size = column_block_size_;

  if (grid->mpi_process_is_active)
    {
      // determine the size of the local part of the matrix owned by this process
      n_local_rows    = numroc_(&n_rows,
                             &row_block_size,
                             &(grid->this_process_row),
                             &first_process_row,
                             &(grid->n_process_rows));
      n_local_columns = numroc_(&n_columns,
                                &column_block_size,
                                &(grid->this_process_column),
                                &first_process_column,
                                &(grid->n_process_columns));

      // fill the BLACS array descriptor of the distributed matrix
      int lda  = std::max(1, n_local_rows);
      int info = 0;
      descinit_(descriptor,
                &n_rows,
                &n_columns,
                &row_block_size,
                &column_block_size,
                &first_process_row,
                &first_process_column,
                &(grid->blacs_context),
                &lda,
                &info);

      this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
    }
  else
    {
      // this process does not belong to the grid and owns no data
      n_local_rows    = -1;
      n_local_columns = -1;
      std::fill(std::begin(descriptor), std::end(descriptor), -1);
    }
}


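// --------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of the library source): how a
// process grid and a distributed matrix are typically set up from user code
// before the constructors/reinit() above do their work. Assumes deal.II was
// built with ScaLAPACK and MPI is initialized; `comm`, the dimensions and the
// function name are placeholders. Kept in "#if 0" so it is not compiled here.
#if 0
#  include <deal.II/base/process_grid.h>
#  include <deal.II/lac/scalapack.h>

void example_setup(MPI_Comm comm)
{
  using namespace dealii;

  const unsigned int n = 1000, block = 32;

  // 2d process grid chosen to fit an n x n matrix with block x block blocks
  const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    comm, n, n, block, block);

  // distributed, block-cyclically stored matrix living on that grid
  ScaLAPACKMatrix<double> A(n, n, grid, block, block,
                            LAPACKSupport::Property::symmetric);
}
#endif
// --------------------------------------------------------------------------
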
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(
  const size_type size,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type block_size, const LAPACKSupport::Property property)
{
  reinit(size, size, process_grid, block_size, block_size, property);
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::set_property(const LAPACKSupport::Property property_)
{
  property = property_;
}



template <typename NumberType>
LAPACKSupport::Property
ScaLAPACKMatrix<NumberType>::get_property() const
{
  return property;
}



template <typename NumberType>
LAPACKSupport::State
ScaLAPACKMatrix<NumberType>::get_state() const
{
  return state;
}



// Fill the distributed matrix from a (replicated) serial FullMatrix: every
// process copies those entries of the global matrix it locally owns.
template <typename NumberType>
ScaLAPACKMatrix<NumberType> &
ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
{
  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert(n_columns == int(matrix.n()),
         ExcDimensionMismatch(n_columns, matrix.n()));

  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              local_el(i, j)   = matrix(glob_i, glob_j);
            }
        }
    }
  state = LAPACKSupport::matrix;
  return *this;
}


// Distribute a serial LAPACKFullMatrix, stored on MPI process `rank` only,
// into this ScaLAPACK matrix: a one-process BLACS grid containing `rank` is
// set up and the redistribution is done with p_gemr2d.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
                                       const unsigned int                  rank)
{
  if (n_rows * n_columns == 0)
    return;

  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));

  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));
  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));

  if (this_mpi_process == rank)
    {
      Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
      Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
    }

  // create a communicator containing only the source process `rank`
  MPI_Group group_A;
  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
  const int              n = 1;
  const std::vector<int> ranks(n, rank);
  MPI_Group group_B;
  MPI_Group_incl(group_A, n, ranks.data(), &group_B);
  MPI_Comm  communicator_B;
  const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
                                         group_B,
                                         0,
                                         &communicator_B);
  AssertThrowMPI(ierr);

  // set up a 1x1 BLACS grid on that communicator
  int n_proc_rows_B = 1, n_proc_cols_B = 1;
  int this_process_row_B = -1, this_process_column_B = -1;
  int blacs_context_B = -1;
  if (MPI_COMM_NULL != communicator_B)
    {
      blacs_context_B   = Csys2blacs_handle(communicator_B);
      const char *order = "Col";
      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
      Cblacs_gridinfo(blacs_context_B,
                      &n_proc_rows_B,
                      &n_proc_cols_B,
                      &this_process_row_B,
                      &this_process_column_B);
    }
  const bool mpi_process_is_active_B =
    (this_process_row_B >= 0 && this_process_column_B >= 0);

  // array descriptor of the serial matrix B on the single-process grid
  std::vector<int> descriptor_B(9, -1);
  const int        first_process_row_B = 0, first_process_col_B = 0;
  if (mpi_process_is_active_B)
    {
      int n_local_rows_B = numroc_(&n_rows,
                                   &n_rows,
                                   &this_process_row_B,
                                   &first_process_row_B,
                                   &n_proc_rows_B);
      int n_local_cols_B = numroc_(&n_columns,
                                   &n_columns,
                                   &this_process_column_B,
                                   &first_process_col_B,
                                   &n_proc_cols_B);
      (void)n_local_cols_B;

      int lda  = std::max(1, n_local_rows_B);
      int info = 0;
      descinit_(descriptor_B.data(), &n_rows, &n_columns, &n_rows, &n_columns,
                &first_process_row_B, &first_process_col_B, &blacs_context_B,
                &lda, &info);
    }

  // redistribute the serial matrix B into the block-cyclic layout of *this
  if (this->grid->mpi_process_is_active)
    {
      const int   ib = 1, jb = 1;
      NumberType *loc_vals_A =
        this->values.size() > 0 ? this->values.data() : nullptr;
      const NumberType *loc_vals_B =
        mpi_process_is_active_B ? &(B(0, 0)) : nullptr;

      pgemr2d(&n_rows, &n_columns,
              loc_vals_B, &ib, &jb, descriptor_B.data(),
              loc_vals_A, &submatrix_row, &submatrix_column, this->descriptor,
              &(this->grid->blacs_context));
    }
  if (mpi_process_is_active_B)
    Cblacs_gridexit(blacs_context_B);

  MPI_Group_free(&group_A);
  MPI_Group_free(&group_B);
  if (MPI_COMM_NULL != communicator_B)
    Utilities::MPI::free_communicator(communicator_B);

  state = LAPACKSupport::matrix;
}


// Translate a local row index into the corresponding global row index.
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
{
  Assert(n_local_rows >= 0 &&
           loc_row < static_cast<unsigned int>(n_local_rows),
         ExcIndexRange(loc_row, 0, n_local_rows));
  const int i = loc_row + 1;
  return indxl2g_(&i,
                  &row_block_size,
                  &(grid->this_process_row),
                  &first_process_row,
                  &(grid->n_process_rows)) -
         1;
}



// Translate a local column index into the corresponding global column index.
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
{
  Assert(n_local_columns >= 0 &&
           loc_column < static_cast<unsigned int>(n_local_columns),
         ExcIndexRange(loc_column, 0, n_local_columns));
  const int j = loc_column + 1;
  return indxl2g_(&j,
                  &column_block_size,
                  &(grid->this_process_column),
                  &first_process_column,
                  &(grid->n_process_columns)) -
         1;
}


// Collect the distributed matrix into the serial LAPACKFullMatrix B stored on
// MPI process `rank` only; the mirror image of copy_from() above.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
                                     const unsigned int            rank) const
{
  if (n_rows * n_columns == 0)
    return;

  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));

  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));
  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));

  if (this_mpi_process == rank)
    B.reinit(n_rows, n_columns);

  // create a communicator and a 1x1 BLACS grid containing only process `rank`
  MPI_Group group_A;
  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
  const int              n = 1;
  const std::vector<int> ranks(n, rank);
  MPI_Group group_B;
  MPI_Group_incl(group_A, n, ranks.data(), &group_B);
  MPI_Comm  communicator_B;
  const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
                                         group_B,
                                         0,
                                         &communicator_B);
  AssertThrowMPI(ierr);

  int n_proc_rows_B = 1, n_proc_cols_B = 1;
  int this_process_row_B = -1, this_process_column_B = -1;
  int blacs_context_B = -1;
  if (MPI_COMM_NULL != communicator_B)
    {
      blacs_context_B   = Csys2blacs_handle(communicator_B);
      const char *order = "Col";
      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
      Cblacs_gridinfo(blacs_context_B,
                      &n_proc_rows_B,
                      &n_proc_cols_B,
                      &this_process_row_B,
                      &this_process_column_B);
    }
  const bool mpi_process_is_active_B =
    (this_process_row_B >= 0 && this_process_column_B >= 0);

  // array descriptor of the serial matrix B on the single-process grid
  std::vector<int> descriptor_B(9, -1);
  const int        first_process_row_B = 0, first_process_col_B = 0;
  if (mpi_process_is_active_B)
    {
      int n_local_rows_B = numroc_(&n_rows,
                                   &n_rows,
                                   &this_process_row_B,
                                   &first_process_row_B,
                                   &n_proc_rows_B);
      int n_local_cols_B = numroc_(&n_columns,
                                   &n_columns,
                                   &this_process_column_B,
                                   &first_process_col_B,
                                   &n_proc_cols_B);
      (void)n_local_cols_B;

      int lda  = std::max(1, n_local_rows_B);
      int info = 0;
      descinit_(descriptor_B.data(), &n_rows, &n_columns, &n_rows, &n_columns,
                &first_process_row_B, &first_process_col_B, &blacs_context_B,
                &lda, &info);
    }

  // redistribute this matrix into the serial matrix B
  if (this->grid->mpi_process_is_active)
    {
      const int         ib = 1, jb = 1;
      const NumberType *loc_vals_A =
        this->values.size() > 0 ? this->values.data() : nullptr;
      NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;

      pgemr2d(&n_rows, &n_columns,
              loc_vals_A, &submatrix_row, &submatrix_column, this->descriptor,
              loc_vals_B, &ib, &jb, descriptor_B.data(),
              &(this->grid->blacs_context));
    }
  if (mpi_process_is_active_B)
    Cblacs_gridexit(blacs_context_B);

  MPI_Group_free(&group_A);
  MPI_Group_free(&group_B);
  if (MPI_COMM_NULL != communicator_B)
    Utilities::MPI::free_communicator(communicator_B);
}


// Gather the distributed matrix into a (replicated) serial FullMatrix: each
// process fills in its locally owned entries and the result is summed up.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
{
  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert(n_columns == int(matrix.n()),
         ExcDimensionMismatch(n_columns, matrix.n()));

  matrix = 0.;
  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              matrix(glob_i, glob_j) = local_el(i, j);
            }
        }
    }
  Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);

  // for triangular matrices only one half is stored: zero out the other half
  if (property == LAPACKSupport::lower_triangular)
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = i + 1; j < matrix.m(); ++j)
        matrix(i, j) = 0.;
  else if (property == LAPACKSupport::upper_triangular)
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = 0; j < i; ++j)
        matrix(i, j) = 0.;

  // symmetric matrices are stored in one triangle only: mirror it
  if (property == LAPACKSupport::symmetric)
    {
      if (uplo == 'L')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = i + 1; j < matrix.m(); ++j)
            matrix(i, j) = matrix(j, i);
      else if (uplo == 'U')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = 0; j < i; ++j)
            matrix(i, j) = matrix(j, i);
    }
}


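// --------------------------------------------------------------------------
// Usage sketch (editorial addition): round trip between a replicated
// FullMatrix and a ScaLAPACKMatrix using operator= and copy_to() from above.
// `grid` is assumed to be a previously created ProcessGrid, the dimensions
// and function name are placeholders; not compiled here.
#if 0
#  include <deal.II/lac/full_matrix.h>
#  include <deal.II/lac/scalapack.h>

void example_roundtrip(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  const unsigned int n = 100;
  FullMatrix<double> full(n, n);
  for (unsigned int i = 0; i < n; ++i)
    full(i, i) = 1. + i; // every process fills the same replicated matrix

  ScaLAPACKMatrix<double> dist(n, n, grid, 32, 32);
  dist = full;                // scatter: each process keeps its local blocks
  FullMatrix<double> copy(n, n);
  dist.copy_to(copy);         // gather: 'copy' again holds the full matrix
}
#endif
// --------------------------------------------------------------------------
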
// Copy a submatrix of this matrix into a submatrix of the distributed matrix
// B, using a BLACS context that spans all processes of the shared communicator.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(
  ScaLAPACKMatrix<NumberType>                 &B,
  const std::pair<unsigned int, unsigned int> &offset_A,
  const std::pair<unsigned int, unsigned int> &offset_B,
  const std::pair<unsigned int, unsigned int> &submatrix_size) const
{
  // nothing to do for an empty submatrix
  if (submatrix_size.first == 0 || submatrix_size.second == 0)
    return;

  // range checks of the offsets and extents (elided)

  // matrices A and B must share the same MPI communicator
  int ierr, comparison;
  ierr = MPI_Comm_compare(grid->mpi_communicator,
                          B.grid->mpi_communicator,
                          &comparison);
  AssertThrowMPI(ierr);
  Assert(comparison == MPI_IDENT,
         ExcMessage("Matrix A and B must have a common MPI Communicator"));

  // create a virtual BLACS grid containing all processes of the common
  // communicator; pgemr2d needs it even on processes that own no data
  int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
  const char *order       = "Col";
  int union_n_process_rows =
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
  int union_n_process_columns = 1;
  Cblacs_gridinit(&union_blacs_context,
                  order,
                  union_n_process_rows,
                  union_n_process_columns);

  int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
  Cblacs_gridinfo(this->grid->blacs_context,
                  &n_grid_rows_A,
                  &n_grid_columns_A,
                  &my_row_A,
                  &my_column_A);

  // check whether this process is in the BLACS context of matrix A
  const bool in_context_A =
    (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
    (my_column_A >= 0 && my_column_A < n_grid_columns_A);

  int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
  Cblacs_gridinfo(B.grid->blacs_context,
                  &n_grid_rows_B,
                  &n_grid_columns_B,
                  &my_row_B,
                  &my_column_B);

  // check whether this process is in the BLACS context of matrix B
  const bool in_context_B =
    (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
    (my_column_B >= 0 && my_column_B < n_grid_columns_B);

  const int n_rows_submatrix    = submatrix_size.first;
  const int n_columns_submatrix = submatrix_size.second;

  // ScaLAPACK uses 1-based indices for the submatrix offsets
  int ia = offset_A.first + 1, ja = offset_A.second + 1;
  int ib = offset_B.first + 1, jb = offset_B.second + 1;

  std::array<int, 9> desc_A, desc_B;

  const NumberType *loc_vals_A = nullptr;
  NumberType       *loc_vals_B = nullptr;

  // processes outside the respective grid pass a descriptor filled with -1
  if (in_context_A)
    {
      if (this->values.size() != 0)
        loc_vals_A = this->values.data();
      for (unsigned int i = 0; i < desc_A.size(); ++i)
        desc_A[i] = this->descriptor[i];
    }
  else
    std::fill(desc_A.begin(), desc_A.end(), -1);

  if (in_context_B)
    {
      if (B.values.size() != 0)
        loc_vals_B = B.values.data();
      for (unsigned int i = 0; i < desc_B.size(); ++i)
        desc_B[i] = B.descriptor[i];
    }
  else
    std::fill(desc_B.begin(), desc_B.end(), -1);

  pgemr2d(&n_rows_submatrix,
          &n_columns_submatrix,
          loc_vals_A,
          &ia,
          &ja,
          desc_A.data(),
          loc_vals_B,
          &ib,
          &jb,
          desc_B.data(),
          &union_blacs_context);

  B.state = LAPACKSupport::matrix;

  // release the union BLACS context
  Cblacs_gridexit(union_blacs_context);
}


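// --------------------------------------------------------------------------
// Usage sketch (editorial addition): copying a sub-block between two
// distributed matrices with the offset-based copy_to() above. Offsets are
// zero-based (row, column) pairs; `grid` and the dimensions are placeholders.
// Not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

void example_submatrix_copy(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  ScaLAPACKMatrix<double> A(200, 200, grid, 32, 32);
  ScaLAPACKMatrix<double> B(50, 50, grid, 32, 32);

  // copy the 50x50 block of A starting at row 10, column 20 into B(0,0)
  A.copy_to(B,
            std::make_pair(10U, 20U),  // offset inside A
            std::make_pair(0U, 0U),    // offset inside B
            std::make_pair(50U, 50U)); // extent of the copied block
}
#endif
// --------------------------------------------------------------------------
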
// Copy the content of this matrix into the distributed matrix dest, which may
// live on a different process grid (within MPI_COMM_WORLD).
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
{
  Assert(n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
  Assert(n_columns == dest.n_columns,
         ExcDimensionMismatch(n_columns, dest.n_columns));

  if (this->grid->mpi_process_is_active)
    AssertThrow(
      this->descriptor[0] == 1,
      ExcMessage(
        "Copying of ScaLAPACK matrices only implemented for dense matrices"));
  if (dest.grid->mpi_process_is_active)
    AssertThrow(
      dest.descriptor[0] == 1,
      ExcMessage(
        "Copying of ScaLAPACK matrices only implemented for dense matrices"));

  if (this->grid != dest.grid)
    {
      // Source and destination use different process grids: redistribute via
      // pgemr2d on a BLACS context spanning the union of both process groups.
      int       ierr = 0;
      MPI_Group group_source, group_dest, group_union;
      ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
      AssertThrowMPI(ierr);
      ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_union(group_source, group_dest, &group_union);
      AssertThrowMPI(ierr);

      // both groups are subsets of MPI_COMM_WORLD, so the union communicator
      // can be created collectively over the union group only
      MPI_Comm mpi_communicator_union;
      ierr = MPI_Comm_create_group(MPI_COMM_WORLD,
                                   group_union,
                                   0 /*tag*/,
                                   &mpi_communicator_union);
      AssertThrowMPI(ierr);

      // virtual BLACS grid with all processes of the union communicator
      int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
      const char *order       = "Col";
      int union_n_process_rows =
        Utilities::MPI::n_mpi_processes(mpi_communicator_union);
      int union_n_process_columns = 1;
      Cblacs_gridinit(&union_blacs_context,
                      order,
                      union_n_process_rows,
                      union_n_process_columns);

      const NumberType *loc_vals_source = nullptr;
      NumberType       *loc_vals_dest   = nullptr;

      if (this->grid->mpi_process_is_active && (this->values.size() > 0))
        {
          AssertThrow(this->values.size() > 0,
                      ExcMessage(
                        "source: process is active but local matrix empty"));
          loc_vals_source = this->values.data();
        }
      if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
        {
          AssertThrow(
            dest.values.size() > 0,
            ExcMessage(
              "destination: process is active but local matrix empty"));
          loc_vals_dest = dest.values.data();
        }

      pgemr2d(&n_rows,
              &n_columns,
              loc_vals_source,
              &submatrix_row,
              &submatrix_column,
              descriptor,
              loc_vals_dest,
              &dest.submatrix_row,
              &dest.submatrix_column,
              dest.descriptor,
              &union_blacs_context);

      Cblacs_gridexit(union_blacs_context);

      if (mpi_communicator_union != MPI_COMM_NULL)
        Utilities::MPI::free_communicator(mpi_communicator_union);
      ierr = MPI_Group_free(&group_source);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&group_dest);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&group_union);
      AssertThrowMPI(ierr);
    }
  else
    // both matrices share the same process grid: copy the local array directly
    if (this->grid->mpi_process_is_active)
      dest.values = this->values;

  dest.state    = state;
  dest.property = property;
}


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_transposed(const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 0, 1, true);
}



// this = alpha * this + beta * op(B), where op(B) = B or B^T
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
                                 const NumberType                   alpha,
                                 const NumberType                   beta,
                                 const bool                         transpose_B)
{
  // dimension and block-size checks (elided)
  Assert(this->grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));

  if (this->grid->mpi_process_is_active)
    {
      char        trans_b = transpose_B ? 'T' : 'N';
      NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =
        (B.values.size() > 0) ? B.values.data() : nullptr;

      pgeadd(&trans_b,
             &n_rows,
             &n_columns,
             &beta,
             B_loc,
             &B.submatrix_row,
             &B.submatrix_column,
             B.descriptor,
             &alpha,
             A_loc,
             &submatrix_row,
             &submatrix_column,
             descriptor);
    }
  state = LAPACKSupport::matrix;
}



// this += a * B
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const NumberType                   a,
                                 const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 1, a, false);
}


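// --------------------------------------------------------------------------
// Usage sketch (editorial addition): forming A <- a*A + b*B with the general
// add() member above; both matrices must share dimensions, block sizes and
// process grid. `grid` and the sizes are placeholders; not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

void example_add(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  ScaLAPACKMatrix<double> A(100, 100, grid, 32, 32);
  ScaLAPACKMatrix<double> B(100, 100, grid, 32, 32);
  // ... fill A and B ...

  A.add(B, 1., 0.5, false); // A = 1.0 * A + 0.5 * B
}
#endif
// --------------------------------------------------------------------------
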
// this += b * B^T
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tadd(const NumberType                   b,
                                  const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 1, b, true);
}



// C = b * op(A) * op(B) + c * C, with op() defined by the transpose flags
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mult(const NumberType                   b,
                                  const ScaLAPACKMatrix<NumberType> &B,
                                  const NumberType                   c,
                                  ScaLAPACKMatrix<NumberType>       &C,
                                  const bool                         transpose_A,
                                  const bool                         transpose_B) const
{
  Assert(this->grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));
  Assert(C.grid == B.grid,
         ExcMessage("The matrices B and C need to have the same process grid"));

  // dimension and block-size compatibility checks for the four combinations
  // of transpose_A/transpose_B (partly elided)
  if (!transpose_A && !transpose_B)
    {
      Assert(this->n_rows == C.n_rows,
             ExcDimensionMismatch(this->n_rows, C.n_rows));
      Assert(this->row_block_size == C.row_block_size,
             ExcDimensionMismatch(this->row_block_size, C.row_block_size));
      // ...
    }
  else if (transpose_A && !transpose_B)
    {
      Assert(this->n_columns == C.n_rows,
             ExcDimensionMismatch(this->n_columns, C.n_rows));
      Assert(this->column_block_size == C.row_block_size,
             ExcDimensionMismatch(this->column_block_size, C.row_block_size));
      // ...
    }
  else if (!transpose_A && transpose_B)
    {
      Assert(this->n_rows == C.n_rows,
             ExcDimensionMismatch(this->n_rows, C.n_rows));
      Assert(this->row_block_size == C.row_block_size,
             ExcDimensionMismatch(this->row_block_size, C.row_block_size));
      // ...
    }
  else // transpose_A && transpose_B
    {
      Assert(this->n_columns == C.n_rows,
             ExcDimensionMismatch(this->n_columns, C.n_rows));
      Assert(this->column_block_size == C.row_block_size,
             ExcDimensionMismatch(this->column_block_size, C.row_block_size));
      // ...
    }

  if (this->grid->mpi_process_is_active)
    {
      char trans_a = transpose_A ? 'T' : 'N';
      char trans_b = transpose_B ? 'T' : 'N';

      const NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =
        (B.values.size() > 0) ? B.values.data() : nullptr;
      NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
      int         m     = C.n_rows;
      int         n     = C.n_columns;
      int         k     = transpose_A ? this->n_rows : this->n_columns;

      pgemm(&trans_a,
            &trans_b,
            &m,
            &n,
            &k,
            &b,
            A_loc,
            &(this->submatrix_row),
            &(this->submatrix_column),
            this->descriptor,
            B_loc,
            &B.submatrix_row,
            &B.submatrix_column,
            B.descriptor,
            &c,
            C_loc,
            &C.submatrix_row,
            &C.submatrix_column,
            C.descriptor);
    }
  C.state = LAPACKSupport::matrix;
}


// matrix-matrix products forwarding to the general mult() above

template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType>       &C,
                                   const ScaLAPACKMatrix<NumberType> &B,
                                   const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, false, false);
  else
    mult(1., B, 0, C, false, false);
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType>       &C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, true, false);
  else
    mult(1., B, 0, C, true, false);
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType>       &C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, false, true);
  else
    mult(1., B, 0, C, false, true);
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType>       &C,
                                     const ScaLAPACKMatrix<NumberType> &B,
                                     const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, true, true);
  else
    mult(1., B, 0, C, true, true);
}


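// --------------------------------------------------------------------------
// Usage sketch (editorial addition): C = A * B via mmult(), which forwards to
// the general mult() above with the appropriate transposition flags. All three
// matrices live on the same process grid; `grid` and the sizes are
// placeholders. Not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

void example_mmult(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  ScaLAPACKMatrix<double> A(100, 60, grid, 32, 32);
  ScaLAPACKMatrix<double> B(60, 80, grid, 32, 32);
  ScaLAPACKMatrix<double> C(100, 80, grid, 32, 32);
  // ... fill A and B ...

  A.mmult(C, B);       // C  = A * B
  A.mmult(C, B, true); // C += A * B  ("adding" variant)
}
#endif
// --------------------------------------------------------------------------
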
// Cholesky factorization (p_potrf) of a symmetric matrix; the factor
// overwrites the stored triangle.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
{
  Assert(
    property == LAPACKSupport::symmetric,
    ExcMessage(
      "Cholesky factorization can be applied to symmetric matrices only."));
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      int         info  = 0;
      NumberType *A_loc = this->values.data();
      ppotrf(&uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column,
             descriptor, &info);
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
    }
  state    = LAPACKSupport::cholesky;
  property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
                            LAPACKSupport::upper_triangular);
}



// LU factorization (p_getrf) with partial pivoting; the pivots are stored for
// later use by the inverse/solve routines.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      int         info  = 0;
      NumberType *A_loc = this->values.data();

      const int iarow = indxg2p_(&submatrix_row,
                                 &row_block_size,
                                 &(grid->this_process_row),
                                 &first_process_row,
                                 &(grid->n_process_rows));
      const int mp    = numroc_(&n_rows,
                             &row_block_size,
                             &(grid->this_process_row),
                             &iarow,
                             &(grid->n_process_rows));
      ipiv.resize(mp + row_block_size);

      pgetrf(&n_rows, &n_columns, A_loc, &submatrix_row, &submatrix_column,
             descriptor, ipiv.data(), &info);
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
    }
  state    = LAPACKSupport::State::lu;
  property = LAPACKSupport::Property::general;
}



// Invert the matrix: triangular matrices are inverted directly (p_trtri), all
// other matrices are factorized first (Cholesky if symmetric, LU otherwise)
// and then inverted via p_potri/p_getri.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::invert()
{
  const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
                              property == LAPACKSupport::lower_triangular);

  if (is_triangular)
    {
      if (grid->mpi_process_is_active)
        {
          const char uploTriangular =
            property == LAPACKSupport::upper_triangular ? 'U' : 'L';
          const char  diag  = 'N';
          int         info  = 0;
          NumberType *A_loc = this->values.data();
          ptrtri(&uploTriangular, &diag, &n_columns, A_loc, &submatrix_row,
                 &submatrix_column, descriptor, &info);
          AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
        }
    }
  else
    {
      // non-triangular matrices have to be factorized first
      const bool is_symmetric = (property == LAPACKSupport::symmetric ||
                                 state == LAPACKSupport::State::cholesky);
      if (state == LAPACKSupport::matrix)
        {
          if (is_symmetric)
            compute_cholesky_factorization();
          else
            compute_lu_factorization();
        }
      if (grid->mpi_process_is_active)
        {
          NumberType *A_loc = this->values.data();

          if (is_symmetric)
            {
              // p_potri computes the inverse from the Cholesky factor (elided)
            }
          else
            {
              // workspace query for p_getri followed by the actual call
              work.resize(1);
              iwork.resize(1);
              int lwork = -1, liwork = -1;
              // ... first p_getri call with lwork = liwork = -1 (elided) ...
              lwork = static_cast<int>(work[0]);
              work.resize(lwork);
              liwork = iwork[0];
              iwork.resize(liwork);
              // ... second p_getri call computing the inverse (elided) ...
            }
        }
    }
  state = LAPACKSupport::State::inverse_matrix;
}


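// --------------------------------------------------------------------------
// Usage sketch (editorial addition): inverting a symmetric positive definite
// matrix. Because the property is set to symmetric, invert() factorizes via
// Cholesky on demand, so the explicit factorization call is optional.
// `grid` and the size are placeholders; not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

void example_invert(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  ScaLAPACKMatrix<double> A(100, 100, grid, 32, 32,
                            LAPACKSupport::Property::symmetric);
  // ... fill A with a symmetric positive definite matrix ...

  A.compute_cholesky_factorization(); // optional: invert() would trigger it
  A.invert();                         // A now stores A^{-1}
}
#endif
// --------------------------------------------------------------------------
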
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)
{
  // check the validity of the index limits
  AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
  AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));

  std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  // compute all eigenvalues/eigenvectors if the whole range is requested
  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric(compute_eigenvectors);
  else
    return eigenpairs_symmetric(compute_eigenvectors, idx);
}



template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)
{
  Assert(!std::isnan(value_limits.first),
         ExcMessage("value_limits.first is NaN"));
  Assert(!std::isnan(value_limits.second),
         ExcMessage("value_limits.second is NaN"));

  std::pair<unsigned int, unsigned int> indices =
    std::make_pair(numbers::invalid_unsigned_int,
                   numbers::invalid_unsigned_int);

  return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
}


// Eigenvalues (and optionally eigenvectors) of a symmetric matrix, either all
// of them or a subset selected by index or value range, using p_syev/p_syevx.
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType>     &eigenvalue_limits)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));

  std::lock_guard<std::mutex> lock(mutex);

  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
                           std::isnan(eigenvalue_limits.second)) ?
                            false :
                            true;
  const bool use_indices =
    ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
     (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
      false :
      true;

  Assert(
    !(use_values && use_indices),
    ExcMessage(
      "Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // If eigenvectors are requested, allocate a matrix for them; otherwise a
  // small dummy matrix, because the ScaLAPACK routines still expect one.
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
      std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
                                                    grid,
                                                    row_block_size) :
      std::make_unique<ScaLAPACKMatrix<NumberType>>(
        grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

  eigenvectors->property = property;
  // number of eigenvalues that will be computed
  int m = n_rows;
  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      // p_syev computes all eigenpairs, p_syevx a subset selected by value or
      // index range
      char jobz  = compute_eigenvectors ? 'V' : 'N';
      char range = 'A';
      bool all_eigenpairs = true;
      NumberType vl = NumberType(), vu = NumberType();
      int il = 1, iu = 1;
      // absolute tolerance and orthogonalization parameter for p_syevx
      NumberType abstol = NumberType();
      NumberType orfac  = 0;
      // indices of eigenvectors that failed to converge
      std::vector<int> ifail;
      // cluster and gap information needed for eigenvector orthogonalization
      std::vector<int>        iclustr;
      std::vector<NumberType> gap(n_local_rows * n_local_columns);

      if (!use_values && !use_indices)
        {
          range          = 'A';
          all_eigenpairs = true;
        }
      else if (use_values)
        {
          range          = 'V';
          all_eigenpairs = false;
          vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
          vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
        }
      else if (use_indices)
        {
          range          = 'I';
          all_eigenpairs = false;
          // ScaLAPACK uses 1-based indices
          il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
        }

      NumberType *A_loc = this->values.data();
      NumberType *eigenvectors_loc =
        (compute_eigenvectors ? eigenvectors->values.data() : nullptr);

      // minimal workspace for the workspace queries
      work.resize(1);
      iwork.resize(1);
      int lwork = -1, liwork = -1;

      if (all_eigenpairs)
        {
          // workspace query and actual p_syev call (elided)
        }
      else
        {
          // machine parameter controlling the absolute tolerance of p_syevx
          char cmach = compute_eigenvectors ? 'U' : 'S';
          plamch(&(this->grid->blacs_context), &cmach, abstol);
          abstol *= 2;
          ifail.resize(n_rows);
          iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
          gap.resize(grid->n_process_rows * grid->n_process_columns);

          // workspace query and actual p_syevx call (elided); on exit m holds
          // the number of computed eigenvalues
        }
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      liwork = iwork[0];
      iwork.resize(liwork);
      // second solver call with properly sized workspace (elided)
      (void)info;

      // if eigenvectors were requested, copy them back into this matrix by
      // swapping the local arrays, which have identical distribution
      if (compute_eigenvectors)
        this->values.swap(eigenvectors->values);

      // shrink the eigenvalue array to the number of computed eigenvalues
      while (ev.size() > static_cast<size_type>(m))
        ev.pop_back();
    }
  // broadcast the number of eigenvalues to processes outside the grid ...
  grid->send_to_inactive(&m, 1);
  // ... which then resize their arrays accordingly ...
  if (!grid->mpi_process_is_active)
    ev.resize(m);
  // ... and receive the eigenvalues themselves
  grid->send_to_inactive(ev.data(), ev.size());

  // The matrix content is destroyed by the eigensolver; if eigenvectors were
  // requested, the columns of the matrix now hold them.
  if (compute_eigenvectors)
    {
      property = LAPACKSupport::Property::general;
      state    = LAPACKSupport::eigenvalues;
    }
  else
    state = LAPACKSupport::unusable;

  return ev;
}


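// --------------------------------------------------------------------------
// Usage sketch (editorial addition): computing the five smallest eigenpairs of
// a symmetric matrix with eigenpairs_symmetric_by_index(). On return the
// eigenvectors overwrite the matrix content column by column. `grid` and the
// size are placeholders; not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

void example_eigenpairs(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  const unsigned int      n = 200;
  ScaLAPACKMatrix<double> A(n, n, grid, 32, 32,
                            LAPACKSupport::Property::symmetric);
  // ... fill A symmetrically ...

  // eigenvalues with indices 0..4 (in ascending order), plus eigenvectors
  const std::vector<double> lambda =
    A.eigenpairs_symmetric_by_index(std::make_pair(0U, 4U), true);
}
#endif
// --------------------------------------------------------------------------
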
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)
{
  // check the validity of the index limits
  AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
  AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));

  const std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  // compute all eigenvalues/eigenvectors if the whole range is requested
  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric_MRRR(compute_eigenvectors);
  else
    return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
}



template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)
{
  AssertIsFinite(value_limits.first);
  AssertIsFinite(value_limits.second);

  const std::pair<unsigned int, unsigned int> indices =
    std::make_pair(numbers::invalid_unsigned_int,
                   numbers::invalid_unsigned_int);

  return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
}


// Eigenvalues/eigenvectors of a symmetric matrix via the MRRR algorithm
// (p_syevr); the structure parallels eigenpairs_symmetric() above.
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType>     &eigenvalue_limits)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));

  std::lock_guard<std::mutex> lock(mutex);

  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
                           std::isnan(eigenvalue_limits.second)) ?
                            false :
                            true;
  const bool use_indices =
    ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
     (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
      false :
      true;

  Assert(
    !(use_values && use_indices),
    ExcMessage(
      "Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // matrix for the eigenvectors, or a dummy matrix if they are not requested
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
      std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
                                                    grid,
                                                    row_block_size) :
      std::make_unique<ScaLAPACKMatrix<NumberType>>(
        grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

  eigenvectors->property = property;
  // number of computed eigenvalues (m) and eigenvectors (nz)
  int m = n_rows, nz = 0;
  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
    {
      int  info  = 0;
      char jobz  = compute_eigenvectors ? 'V' : 'N';
      char range = 'A';
      NumberType vl = NumberType(), vu = NumberType();
      int il = 1, iu = 1;

      if (use_values)
        {
          range = 'V';
          vl    = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
          vu    = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
        }
      else if (use_indices)
        {
          range = 'I';
          // ScaLAPACK uses 1-based indices
          il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
        }

      NumberType *A_loc = this->values.data();
      NumberType *eigenvectors_loc =
        (compute_eigenvectors ? eigenvectors->values.data() : nullptr);

      // workspace query followed by the actual p_syevr call (both elided);
      // on exit m holds the number of eigenvalues and nz the number of
      // computed eigenvectors
      work.resize(1);
      iwork.resize(1);
      int lwork = -1, liwork = -1;
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      liwork = iwork[0];
      iwork.resize(liwork);
      (void)info;

      if (compute_eigenvectors)
        AssertThrow(
          m == nz,
          ExcMessage(
            "psyevr failed to compute all eigenvectors for the selected eigenvalues"));

      // if eigenvectors were requested, copy them back into this matrix by
      // swapping the local arrays, which have identical distribution
      if (compute_eigenvectors)
        this->values.swap(eigenvectors->values);

      while (ev.size() > static_cast<size_type>(m))
        ev.pop_back();
    }
  // broadcast the number of eigenvalues and the eigenvalues themselves to
  // processes that are not part of the process grid
  grid->send_to_inactive(&m, 1);
  if (!grid->mpi_process_is_active)
    ev.resize(m);
  grid->send_to_inactive(ev.data(), ev.size());

  // the matrix content is destroyed unless the eigenvectors were requested,
  // in which case its columns now hold them
  if (compute_eigenvectors)
    {
      property = LAPACKSupport::Property::general;
      state    = LAPACKSupport::eigenvalues;
    }
  else
    state = LAPACKSupport::unusable;

  return ev;
}


// Singular value decomposition A = U * Sigma * VT via p_gesvd; U and VT are
// optional and must match this matrix in block sizes and process grid.
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
                                         ScaLAPACKMatrix<NumberType> *VT)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size == column_block_size,
         ExcDimensionMismatch(row_block_size, column_block_size));

  const bool left_singular_vectors  = (U != nullptr) ? true : false;
  const bool right_singular_vectors = (VT != nullptr) ? true : false;

  if (left_singular_vectors)
    {
      Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
      Assert(U->n_rows == U->n_columns,
             ExcDimensionMismatch(U->n_rows, U->n_columns));
      Assert(row_block_size == U->row_block_size,
             ExcDimensionMismatch(row_block_size, U->row_block_size));
      Assert(column_block_size == U->column_block_size,
             ExcDimensionMismatch(column_block_size, U->column_block_size));
      Assert(grid->blacs_context == U->grid->blacs_context,
             ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
    }
  if (right_singular_vectors)
    {
      // analogous checks on the dimensions and block sizes of VT (elided)
      Assert(grid->blacs_context == VT->grid->blacs_context,
             ExcDimensionMismatch(grid->blacs_context,
                                  VT->grid->blacs_context));
    }

  std::lock_guard<std::mutex> lock(mutex);

  std::vector<NumberType> sv(std::min(n_rows, n_columns));

  if (grid->mpi_process_is_active)
    {
      char        jobu   = left_singular_vectors ? 'V' : 'N';
      char        jobvt  = right_singular_vectors ? 'V' : 'N';
      NumberType *A_loc  = this->values.data();
      NumberType *U_loc  = left_singular_vectors ? U->values.data() : nullptr;
      NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;
      int         info   = 0;

      // workspace query: first p_gesvd call with lwork = -1 (arguments such
      // as &U->submatrix_row, &U->submatrix_column, ... elided)
      work.resize(1);
      int lwork = -1;
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);

      // second p_gesvd call doing the actual computation (elided)
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
    }

  // send the singular values to processes not being part of the process grid
  grid->send_to_inactive(sv.data(), sv.size());

  property = LAPACKSupport::Property::general;
  state    = LAPACKSupport::State::unusable;

  return sv;
}


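// --------------------------------------------------------------------------
// Usage sketch (editorial addition): a full singular value decomposition
// A = U * Sigma * VT. U and VT must be square, use the same block sizes and
// live on the same process grid as A; A itself is destroyed by the call.
// `grid` and the sizes are placeholders; not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

void example_svd(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  const unsigned int      m = 120, n = 80, b = 32;
  ScaLAPACKMatrix<double> A(m, n, grid, b, b);
  ScaLAPACKMatrix<double> U(m, m, grid, b, b);
  ScaLAPACKMatrix<double> VT(n, n, grid, b, b);
  // ... fill A ...

  // sigma holds the min(m,n) singular values in descending order
  const std::vector<double> sigma = A.compute_SVD(&U, &VT);
}
#endif
// --------------------------------------------------------------------------
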
// Least-squares solve of A * X = B (or A^T * X = B) via p_gels; A is
// overwritten by its factorization and B by the solution.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
                                           const bool                   transpose)
{
  Assert(grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(B.state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix B has to be in Matrix state before calling this function."));

  // dimension checks for the transposed/non-transposed case (elided)
  Assert(row_block_size == column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix A"));
  Assert(B.row_block_size == B.column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix B"));
  Assert(row_block_size == B.row_block_size,
         ExcMessage(
           "Use identical block-cyclic distribution for matrices A and B"));

  std::lock_guard<std::mutex> lock(mutex);

  if (grid->mpi_process_is_active)
    {
      char        trans = transpose ? 'T' : 'N';
      NumberType *A_loc = this->values.data();
      NumberType *B_loc = B.values.data();
      int         info  = 0;

      // workspace query: first p_gels call with lwork = -1 (elided)
      work.resize(1);
      int lwork = -1;
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      // second p_gels call computing the least-squares solution (elided)
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
      (void)trans;
    }
  state = LAPACKSupport::State::unusable;
}


// Replace the matrix by its truncated pseudoinverse: singular values below
// ratio * sigma_max are dropped; returns the number of retained values.
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size == column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix A"));
  Assert(
    ratio > 0. && ratio < 1.,
    ExcMessage(
      "input parameter ratio has to be larger than zero and smaller than 1"));

  // compute the full SVD of this matrix
  ScaLAPACKMatrix<NumberType> U(n_rows, n_rows, grid,
                                row_block_size, row_block_size);
  ScaLAPACKMatrix<NumberType> VT(n_columns, n_columns, grid,
                                 row_block_size, row_block_size);
  std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
  AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
              ExcMessage("Matrix has rank 0"));

  // number of singular values kept and the inverses of those values
  unsigned int            n_sv = 1;
  std::vector<NumberType> inv_sigma;
  inv_sigma.push_back(1 / sv[0]);

  for (unsigned int i = 1; i < sv.size(); ++i)
    if (sv[i] > sv[0] * ratio)
      {
        ++n_sv;
        inv_sigma.push_back(1 / sv[i]);
      }
    else
      break;

  // extract the first n_sv columns of U and rows of VT, scale the rows of
  // VT_R by the inverted singular values and assemble A^+ = V Sigma^+ U^T
  ScaLAPACKMatrix<NumberType> U_R(n_rows, n_sv, grid,
                                  row_block_size, row_block_size);
  ScaLAPACKMatrix<NumberType> VT_R(n_sv, n_columns, grid,
                                   row_block_size, row_block_size);
  U.copy_to(U_R,
            std::make_pair(0, 0),
            std::make_pair(0, 0),
            std::make_pair(n_rows, n_sv));
  VT.copy_to(VT_R,
             std::make_pair(0, 0),
             std::make_pair(0, 0),
             std::make_pair(n_sv, n_columns));
  VT_R.scale_rows(inv_sigma);

  this->reinit(n_columns, n_rows, this->grid,
               row_block_size, column_block_size,
               LAPACKSupport::Property::general);
  VT_R.mult(1, U_R, 0, *this, true, true);
  state = LAPACKSupport::State::inverse_matrix;
  return n_sv;
}


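// --------------------------------------------------------------------------
// Usage sketch (editorial addition): truncated pseudoinverse. Singular values
// below ratio * sigma_max are dropped; the return value is the number of kept
// singular values and the matrix is replaced by its (transposed-shape)
// pseudoinverse. Requires identical row and column block sizes. `grid` and
// the sizes are placeholders; not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

void example_pseudoinverse(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  ScaLAPACKMatrix<double> A(120, 80, grid, 32, 32);
  // ... fill A ...

  const unsigned int rank = A.pseudoinverse(1.e-8); // A is now 80 x 120 = A^+
}
#endif
// --------------------------------------------------------------------------
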
// Estimate the reciprocal condition number (in the l1 norm) of a symmetric
// positive definite matrix from its Cholesky factorization via p_pocon.
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
  const NumberType a_norm) const
{
  Assert(state == LAPACKSupport::cholesky,
         ExcMessage(
           "Matrix has to be in Cholesky state before calling this function."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType rcond = 0.;

  if (grid->mpi_process_is_active)
    {
      int liwork = n_local_rows;
      iwork.resize(liwork);

      int               info  = 0;
      const NumberType *A_loc = this->values.data();

      // workspace query: first p_pocon call with lwork = -1 (elided)
      work.resize(1);
      int lwork = -1;
      lwork = static_cast<int>(std::ceil(work[0]));
      work.resize(lwork);
      // second p_pocon call computing the estimate from a_norm (elided)
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
      (void)a_norm;
    }
  grid->send_to_inactive(&rcond);
  return rcond;
}


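// --------------------------------------------------------------------------
// Usage sketch (editorial addition): estimating the reciprocal condition
// number of a symmetric positive definite matrix. The l1 norm has to be
// computed before the Cholesky factorization, because the factorization
// overwrites the matrix. `grid` and the size are placeholders; not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

void example_rcond(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  ScaLAPACKMatrix<double> A(100, 100, grid, 32, 32,
                            LAPACKSupport::Property::symmetric);
  // ... fill A with a symmetric positive definite matrix ...

  const double a_norm = A.l1_norm(); // while still in the 'matrix' state
  A.compute_cholesky_factorization();
  const double rcond = A.reciprocal_condition_number(a_norm);
}
#endif
// --------------------------------------------------------------------------
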
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::l1_norm() const
{
  const char type('O');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}



template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::linfty_norm() const
{
  const char type('I');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}



template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::frobenius_norm() const
{
  const char type('F');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}


// Norm of a general matrix via p_lange; the kind of norm is selected by `type`.
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
{
  Assert(state == LAPACKSupport::matrix ||
           state == LAPACKSupport::inverse_matrix,
         ExcMessage("norms can be called in matrix state only."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      const int iarow = indxg2p_(&submatrix_row,
                                 &row_block_size,
                                 &(grid->this_process_row),
                                 &first_process_row,
                                 &(grid->n_process_rows));
      const int iacol = indxg2p_(&submatrix_column,
                                 &column_block_size,
                                 &(grid->this_process_column),
                                 &first_process_column,
                                 &(grid->n_process_columns));
      const int mp0   = numroc_(&n_rows,
                              &row_block_size,
                              &(grid->this_process_row),
                              &iarow,
                              &(grid->n_process_rows));
      const int nq0   = numroc_(&n_columns,
                              &column_block_size,
                              &(grid->this_process_column),
                              &iacol,
                              &(grid->n_process_columns));

      // the required workspace depends on the requested norm
      int lwork = 0;
      if (type == 'O' || type == '1')
        lwork = nq0;
      else if (type == 'I')
        lwork = mp0;

      work.resize(lwork);
      const NumberType *A_loc = this->values.begin();
      res = plange(&type, &n_rows, &n_columns, A_loc,
                   &submatrix_row, &submatrix_column, descriptor, work.data());
    }
  grid->send_to_inactive(&res);
  return res;
}


// Norm of a symmetric matrix via p_lansy; the workspace size follows the
// formula given in the ScaLAPACK documentation of that routine.
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
{
  Assert(state == LAPACKSupport::matrix ||
           state == LAPACKSupport::inverse_matrix,
         ExcMessage("norms can be called in matrix state only."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      const int lcm =
        ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
      const int v2 = lcm / (grid->n_process_rows);

      const int IAROW = indxg2p_(&submatrix_row,
                                 &row_block_size,
                                 &(grid->this_process_row),
                                 &first_process_row,
                                 &(grid->n_process_rows));
      const int IACOL = indxg2p_(&submatrix_column,
                                 &column_block_size,
                                 &(grid->this_process_column),
                                 &first_process_column,
                                 &(grid->n_process_columns));
      const int Np0   = numroc_(&n_columns /* + IROFFA */,
                              &row_block_size,
                              &(grid->this_process_row),
                              &IAROW,
                              &(grid->n_process_rows));
      const int Nq0   = numroc_(&n_columns /* + ICOFFA */,
                              &column_block_size,
                              &(grid->this_process_column),
                              &IACOL,
                              &(grid->n_process_columns));

      const int v1  = iceil_(&Np0, &row_block_size);
      const int ldw = (n_local_rows == n_local_columns) ?
                        0 :
                        row_block_size * iceil_(&v1, &v2);

      const int lwork =
        (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
      work.resize(lwork);
      const NumberType *A_loc = this->values.begin();
      res = plansy(&type, &uplo, &n_columns, A_loc,
                   &submatrix_row, &submatrix_column, descriptor, work.data());
    }
  grid->send_to_inactive(&res);
  return res;
}


#  ifdef DEAL_II_WITH_HDF5
namespace internal
{
  namespace
  {
    // create an HDF5 enum type matching LAPACKSupport::State
    void
    create_HDF5_state_enum_id(hid_t &state_enum_id)
    {
      LAPACKSupport::State val;
      state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));

      val           = LAPACKSupport::State::cholesky;
      herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
      AssertThrow(status >= 0, ExcInternalError());
      val    = LAPACKSupport::State::eigenvalues;
      status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
      val    = LAPACKSupport::State::inverse_matrix;
      status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
      val    = LAPACKSupport::State::inverse_svd;
      status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
      val    = LAPACKSupport::State::lu;
      status = H5Tenum_insert(state_enum_id, "lu", &val);
      val    = LAPACKSupport::State::matrix;
      status = H5Tenum_insert(state_enum_id, "matrix", &val);
      val    = LAPACKSupport::State::svd;
      status = H5Tenum_insert(state_enum_id, "svd", &val);
      val    = LAPACKSupport::State::unusable;
      status = H5Tenum_insert(state_enum_id, "unusable", &val);
      AssertThrow(status >= 0, ExcInternalError());
    }

    // create an HDF5 enum type matching LAPACKSupport::Property
    void
    create_HDF5_property_enum_id(hid_t &property_enum_id)
    {
      LAPACKSupport::Property prop;
      property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));

      prop          = LAPACKSupport::Property::diagonal;
      herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
      AssertThrow(status >= 0, ExcInternalError());
      prop   = LAPACKSupport::Property::general;
      status = H5Tenum_insert(property_enum_id, "general", &prop);
      prop   = LAPACKSupport::Property::hessenberg;
      status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
      prop   = LAPACKSupport::Property::lower_triangular;
      status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
      prop   = LAPACKSupport::Property::symmetric;
      status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
      prop   = LAPACKSupport::Property::upper_triangular;
      status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
      AssertThrow(status >= 0, ExcInternalError());
    }
  } // namespace
} // namespace internal
#  endif // DEAL_II_WITH_HDF5


// Write the matrix into an HDF5 archive; depending on the configuration this
// happens through parallel MPI-IO or serially on a single process.
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save(
  const std::string                           &filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)chunk_size;
  AssertThrow(false, ExcNeedsHDF5());
#  else

  std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;

  if (chunks_size_.first == numbers::invalid_unsigned_int ||
      chunks_size_.second == numbers::invalid_unsigned_int)
    {
      // default: store the matrix in chunks of columns
      chunks_size_.first  = n_rows;
      chunks_size_.second = 1;
    }
  Assert(chunks_size_.first > 0,
         ExcMessage("The row chunk size must be larger than 0."));
  AssertIndexRange(chunks_size_.first, n_rows + 1);
  Assert(chunks_size_.second > 0,
         ExcMessage("The column chunk size must be larger than 0."));
  AssertIndexRange(chunks_size_.second, n_columns + 1);

#    ifdef H5_HAVE_PARALLEL
  // implementation for configurations equipped with a parallel file system
  save_parallel(filename, chunks_size_);
#    else
  // implementation for configurations with no parallel file system
  save_serial(filename, chunks_size_);
#    endif
#  endif
}


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_serial(
  const std::string                           &filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)chunk_size;
  AssertThrow(false, ExcNeedsHDF5());
#  else

  // The content of the distributed matrix is copied to a matrix using a
  // 1x1 process grid; the single process owning all data writes the file.
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  1);
  const int MB = n_rows, NB = n_columns;
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
  copy_to(tmp);

  // the 1x1 grid has a single active process which writes the archive
  if (tmp.grid->mpi_process_is_active)
    {
      herr_t status;

      // create a new file using default properties
      hid_t file_id =
        H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

      // chunked storage; columnwise storage means the dimensions are swapped
      hsize_t chunk_dims[2];
      chunk_dims[0]       = chunk_size.second;
      chunk_dims[1]       = chunk_size.first;
      hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
      status              = H5Pset_chunk(data_property, 2, chunk_dims);

      // create the data space for the dataset (dimensions swapped as well)
      hsize_t dims[2];
      dims[0]            = n_columns;
      dims[1]            = n_rows;
      hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);

      // create the dataset within the file using the chunk properties
      hid_t type_id    = hdf5_type_id(tmp.values.data());
      hid_t dataset_id = H5Dcreate2(file_id,
                                    "/matrix",
                                    type_id,
                                    dataspace_id,
                                    H5P_DEFAULT,
                                    data_property,
                                    H5P_DEFAULT);

      // write the dataset
      status = H5Dwrite(
        dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());

      // create HDF5 enum types for the state and property of the matrix
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      // data space and dataset for the state enum, then write it
      hsize_t dims_state[1];
      dims_state[0]              = 1;
      hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
      hid_t state_enum_dataset   = H5Dcreate2(file_id,
                                            "/state",
                                            state_enum_id,
                                            state_enum_dataspace,
                                            H5P_DEFAULT,
                                            H5P_DEFAULT,
                                            H5P_DEFAULT);
      status = H5Dwrite(state_enum_dataset,
                        state_enum_id,
                        H5S_ALL,
                        H5S_ALL,
                        H5P_DEFAULT,
                        &state);

      // same for the property enum
      hsize_t dims_property[1];
      dims_property[0] = 1;
      hid_t property_enum_dataspace =
        H5Screate_simple(1, dims_property, nullptr);
      hid_t property_enum_dataset = H5Dcreate2(file_id,
                                               "/property",
                                               property_enum_id,
                                               property_enum_dataspace,
                                               H5P_DEFAULT,
                                               H5P_DEFAULT,
                                               H5P_DEFAULT);
      status = H5Dwrite(property_enum_dataset,
                        property_enum_id,
                        H5S_ALL,
                        H5S_ALL,
                        H5P_DEFAULT,
                        &property);

      // close datasets, data spaces, types, properties and the file
      status = H5Dclose(dataset_id);
      status = H5Dclose(state_enum_dataset);
      status = H5Dclose(property_enum_dataset);
      status = H5Sclose(dataspace_id);
      status = H5Sclose(state_enum_dataspace);
      status = H5Sclose(property_enum_dataspace);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);
      status = H5Pclose(data_property);
      status = H5Fclose(file_id);
      (void)status;
    }
#  endif
}


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_parallel(
  const std::string                           &filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)chunk_size;
  AssertThrow(false, ExcNeedsHDF5());
#  else

  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;

  // The content of the distributed matrix is copied to a matrix distributed
  // on a 1 x n_processes grid, so that each process writes one contiguous
  // block of columns of the archive.
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  n_mpi_processes);

  const int MB = n_rows;
  // Choose the column block size so that the columns are distributed as
  // evenly as possible over the participating processes.
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),
                          1);

  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
  copy_to(tmp);

  // get the file access template for parallel MPI-IO access
  hid_t  plist_id = H5Pcreate(H5P_FILE_ACCESS);
  herr_t status   = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);

  // create the file collectively and release the property list
  hid_t file_id =
    H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
  status = H5Pclose(plist_id);

  // the dataset is stored columnwise, therefore the dimensions are swapped
  hid_t   type_id = hdf5_type_id(tmp.values.data());
  hsize_t dims[2] = {static_cast<hsize_t>(n_columns),
                     static_cast<hsize_t>(n_rows)};
  hid_t filespace = H5Screate_simple(2, dims, nullptr);

  // chunked dataset creation properties (again with swapped dimensions)
  hsize_t chunk_dims[2];
  chunk_dims[0] = chunk_size.second;
  chunk_dims[1] = chunk_size.first;
  plist_id      = H5Pcreate(H5P_DATASET_CREATE);
  H5Pset_chunk(plist_id, 2, chunk_dims);
  hid_t dset_id = H5Dcreate2(
    file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
  status = H5Sclose(filespace);
  status = H5Pclose(plist_id);

  // gather the number of locally owned rows and columns of all processes
  std::vector<int> proc_n_local_rows(n_mpi_processes),
    proc_n_local_columns(n_mpi_processes);
  int ierr = MPI_Allgather(&tmp.n_local_rows,
                           1,
                           MPI_INT,
                           proc_n_local_rows.data(),
                           1,
                           MPI_INT,
                           tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);
  ierr = MPI_Allgather(&tmp.n_local_columns,
                       1,
                       MPI_INT,
                       proc_n_local_columns.data(),
                       1,
                       MPI_INT,
                       tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);

  const unsigned int my_rank(
    Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

  // each process writes its local columns as one hyperslab of the file
  hsize_t count[2] = {static_cast<hsize_t>(tmp.n_local_columns),
                      static_cast<hsize_t>(tmp.n_local_rows)};
  hid_t   memspace = H5Screate_simple(2, count, nullptr);

  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // select the hyperslab in the file that belongs to this process
  filespace = H5Dget_space(dset_id);
  status    = H5Sselect_hyperslab(
    filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);

  // create the data transfer property list and write independently
  plist_id = H5Pcreate(H5P_DATASET_XFER);
  status   = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);

  // processes without any locally stored columns must not write
  if (tmp.values.size() > 0)
    {
      const void *data = tmp.values.data();
      status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
    }

  // close the dataset, data and memory spaces, property list and the file
  status = H5Dclose(dset_id);
  status = H5Sclose(filespace);
  status = H5Sclose(memspace);
  status = H5Pclose(plist_id);
  status = H5Fclose(file_id);
  (void)status;

  // make sure the file is closed everywhere before process 0 reopens it
  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);

  // only process 0 reopens the file serially and appends state and property
  if (tmp.grid->this_mpi_process == 0)
    {
      hid_t file_id_reopen =
        H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);

      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      // write the state as a one-element dataset "/state"
      hsize_t dims_state[1];
      dims_state[0]              = 1;
      hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
      hid_t state_enum_dataset   = H5Dcreate2(file_id_reopen,
                                            "/state",
                                            state_enum_id,
                                            state_enum_dataspace,
                                            H5P_DEFAULT,
                                            H5P_DEFAULT,
                                            H5P_DEFAULT);
      herr_t status2 = H5Dwrite(state_enum_dataset,
                                state_enum_id,
                                H5S_ALL,
                                H5S_ALL,
                                H5P_DEFAULT,
                                &state);

      // and the property as a one-element dataset "/property"
      hsize_t dims_property[1];
      dims_property[0] = 1;
      hid_t property_enum_dataspace =
        H5Screate_simple(1, dims_property, nullptr);
      hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
                                               "/property",
                                               property_enum_id,
                                               property_enum_dataspace,
                                               H5P_DEFAULT,
                                               H5P_DEFAULT,
                                               H5P_DEFAULT);
      status2 = H5Dwrite(property_enum_dataset,
                         property_enum_id,
                         H5S_ALL,
                         H5S_ALL,
                         H5P_DEFAULT,
                         &property);

      status2 = H5Dclose(state_enum_dataset);
      status2 = H5Dclose(property_enum_dataset);
      status2 = H5Sclose(state_enum_dataspace);
      status2 = H5Sclose(property_enum_dataspace);
      status2 = H5Tclose(state_enum_id);
      status2 = H5Tclose(property_enum_id);
      status2 = H5Fclose(file_id_reopen);
      (void)status2;
    }
#  endif
}


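// --------------------------------------------------------------------------
// Usage sketch (editorial addition): writing a matrix to an HDF5 archive and
// reading it back through the filename constructor. Requires deal.II to be
// configured with HDF5; the chunk size pair is optional and "matrix.h5" is a
// placeholder file name. Not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

void example_save_load(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  ScaLAPACKMatrix<double> A(100, 100, grid, 32, 32);
  // ... fill A ...

  A.save("matrix.h5", std::make_pair(16U, 16U)); // 16 x 16 chunks

  // re-create the matrix from the archive (dimensions are read from the file)
  ScaLAPACKMatrix<double> B("matrix.h5", grid, 32);
}
#endif
// --------------------------------------------------------------------------
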
// Read the matrix from an HDF5 archive written by save().
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  AssertThrow(false, ExcNeedsHDF5());
#  else
#    ifdef H5_HAVE_PARALLEL
  // implementation for configurations equipped with a parallel file system
  load_parallel(filename);
#    else
  // implementation for configurations with no parallel file system
  load_serial(filename);
#    endif
#  endif
}


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  AssertThrow(false, ExcNeedsHDF5());
#  else

  // The matrix is read on a single process into a temporary matrix on a 1x1
  // process grid and then distributed to *this via copy_to().
  const auto one_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  1);
  const int MB = n_rows, NB = n_columns;
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);

  int state_int    = -1;
  int property_int = -1;

  // the single active process of the 1x1 grid reads the archive
  if (tmp.grid->mpi_process_is_active)
    {
      herr_t status;

      hid_t file_id    = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
      hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

      // check that the data type of the archive matches NumberType
      hid_t       datatype   = H5Dget_type(dataset_id);
      H5T_class_t t_class_in = H5Tget_class(datatype);
      H5T_class_t t_class    = H5Tget_class(hdf5_type_id(tmp.values.data()));
      AssertThrow(
        t_class_in == t_class,
        ExcMessage(
          "The data type of the matrix to be read does not match the archive"));

      // check the dimensions (stored columnwise, i.e. swapped)
      hid_t     dataspace_id = H5Dget_space(dataset_id);
      const int ndims        = H5Sget_simple_extent_ndims(dataspace_id);
      AssertThrow(ndims == 2, ExcIO());
      hsize_t dims[2];
      H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
      AssertThrow(
        static_cast<int>(dims[0]) == n_columns,
        ExcMessage(
          "The number of columns of the matrix does not match the content of the archive"));
      AssertThrow(
        static_cast<int>(dims[1]) == n_rows,
        ExcMessage(
          "The number of rows of the matrix does not match the content of the archive"));

      // read the matrix content
      status = H5Dread(dataset_id,
                       hdf5_type_id(tmp.values.data()),
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       tmp.values.data());

      // create HDF5 enum types for the state and property of the matrix
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      // open the datasets holding state and property and check their layout
      hid_t       dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
      hid_t       datatype_state   = H5Dget_type(dataset_state_id);
      H5T_class_t t_class_state    = H5Tget_class(datatype_state);
      AssertThrow(t_class_state == H5T_ENUM, ExcIO());

      hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
      hid_t datatype_property   = H5Dget_type(dataset_property_id);
      H5T_class_t t_class_property = H5Tget_class(datatype_property);
      AssertThrow(t_class_property == H5T_ENUM, ExcIO());

      hid_t dataspace_state    = H5Dget_space(dataset_state_id);
      hid_t dataspace_property = H5Dget_space(dataset_property_id);

      const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
      AssertThrow(ndims_state == 1, ExcIO());
      const int ndims_property =
        H5Sget_simple_extent_ndims(dataspace_property);
      AssertThrow(ndims_property == 1, ExcIO());

      hsize_t dims_state[1];
      H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
      AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
      hsize_t dims_property[1];
      H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
      AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());

      // read state and property and convert them to int for broadcasting
      status = H5Dread(dataset_state_id,
                       state_enum_id,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       &tmp.state);
      state_int = static_cast<int>(tmp.state);

      status = H5Dread(dataset_property_id,
                       property_enum_id,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       &tmp.property);
      property_int = static_cast<int>(tmp.property);

      // close everything that was opened
      status = H5Sclose(dataspace_id);
      status = H5Sclose(dataspace_state);
      status = H5Sclose(dataspace_property);
      status = H5Tclose(datatype);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);
      status = H5Dclose(dataset_state_id);
      status = H5Dclose(dataset_id);
      status = H5Dclose(dataset_property_id);
      status = H5Fclose(file_id);
      (void)status;
    }
  // make state and property known to processes outside the 1x1 grid
  tmp.grid->send_to_inactive(&state_int, 1);
  if (!tmp.grid->mpi_process_is_active)
    tmp.state = static_cast<LAPACKSupport::State>(state_int);
  tmp.grid->send_to_inactive(&property_int, 1);
  if (!tmp.grid->mpi_process_is_active)
    tmp.property = static_cast<LAPACKSupport::Property>(property_int);

  tmp.copy_to(*this);
#  endif // DEAL_II_WITH_HDF5
}


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  AssertThrow(false, ExcNeedsHDF5());
#  else
#    ifndef H5_HAVE_PARALLEL
  AssertThrow(false, ExcInternalError());
#    else

  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;

  // Similar to save_parallel(): read into a temporary matrix distributed on
  // a 1 x n_processes grid, then redistribute to *this.
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  n_mpi_processes);

  const int MB = n_rows;
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),
                          1);
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);

  // open the file collectively with MPI-IO access
  hid_t  plist_id = H5Pcreate(H5P_FILE_ACCESS);
  herr_t status   = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
  hid_t  file_id  = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
  status          = H5Pclose(plist_id);

  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

  // check that the data type of the archive matches NumberType
  hid_t       datatype     = hdf5_type_id(tmp.values.data());
  hid_t       datatype_inp = H5Dget_type(dataset_id);
  H5T_class_t t_class_inp  = H5Tget_class(datatype_inp);
  H5T_class_t t_class      = H5Tget_class(datatype);
  AssertThrow(
    t_class_inp == t_class,
    ExcMessage(
      "The data type of the matrix to be read does not match the archive"));

  // check the dimensions of the stored matrix (columnwise, i.e. swapped)
  hid_t     dataspace_id = H5Dget_space(dataset_id);
  const int ndims        = H5Sget_simple_extent_ndims(dataspace_id);
  AssertThrow(ndims == 2, ExcIO());
  hsize_t dims[2];
  status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
  AssertThrow(
    static_cast<int>(dims[0]) == n_columns,
    ExcMessage(
      "The number of columns of the matrix does not match the content of the archive"));
  AssertThrow(
    static_cast<int>(dims[1]) == n_rows,
    ExcMessage(
      "The number of rows of the matrix does not match the content of the archive"));

  // gather the local sizes to determine each process' hyperslab offset
  std::vector<int> proc_n_local_rows(n_mpi_processes),
    proc_n_local_columns(n_mpi_processes);
  int ierr = MPI_Allgather(&tmp.n_local_rows,
                           1,
                           MPI_INT,
                           proc_n_local_rows.data(),
                           1,
                           MPI_INT,
                           tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);
  ierr = MPI_Allgather(&tmp.n_local_columns,
                       1,
                       MPI_INT,
                       proc_n_local_columns.data(),
                       1,
                       MPI_INT,
                       tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);

  const unsigned int my_rank(
    Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

  hsize_t count[2]  = {static_cast<hsize_t>(tmp.n_local_columns),
                       static_cast<hsize_t>(tmp.n_local_rows)};
  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // select this process' hyperslab in the file and read it
  status = H5Sselect_hyperslab(
    dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
  hid_t memspace = H5Screate_simple(2, count, nullptr);

  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
  status =
    H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);

  // create HDF5 enum types and read the matrix state and property
  hid_t state_enum_id, property_enum_id;
  internal::create_HDF5_state_enum_id(state_enum_id);
  internal::create_HDF5_property_enum_id(property_enum_id);

  hid_t       dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
  hid_t       datatype_state   = H5Dget_type(dataset_state_id);
  H5T_class_t t_class_state    = H5Tget_class(datatype_state);
  AssertThrow(t_class_state == H5T_ENUM, ExcIO());

  hid_t       dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
  hid_t       datatype_property   = H5Dget_type(dataset_property_id);
  H5T_class_t t_class_property    = H5Tget_class(datatype_property);
  AssertThrow(t_class_property == H5T_ENUM, ExcIO());

  // check that both are one-dimensional, single-element datasets
  hid_t     dataspace_state    = H5Dget_space(dataset_state_id);
  hid_t     dataspace_property = H5Dget_space(dataset_property_id);
  const int ndims_state        = H5Sget_simple_extent_ndims(dataspace_state);
  const int ndims_property     = H5Sget_simple_extent_ndims(dataspace_property);
  AssertThrow(ndims_state == 1 && ndims_property == 1, ExcIO());
  hsize_t dims_state[1];
  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
  hsize_t dims_property[1];
  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);

  status = H5Dread(
    dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
  status = H5Dread(dataset_property_id,
                   property_enum_id,
                   H5S_ALL,
                   H5S_ALL,
                   H5P_DEFAULT,
                   &tmp.property);

  // close everything that was opened
  status = H5Sclose(memspace);
  status = H5Dclose(dataset_id);
  status = H5Dclose(dataset_state_id);
  status = H5Dclose(dataset_property_id);
  status = H5Sclose(dataspace_id);
  status = H5Sclose(dataspace_state);
  status = H5Sclose(dataspace_property);
  status = H5Tclose(state_enum_id);
  status = H5Tclose(property_enum_id);
  status = H5Fclose(file_id);
  (void)status;

  // redistribute the temporary matrix (including state and property) to *this
  tmp.copy_to(*this);

#    endif // H5_HAVE_PARALLEL
#  endif   // DEAL_II_WITH_HDF5
}


namespace internal
{
  namespace
  {
    // scale the columns of a distributed matrix by per-column factors
    template <typename NumberType>
    void
    scale_columns(ScaLAPACKMatrix<NumberType>       &matrix,
                  const ArrayView<const NumberType> &factors)
    {
      Assert(matrix.n() == factors.size(),
             ExcDimensionMismatch(matrix.n(), factors.size()));

      for (unsigned int i = 0; i < matrix.local_n(); ++i)
        {
          const NumberType s = factors[matrix.global_column(i)];
          for (unsigned int j = 0; j < matrix.local_m(); ++j)
            matrix.local_el(j, i) *= s;
        }
    }

    // scale the rows of a distributed matrix by per-row factors
    template <typename NumberType>
    void
    scale_rows(ScaLAPACKMatrix<NumberType>       &matrix,
               const ArrayView<const NumberType> &factors)
    {
      Assert(matrix.m() == factors.size(),
             ExcDimensionMismatch(matrix.m(), factors.size()));

      for (unsigned int i = 0; i < matrix.local_m(); ++i)
        {
          const NumberType s = factors[matrix.global_row(i)];
          for (unsigned int j = 0; j < matrix.local_n(); ++j)
            matrix.local_el(i, j) *= s;
        }
    }
  } // namespace
} // namespace internal


template <typename NumberType>
template <class InputVector>
void
ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_columns(*this, make_array_view(factors));
}



template <typename NumberType>
template <class InputVector>
void
ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_rows(*this, make_array_view(factors));
}


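// --------------------------------------------------------------------------
// Usage sketch (editorial addition): scaling each column (or row) of a
// distributed matrix by a per-column factor; the factor container is read on
// every process through an ArrayView. `grid` and the sizes are placeholders;
// not compiled here.
#if 0
#  include <deal.II/lac/scalapack.h>

#  include <vector>

void example_scaling(
  const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
{
  using namespace dealii;

  ScaLAPACKMatrix<double> A(100, 50, grid, 32, 32);
  // ... fill A ...

  std::vector<double> column_factors(50, 2.0); // one factor per column
  A.scale_columns(column_factors);             // A(:,j) *= column_factors[j]

  std::vector<double> row_factors(100, 0.5);   // one factor per row
  A.scale_rows(row_factors);                   // A(i,:) *= row_factors[i]
}
#endif
// --------------------------------------------------------------------------
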
// explicit instantiations
#  include "lac/scalapack.inst"


DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK