#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/base/mpi.templates.h>

#  include <deal.II/lac/scalapack.templates.h>

#  ifdef DEAL_II_WITH_HDF5
#    include <hdf5.h>
#  endif

#  ifdef DEAL_II_WITH_HDF5
// internal::hdf5_type_id() overloads (fragment): map each supported number
// type to the corresponding HDF5 native datatype.
template <typename number>
hid_t hdf5_type_id(const number *);

hid_t hdf5_type_id(const double *)       { return H5T_NATIVE_DOUBLE; }
hid_t hdf5_type_id(const float *)        { return H5T_NATIVE_FLOAT; }
hid_t hdf5_type_id(const int *)          { return H5T_NATIVE_INT; }
hid_t hdf5_type_id(const unsigned int *) { return H5T_NATIVE_UINT; }
hid_t hdf5_type_id(const char *)         { return H5T_NATIVE_CHAR; }
// ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(n_rows, n_columns, process_grid,
// row_block_size, column_block_size, property) -- fragment.
template <typename NumberType>
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  , first_process_column(0)



// Constructor for square matrices -- fragment.
template <typename NumberType>
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,



// Constructor reading the matrix from an HDF5 file: process 0 opens the file,
// reads the dimensions of the dataset "/matrix" and broadcasts them; the
// matrix is then reinitialized and loaded -- fragment.
template <typename NumberType>
  const std::string                                        &filename,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  , first_process_column(0)
  , submatrix_column(1)

#  ifndef DEAL_II_WITH_HDF5
           "This function is only available when deal.II is configured with HDF5"));
#  else
  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));

  if (this_mpi_process == 0)
      hid_t file      = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
      hid_t dataset   = H5Dopen2(file, "/matrix", H5P_DEFAULT);
      hid_t filespace = H5Dget_space(dataset);

      int rank = H5Sget_simple_extent_ndims(filespace);
      status   = H5Sget_simple_extent_dims(filespace, dims, nullptr);

      status = H5Sclose(filespace);
      status = H5Dclose(dataset);
      status = H5Fclose(file);

  // Broadcast the matrix dimensions read by process 0 to all processes.
  int ierr = MPI_Bcast(&n_rows,
                       1,
                       Utilities::MPI::mpi_type_id_for_type<decltype(n_rows)>,
                       0 /*from root*/,
                       process_grid->mpi_communicator);
  AssertThrowMPI(ierr);

  ierr = MPI_Bcast(&n_columns,
                   1,
                   Utilities::MPI::mpi_type_id_for_type<decltype(n_columns)>,
                   0 /*from root*/,
                   process_grid->mpi_communicator);
  AssertThrowMPI(ierr);

  load(filename.c_str());
#  endif
// ScaLAPACKMatrix<NumberType>::reinit(n_rows_, n_columns_, process_grid,
// row_block_size_, column_block_size_, property_): set up the block-cyclic
// distribution and the BLACS array descriptor -- fragment.
template <typename NumberType>
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,

  Assert(row_block_size_ > 0,
         ExcMessage("Row block size has to be positive."));
  Assert(column_block_size_ > 0,
         ExcMessage("Column block size has to be positive."));
  Assert(
    row_block_size_ <= n_rows_,
    ExcMessage(
      "Row block size can not be greater than the number of rows of the matrix"));
  Assert(
    column_block_size_ <= n_columns_,
    ExcMessage(
      "Column block size can not be greater than the number of columns of the matrix"));

  property          = property_;
  n_columns         = n_columns_;
  row_block_size    = row_block_size_;
  column_block_size = column_block_size_;

  if (grid->mpi_process_is_active)
      // Local (per-process) dimensions of the distributed matrix.
      n_local_rows    = numroc_(&n_rows,
                                &row_block_size,
                                &(grid->this_process_row),
                                &first_process_row,
                                &(grid->n_process_rows));
      n_local_columns = numroc_(&n_columns,
                                &column_block_size,
                                &(grid->this_process_column),
                                &first_process_column,
                                &(grid->n_process_columns));

      // Fill the BLACS array descriptor.
      int lda = std::max(1, n_local_rows);
      descinit_(descriptor,
                &first_process_column,
                &(grid->blacs_context),
  else
      // Inactive processes store no data and an invalid descriptor.
      n_local_columns = -1;
      std::fill(std::begin(descriptor), std::end(descriptor), -1);
template <typename NumberType>
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,

  reinit(size, size, process_grid, block_size, block_size, property);
// ScaLAPACKMatrix<NumberType>::set_property() -- fragment.
template <typename NumberType>
  property = property_;



// get_property() and get_state() -- only the template headers of these
// definitions survive here.
template <typename NumberType>

template <typename NumberType>



// ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix):
// copy the locally owned entries of the block-cyclically distributed matrix
// from a (replicated) serial FullMatrix -- fragment.
template <typename NumberType>
  Assert(n_columns == int(matrix.n()),

  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              local_el(i, j)   = matrix(glob_i, glob_j);
            }
        }
    }
// ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
// const unsigned int rank): copy a serial matrix owned by process <rank> into
// the distributed matrix. A temporary one-process BLACS grid containing only
// <rank> is created and pgemr2d redistributes the data -- fragment.
template <typename NumberType>
  const unsigned int rank)

  if (n_rows * n_columns == 0)
    return;

  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));

  // All participating processes have to pass the same rank:
         ExcMessage("All processes have to call routine with identical rank"));
         ExcMessage("All processes have to call routine with identical rank"));

  if (this_mpi_process == rank)

  // Create a group/communicator containing only process <rank> ...
  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
  const std::vector<int> ranks(n, rank);
  MPI_Group_incl(group_A, n, ranks.data(), &group_B);
  const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,

  // ... and set up a 1x1 BLACS grid on it.
  int n_proc_rows_B = 1, n_proc_cols_B = 1;
  int this_process_row_B = -1, this_process_column_B = -1;
  int blacs_context_B    = -1;
  if (MPI_COMM_NULL != communicator_B)
      blacs_context_B   = Csys2blacs_handle(communicator_B);
      const char *order = "Col";
      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
      Cblacs_gridinfo(blacs_context_B,
                      &this_process_column_B);
  const bool mpi_process_is_active_B =
    (this_process_row_B >= 0 && this_process_column_B >= 0);

  // Array descriptor of the serial matrix on that grid.
  std::vector<int> descriptor_B(9, -1);
  const int first_process_row_B = 0, first_process_col_B = 0;

  if (mpi_process_is_active_B)
      int n_local_rows_B = numroc_(&n_rows,
                                   &first_process_row_B,
      int n_local_cols_B = numroc_(&n_columns,
                                   &this_process_column_B,
                                   &first_process_col_B,
      (void)n_local_cols_B;

      int lda = std::max(1, n_local_rows_B);
      descinit_(descriptor_B.data(),
                &first_process_row_B,
                &first_process_col_B,

  // Redistribute B into the locally owned part of this matrix (pgemr2d).
  if (this->grid->mpi_process_is_active)
      NumberType *loc_vals_A =
        this->values.size() > 0 ? this->values.data() : nullptr;
      const NumberType *loc_vals_B =
        mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
              &(this->grid->blacs_context));

  if (mpi_process_is_active_B)
    Cblacs_gridexit(blacs_context_B);

  MPI_Group_free(&group_A);
  MPI_Group_free(&group_B);
  if (MPI_COMM_NULL != communicator_B)
// ScaLAPACKMatrix<NumberType>::global_row(loc_row): map a local row index to
// its global index using indxl2g_ -- fragment.
template <typename NumberType>
  Assert(n_local_rows >= 0 &&
           loc_row < static_cast<unsigned int>(n_local_rows),

  const int i = loc_row + 1;
  return indxl2g_(&i,
                  &row_block_size,
                  &(grid->this_process_row),
                  &first_process_row,
                  &(grid->n_process_rows)) -
         1;



// ScaLAPACKMatrix<NumberType>::global_column(loc_column): map a local column
// index to its global index -- fragment.
template <typename NumberType>
  Assert(n_local_columns >= 0 &&
           loc_column < static_cast<unsigned int>(n_local_columns),

  const int j = loc_column + 1;
  return indxl2g_(&j,
                  &column_block_size,
                  &(grid->this_process_column),
                  &first_process_column,
                  &(grid->n_process_columns)) -
         1;
// ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
// const unsigned int rank) const: the mirror operation of copy_from() above;
// the distributed matrix is sent to a serial matrix owned by process <rank>
// via a one-process BLACS grid and pgemr2d -- fragment.
template <typename NumberType>
  const unsigned int rank) const

  if (n_rows * n_columns == 0)
    return;

  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));

  // All participating processes have to pass the same rank:
         ExcMessage("All processes have to call routine with identical rank"));
         ExcMessage("All processes have to call routine with identical rank"));

  if (this_mpi_process == rank)

  // Group/communicator containing only process <rank> and a 1x1 BLACS grid.
  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
  const std::vector<int> ranks(n, rank);
  MPI_Group_incl(group_A, n, ranks.data(), &group_B);
  const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,

  int n_proc_rows_B = 1, n_proc_cols_B = 1;
  int this_process_row_B = -1, this_process_column_B = -1;
  int blacs_context_B    = -1;
  if (MPI_COMM_NULL != communicator_B)
      blacs_context_B   = Csys2blacs_handle(communicator_B);
      const char *order = "Col";
      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
      Cblacs_gridinfo(blacs_context_B,
                      &this_process_column_B);
  const bool mpi_process_is_active_B =
    (this_process_row_B >= 0 && this_process_column_B >= 0);

  std::vector<int> descriptor_B(9, -1);
  const int first_process_row_B = 0, first_process_col_B = 0;

  if (mpi_process_is_active_B)
      int n_local_rows_B = numroc_(&n_rows,
                                   &first_process_row_B,
      int n_local_cols_B = numroc_(&n_columns,
                                   &this_process_column_B,
                                   &first_process_col_B,
      (void)n_local_cols_B;

      int lda = std::max(1, n_local_rows_B);
      descinit_(descriptor_B.data(),
                &first_process_row_B,
                &first_process_col_B,

  // Send the locally owned part of this matrix into B (pgemr2d).
  if (this->grid->mpi_process_is_active)
      const NumberType *loc_vals_A =
        this->values.size() > 0 ? this->values.data() : nullptr;
      NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
              &(this->grid->blacs_context));

  if (mpi_process_is_active_B)
    Cblacs_gridexit(blacs_context_B);

  MPI_Group_free(&group_A);
  MPI_Group_free(&group_B);
  if (MPI_COMM_NULL != communicator_B)
// ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const:
// gather the distributed matrix into a serial FullMatrix; depending on the
// stored state/property, the unused triangle is cleared or mirrored -- fragment.
template <typename NumberType>
  Assert(n_columns == int(matrix.n()),

  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              matrix(glob_i, glob_j) = local_el(i, j);
            }
        }
    }

  // For a Cholesky factor, clear the triangle that is not stored ...
  for (unsigned int i = 0; i < matrix.n(); ++i)
    for (unsigned int j = i + 1; j < matrix.m(); ++j)
      matrix(i, j) = 0.;
  for (unsigned int i = 0; i < matrix.n(); ++i)
    for (unsigned int j = 0; j < i; ++j)
      matrix(i, j) = 0.;

  // ... and for a symmetric inverse, mirror the stored triangle.
  if (uplo == 'L')
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = i + 1; j < matrix.m(); ++j)
        matrix(i, j) = matrix(j, i);
  else if (uplo == 'U')
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = 0; j < i; ++j)
        matrix(i, j) = matrix(j, i);
// ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &B,
// offset_A, offset_B, submatrix_size) const: copy a submatrix of A into a
// submatrix of B using pgemr2d on a union BLACS context -- fragment.
template <typename NumberType>
  const std::pair<unsigned int, unsigned int> &offset_A,
  const std::pair<unsigned int, unsigned int> &offset_B,
  const std::pair<unsigned int, unsigned int> &submatrix_size) const

  if (submatrix_size.first == 0 || submatrix_size.second == 0)
    return;

  // Both matrices have to live on the same MPI communicator.
  int ierr, comparison;
  ierr = MPI_Comm_compare(grid->mpi_communicator,
                          B.grid->mpi_communicator,
  Assert(comparison == MPI_IDENT,
         ExcMessage("Matrix A and B must have a common MPI Communicator"));

  // Create a union BLACS context spanning all processes of the communicator.
  int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
  const char *order = "Col";
  int union_n_process_rows =
  int union_n_process_columns = 1;
  Cblacs_gridinit(&union_blacs_context,
                  union_n_process_rows,
                  union_n_process_columns);

  int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
  Cblacs_gridinfo(this->grid->blacs_context,
  const bool in_context_A =
    (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
    (my_column_A >= 0 && my_column_A < n_grid_columns_A);

  int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
  Cblacs_gridinfo(B.grid->blacs_context,
  const bool in_context_B =
    (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
    (my_column_B >= 0 && my_column_B < n_grid_columns_B);

  const int n_rows_submatrix    = submatrix_size.first;
  const int n_columns_submatrix = submatrix_size.second;

  // 1-based offsets of the submatrices within A and B.
  int ia = offset_A.first + 1, ja = offset_A.second + 1;
  int ib = offset_B.first + 1, jb = offset_B.second + 1;

  std::array<int, 9> desc_A, desc_B;

  const NumberType *loc_vals_A = nullptr;
  NumberType       *loc_vals_B = nullptr;

  if (this->values.size() != 0)
    loc_vals_A = this->values.data();

  for (unsigned int i = 0; i < desc_A.size(); ++i)
    desc_A[i] = this->descriptor[i];

    loc_vals_B = B.values.data();

  for (unsigned int i = 0; i < desc_B.size(); ++i)

  pgemr2d(&n_rows_submatrix,
          &n_columns_submatrix,
          &union_blacs_context);

  Cblacs_gridexit(union_blacs_context);
// ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const:
// copy the whole matrix to a matrix that may live on a different process grid.
// If both matrices share the same grid the local arrays are copied directly;
// otherwise a union communicator and BLACS context are built and pgemr2d
// redistributes the data -- fragment.
template <typename NumberType>
  if (this->grid->mpi_process_is_active)
           this->descriptor[0] == 1,
           "Copying of ScaLAPACK matrices only implemented for dense matrices"));
  if (dest.grid->mpi_process_is_active)
           "Copying of ScaLAPACK matrices only implemented for dense matrices"));

  // Build the union of the source and destination process groups.
  MPI_Group group_source, group_dest, group_union;
  ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
  ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
  ierr = MPI_Group_union(group_source, group_dest, &group_union);
  ierr = MPI_Comm_create_group(MPI_COMM_WORLD,
                               &mpi_communicator_union);

  // Set up a BLACS context on the union communicator.
  int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
  const char *order = "Col";
  int union_n_process_rows =
  int union_n_process_columns = 1;
  Cblacs_gridinit(&union_blacs_context,
                  union_n_process_rows,
                  union_n_process_columns);

  const NumberType *loc_vals_source = nullptr;
  NumberType       *loc_vals_dest   = nullptr;

  if (this->grid->mpi_process_is_active && (this->values.size() > 0))
             "source: process is active but local matrix empty"));
      loc_vals_source = this->values.data();
  if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
             "destination: process is active but local matrix empty"));
      loc_vals_dest = dest.values.data();

  // pgemr2d redistributes the data between the two grids.
          &union_blacs_context);
  Cblacs_gridexit(union_blacs_context);

  if (mpi_communicator_union != MPI_COMM_NULL)
  ierr = MPI_Group_free(&group_source);
  ierr = MPI_Group_free(&group_dest);
  ierr = MPI_Group_free(&group_union);

  // Identical grids: copy the local arrays directly.
  if (this->grid->mpi_process_is_active)
    dest.values = this->values;
// ScaLAPACKMatrix<NumberType>::copy_transposed() -- only the template header
// of this definition survives here.
template <typename NumberType>



// ScaLAPACKMatrix<NumberType>::add(B, alpha, beta, transpose_B):
// A = alpha*A + beta*B (or beta*B^T), implemented with p_geadd -- fragment.
template <typename NumberType>
  const NumberType alpha,
  const NumberType beta,
  const bool       transpose_B)

         ExcMessage("The matrices A and B need to have the same process grid"));

  if (this->grid->mpi_process_is_active)
      char trans_b = transpose_B ? 'T' : 'N';
      NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =



// add(a, B): A += a*B -- fragment.
template <typename NumberType>
  add(B, 1, a, false);
// ScaLAPACKMatrix<NumberType>::Tadd(b, B) -- only the template header of this
// definition survives here.
template <typename NumberType>



// ScaLAPACKMatrix<NumberType>::mult(b, B, c, C, transpose_A, transpose_B) const:
// C = b * op(A) * op(B) + c * C via pgemm; the assertions check that the
// dimensions and block sizes of A, B and C are compatible for each of the
// four transpose combinations -- fragment.
template <typename NumberType>
  const bool transpose_A,
  const bool transpose_B) const

         ExcMessage("The matrices A and B need to have the same process grid"));
         ExcMessage("The matrices B and C need to have the same process grid"));

  if (!transpose_A && !transpose_B)
      Assert(this->n_rows == C.n_rows,
      Assert(this->row_block_size == C.row_block_size,
  else if (transpose_A && !transpose_B)
      Assert(this->n_columns == C.n_rows,
      Assert(this->column_block_size == C.row_block_size,
  else if (!transpose_A && transpose_B)
      Assert(this->n_rows == C.n_rows,
      Assert(this->row_block_size == C.row_block_size,
  else // transpose_A && transpose_B
      Assert(this->n_columns == C.n_rows,
      Assert(this->column_block_size == C.row_block_size,

  if (this->grid->mpi_process_is_active)
      char trans_a = transpose_A ? 'T' : 'N';
      char trans_b = transpose_B ? 'T' : 'N';

      const NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =
      NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;

      int n = C.n_columns;
      int k = transpose_A ? this->n_rows : this->n_columns;

              &(this->submatrix_row),
              &(this->submatrix_column),
              &C.submatrix_column,
// Convenience wrappers around mult() for the four transpose combinations.
// mmult(): C = A * B -- fragment.
template <typename NumberType>
  const bool adding) const

  if (adding)
    mult(1., B, 1., C, false, false);
  else
    mult(1., B, 0, C, false, false);



// Tmmult(): C = A^T * B -- fragment.
template <typename NumberType>
  const bool adding) const

  if (adding)
    mult(1., B, 1., C, true, false);
  else
    mult(1., B, 0, C, true, false);



// mTmult(): C = A * B^T -- fragment.
template <typename NumberType>
  const bool adding) const

  if (adding)
    mult(1., B, 1., C, false, true);
  else
    mult(1., B, 0, C, false, true);



// TmTmult(): C = A^T * B^T -- fragment.
template <typename NumberType>
  const bool adding) const

  if (adding)
    mult(1., B, 1., C, true, true);
  else
    mult(1., B, 0, C, true, true);
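// --------------------------------------------------------------------------
// Hedged usage sketch (not part of the original source): the wrappers above
// forward to mult(); with A, B, C on the same process grid and compatible
// block-cyclic layouts (names illustrative only):
//
//   A.mmult(C, B);            // C = A * B
//   A.Tmmult(C, B);           // C = A^T * B
//   A.mmult(C, B, true);      // C += A * B
//   A.mult(2., B, 1., C);     // C = 2*A*B + C
// --------------------------------------------------------------------------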
// ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization() (ppotrf) -- fragment.
template <typename NumberType>
         "Cholesky factorization can be applied to symmetric matrices only."));
         "Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
      NumberType *A_loc = this->values.data();



// ScaLAPACKMatrix<NumberType>::compute_lu_factorization() (pgetrf); the pivot
// array ipiv is sized for the locally owned rows -- fragment.
template <typename NumberType>
         "Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
      NumberType *A_loc = this->values.data();

      const int iarow = indxg2p_(&submatrix_row,
                                 &(grid->this_process_row),
                                 &(grid->n_process_rows));
      const int mp    = numroc_(&n_rows,
                                &(grid->this_process_row),
                                &(grid->n_process_rows));
      ipiv.resize(mp + row_block_size);



// ScaLAPACKMatrix<NumberType>::invert(): triangular matrices are inverted in
// place with ptrtri; otherwise the matrix is factorized first (Cholesky for
// symmetric, LU for general matrices) and then inverted after a workspace
// query -- fragment.
template <typename NumberType>
  if (grid->mpi_process_is_active)
      const char uploTriangular =
      const char diag = 'N';
      NumberType *A_loc = this->values.data();
      ptrtri(&uploTriangular,

    compute_cholesky_factorization();
    compute_lu_factorization();

  if (grid->mpi_process_is_active)
      NumberType *A_loc = this->values.data();
      int lwork = -1, liwork = -1;
      // workspace query, then resize the work arrays and invert
      lwork = static_cast<int>(work[0]);
      iwork.resize(liwork);
// ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(index_limits,
// compute_eigenvectors): sort the index limits and forward to
// eigenpairs_symmetric() -- fragment.
template <typename NumberType>
std::vector<NumberType>
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)

  std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  // Compute all eigenpairs if the full index range was requested.
  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric(compute_eigenvectors);
  else
    return eigenpairs_symmetric(compute_eigenvectors, idx);
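// --------------------------------------------------------------------------
// Hedged usage sketch (not part of the original source): computing the five
// smallest eigenpairs of a symmetric distributed matrix with the routine
// above; names and sizes are illustrative only.
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   const std::vector<double> eigenvalues =
//     A.eigenpairs_symmetric_by_index({0, 4}, /*compute_eigenvectors=*/true);
//   // On return the eigenvalues are sorted ascending; if eigenvectors were
//   // requested, the matrix contents are overwritten by them, so copy A
//   // beforehand if the original entries are still needed.
// --------------------------------------------------------------------------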
// eigenpairs_symmetric_by_value(value_limits, compute_eigenvectors): select
// all eigenpairs whose eigenvalues lie in the given (finite) interval -- fragment.
template <typename NumberType>
std::vector<NumberType>
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)

  Assert(!std::isnan(value_limits.first),
  Assert(!std::isnan(value_limits.second),

  std::pair<unsigned int, unsigned int> indices =
    std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int);

  return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
// ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(compute_eigenvectors,
// eigenvalue_idx, eigenvalue_limits): compute eigenvalues (and optionally
// eigenvectors) of a symmetric matrix with psyev/psyevx; the selection can be
// restricted either by an index range or by a value range, but not both -- fragment.
template <typename NumberType>
std::vector<NumberType>
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType>     &eigenvalue_limits)

         "Matrix has to be in Matrix state before calling this function."));
         ExcMessage("Matrix has to be symmetric for this operation."));

  std::lock_guard<std::mutex> lock(mutex);

  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
                           std::isnan(eigenvalue_limits.second)) ?
  const bool use_indices =

  Assert(
    !(use_values && use_indices),
    ExcMessage(
      "Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // If eigenvectors are requested, allocate a distributed matrix to hold them;
  // otherwise create a minimal dummy matrix.
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
      std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
      std::make_unique<ScaLAPACKMatrix<NumberType>>(
        grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
      char jobz = compute_eigenvectors ? 'V' : 'N';

      bool       all_eigenpairs = true;
      NumberType vl = NumberType(), vu = NumberType();
      NumberType abstol = NumberType();
      NumberType orfac  = 0;
      std::vector<int>        ifail;
      std::vector<int>        iclustr;
      std::vector<NumberType> gap(n_local_rows * n_local_columns);

      // Choose between computing all eigenpairs (psyev) and a subset (psyevx),
      // the latter restricted either by value range [vl, vu] or by index
      // range [il, iu].
      all_eigenpairs = true;
      all_eigenpairs = false;
      vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
      vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
      all_eigenpairs = false;
      il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
      iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;

      NumberType *A_loc = this->values.data();
      NumberType *eigenvectors_loc =
        (compute_eigenvectors ? eigenvectors->values.data() : nullptr);

      char cmach = compute_eigenvectors ? 'U' : 'S';
      plamch(&(this->grid->blacs_context), &cmach, abstol);
      ifail.resize(n_rows);
      iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
      gap.resize(grid->n_process_rows * grid->n_process_columns);

      // workspace query, then the actual call
      lwork = static_cast<int>(work[0]);
      iwork.resize(liwork);

      if (compute_eigenvectors)
      while (ev.size() > static_cast<size_type>(m))

  // Broadcast the number of eigenpairs and the eigenvalues to the processes
  // that are not part of the grid.
  grid->send_to_inactive(&m, 1);
  if (!grid->mpi_process_is_active)
  grid->send_to_inactive(ev.data(), ev.size());

  if (compute_eigenvectors)
// eigenpairs_symmetric_by_index_MRRR(index_limits, compute_eigenvectors): as
// eigenpairs_symmetric_by_index(), but using the MRRR algorithm -- fragment.
template <typename NumberType>
std::vector<NumberType>
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)

  const std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric_MRRR(compute_eigenvectors);
  else
    return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
// eigenpairs_symmetric_by_value_MRRR(value_limits, compute_eigenvectors) -- fragment.
template <typename NumberType>
std::vector<NumberType>
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)

  const std::pair<unsigned int, unsigned int> indices =
    std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int);

  return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
// ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(compute_eigenvectors,
// eigenvalue_idx, eigenvalue_limits): like eigenpairs_symmetric(), but using
// the MRRR algorithm (psyevr); the selection is again either by index range
// or by value range, never both -- fragment.
template <typename NumberType>
std::vector<NumberType>
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType>     &eigenvalue_limits)

         "Matrix has to be in Matrix state before calling this function."));
         ExcMessage("Matrix has to be symmetric for this operation."));

  std::lock_guard<std::mutex> lock(mutex);

  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
                           std::isnan(eigenvalue_limits.second)) ?
  const bool use_indices =

  Assert(
    !(use_values && use_indices),
    ExcMessage(
      "Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // If eigenvectors are requested, allocate a distributed matrix for them,
  // otherwise a minimal dummy matrix.
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
      std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
      std::make_unique<ScaLAPACKMatrix<NumberType>>(
        grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
      char jobz = compute_eigenvectors ? 'V' : 'N';

      NumberType vl = NumberType(), vu = NumberType();
      vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
      vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
      il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
      iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;

      NumberType *A_loc = this->values.data();
      NumberType *eigenvectors_loc =
        (compute_eigenvectors ? eigenvectors->values.data() : nullptr);

      // workspace query, then the actual psyevr call
      lwork = static_cast<int>(work[0]);
      iwork.resize(liwork);

      if (compute_eigenvectors)
          "psyevr failed to compute all eigenvectors for the selected eigenvalues"));

      if (compute_eigenvectors)
      while (ev.size() > static_cast<size_type>(m))

  // Broadcast the number of eigenpairs and the eigenvalues to the processes
  // that are not part of the grid.
  grid->send_to_inactive(&m, 1);
  if (!grid->mpi_process_is_active)
  grid->send_to_inactive(ev.data(), ev.size());

  if (compute_eigenvectors)
// ScaLAPACKMatrix<NumberType>::compute_SVD(U, VT): singular value
// decomposition via pgesvd; U and VT may be nullptr if the corresponding
// singular vectors are not needed -- fragment.
template <typename NumberType>
std::vector<NumberType>
         "Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size == column_block_size,

  const bool left_singular_vectors  = (U != nullptr) ? true : false;
  const bool right_singular_vectors = (VT != nullptr) ? true : false;

  if (left_singular_vectors)
      Assert(grid->blacs_context == U->grid->blacs_context,
  if (right_singular_vectors)
      Assert(grid->blacs_context == VT->grid->blacs_context,
             VT->grid->blacs_context));

  std::lock_guard<std::mutex> lock(mutex);

  std::vector<NumberType> sv(std::min(n_rows, n_columns));

  if (grid->mpi_process_is_active)
      char        jobu   = left_singular_vectors ? 'V' : 'N';
      char        jobvt  = right_singular_vectors ? 'V' : 'N';
      NumberType *A_loc  = this->values.data();
      NumberType *U_loc  = left_singular_vectors ? U->values.data() : nullptr;
      NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;

      // workspace query, then the actual pgesvd call
      lwork = static_cast<int>(work[0]);

  grid->send_to_inactive(sv.data(), sv.size());
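// --------------------------------------------------------------------------
// Hedged usage sketch (not part of the original source): computing a full SVD
// of a square matrix A distributed as in the earlier setup sketch; U and VT
// must use the same process grid and block sizes as A. Names are illustrative.
//
//   ScaLAPACKMatrix<double> U(1000, 1000, grid, 32, 32);
//   ScaLAPACKMatrix<double> VT(1000, 1000, grid, 32, 32);
//   const std::vector<double> sv = A.compute_SVD(&U, &VT);
// --------------------------------------------------------------------------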
// ScaLAPACKMatrix<NumberType>::least_squares(B, transpose): solve over- or
// underdetermined linear systems with pgels; A and B must share the same
// block-cyclic distribution -- fragment.
template <typename NumberType>
         ExcMessage("The matrices A and B need to have the same process grid"));
         "Matrix has to be in Matrix state before calling this function."));
         "Matrix B has to be in Matrix state before calling this function."));

  Assert(row_block_size == column_block_size,
         ExcMessage("Use identical block sizes for rows and columns of matrix A"));
         ExcMessage("Use identical block sizes for rows and columns of matrix B"));
         ExcMessage("Use identical block-cyclic distribution for matrices A and B"));

  std::lock_guard<std::mutex> lock(mutex);

  if (grid->mpi_process_is_active)
      NumberType *A_loc = this->values.data();
      NumberType *B_loc = B.values.data();

      // workspace query, then the actual pgels call
      lwork = static_cast<int>(work[0]);
// ScaLAPACKMatrix<NumberType>::pseudoinverse(ratio): build the Moore-Penrose
// pseudoinverse from a truncated SVD. Singular values below sv[0]*ratio are
// dropped; the retained singular vectors are copied into the submatrices U_R
// and VT_R, scaled by the inverse singular values, and the product
// (VT_R)^T (U_R)^T is assembled into this matrix -- fragment.
template <typename NumberType>
         "Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size == column_block_size,
         ExcMessage("Use identical block sizes for rows and columns of matrix A"));
  Assert(
    ratio > 0. && ratio < 1.,
    ExcMessage(
      "input parameter ratio has to be larger than zero and smaller than 1"));

  std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
  AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),

  // Count the singular values that are kept and store their inverses.
  unsigned int n_sv = 1;
  std::vector<NumberType> inv_sigma;
  inv_sigma.push_back(1 / sv[0]);

  for (unsigned int i = 1; i < sv.size(); ++i)
    if (sv[i] > sv[0] * ratio)
        inv_sigma.push_back(1 / sv[i]);

  // Copy the first n_sv columns of U and rows of VT into U_R and VT_R.
            std::make_pair(0, 0),
            std::make_pair(0, 0),
            std::make_pair(n_rows, n_sv));
            std::make_pair(0, 0),
            std::make_pair(0, 0),
            std::make_pair(n_sv, n_columns));

  this->reinit(n_columns,
  VT_R.mult(1, U_R, 0, *this, true, true);
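// --------------------------------------------------------------------------
// Hedged usage sketch (not part of the original source): truncating singular
// values below 1e-8 * sigma_max when forming the pseudoinverse; the matrix is
// replaced by A^+ in place and the number of retained singular values is
// returned. The ratio value is illustrative only.
//
//   const unsigned int retained = A.pseudoinverse(1e-8);
// --------------------------------------------------------------------------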
// ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(a_norm) const:
// estimate the reciprocal condition number with ppocon, given the norm of the
// original matrix; requires the Cholesky factorization -- fragment.
template <typename NumberType>
  const NumberType a_norm) const

         "Matrix has to be in Cholesky state before calling this function."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType rcond = 0.;

  if (grid->mpi_process_is_active)
      int liwork = n_local_rows;
      iwork.resize(liwork);

      const NumberType *A_loc = this->values.data();

      // workspace query, then the actual ppocon call
      lwork = static_cast<int>(std::ceil(work[0]));

  grid->send_to_inactive(&rcond);
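// --------------------------------------------------------------------------
// Hedged usage sketch (not part of the original source): estimating the
// reciprocal condition number of a symmetric positive definite matrix. The
// norm has to be computed while the matrix is still in the Matrix state,
// before the Cholesky factorization overwrites it. Names are illustrative.
//
//   const double a_norm = A.l1_norm();
//   A.compute_cholesky_factorization();
//   const double rcond = A.reciprocal_condition_number(a_norm);
// --------------------------------------------------------------------------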
// l1_norm(), linfty_norm() and frobenius_norm(): forward to norm_symmetric()
// for symmetric matrices and to norm_general() otherwise.
template <typename NumberType>
  const char type('O');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);



template <typename NumberType>
  const char type('I');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);



template <typename NumberType>
  const char type('F');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
// ScaLAPACKMatrix<NumberType>::norm_general(type) const: 1-norm, infinity
// norm or Frobenius norm of a general matrix via plange -- fragment.
template <typename NumberType>
         ExcMessage("norms can be called in matrix state only."));

  std::lock_guard<std::mutex> lock(mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
      const int iarow = indxg2p_(&submatrix_row,
                                 &(grid->this_process_row),
                                 &(grid->n_process_rows));
      const int iacol = indxg2p_(&submatrix_column,
                                 &(grid->this_process_column),
                                 &first_process_column,
                                 &(grid->n_process_columns));
      const int mp0 = numroc_(&n_rows,
                              &(grid->this_process_row),
                              &(grid->n_process_rows));
      const int nq0 = numroc_(&n_columns,
                              &(grid->this_process_column),
                              &(grid->n_process_columns));

      // The workspace size depends on the requested norm type.
      if (type == 'O' || type == '1')
      else if (type == 'I')

      const NumberType *A_loc = this->values.begin();

  grid->send_to_inactive(&res);
// ScaLAPACKMatrix<NumberType>::norm_symmetric(type) const: norm of a symmetric
// matrix via plansy; the required workspace size depends on the norm type and
// on the block-cyclic layout -- fragment.
template <typename NumberType>
         ExcMessage("norms can be called in matrix state only."));
         ExcMessage("Matrix has to be symmetric for this operation."));

  std::lock_guard<std::mutex> lock(mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
      const int lcm =
        ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
      const int v2 = lcm / (grid->n_process_rows);

      const int IAROW = indxg2p_(&submatrix_row,
                                 &(grid->this_process_row),
                                 &(grid->n_process_rows));
      const int IACOL = indxg2p_(&submatrix_column,
                                 &(grid->this_process_column),
                                 &first_process_column,
                                 &(grid->n_process_columns));
      const int Np0 = numroc_(&n_columns,
                              &(grid->this_process_row),
                              &(grid->n_process_rows));
      const int Nq0 = numroc_(&n_columns,
                              &(grid->this_process_column),
                              &(grid->n_process_columns));

      const int v1  = iceil_(&Np0, &row_block_size);
      const int ldw = (n_local_rows == n_local_columns) ?
                        0 :
                        row_block_size * iceil_(&v1, &v2);

      const int lwork =
        (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
      const NumberType *A_loc = this->values.begin();

  grid->send_to_inactive(&res);
#  ifdef DEAL_II_WITH_HDF5
// internal::create_HDF5_state_enum_id(): build an HDF5 enum datatype whose
// entries mirror LAPACKSupport::State; the assignments of `val` between the
// inserts are not shown in this fragment.
  create_HDF5_state_enum_id(hid_t &state_enum_id)
    herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
    status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
    status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
    status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
    status = H5Tenum_insert(state_enum_id, "lu", &val);
    status = H5Tenum_insert(state_enum_id, "matrix", &val);
    status = H5Tenum_insert(state_enum_id, "svd", &val);
    status = H5Tenum_insert(state_enum_id, "unusable", &val);

// internal::create_HDF5_property_enum_id(): the analogous enum datatype for
// LAPACKSupport::Property.
  create_HDF5_property_enum_id(hid_t &property_enum_id)
    herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
    status = H5Tenum_insert(property_enum_id, "general", &prop);
    status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
    status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
    status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
    status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
// ScaLAPACKMatrix<NumberType>::save(filename, chunk_size) const: store the
// matrix in an HDF5 file; if HDF5 was built with MPI support the matrix is
// written in parallel, otherwise it is funneled through one process -- fragment.
template <typename NumberType>
  const std::string                           &filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const

#  ifndef DEAL_II_WITH_HDF5
#  else
  std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;

  // If no chunk size was given, use one chunk per column by default.
  chunks_size_.first  = n_rows;
  chunks_size_.second = 1;

  Assert(chunks_size_.first > 0,
         ExcMessage("The row chunk size must be larger than 0."));
  Assert(chunks_size_.second > 0,
         ExcMessage("The column chunk size must be larger than 0."));

#    ifdef H5_HAVE_PARALLEL
  save_parallel(filename, chunks_size_);
#    else
  save_serial(filename, chunks_size_);
#    endif
#  endif
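// --------------------------------------------------------------------------
// Hedged usage sketch (not part of the original source): writing a matrix to
// an HDF5 file and reading it back into a (possibly differently distributed)
// matrix; file name, sizes and chunk size are illustrative only.
//
//   A.save("matrix.h5", std::make_pair(64U, 64U));
//   ScaLAPACKMatrix<double> B(1000, 1000, grid, 32, 32);
//   B.load("matrix.h5");
// --------------------------------------------------------------------------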
// ScaLAPACKMatrix<NumberType>::save_serial(filename, chunk_size) const: copy
// the matrix to a process grid with a single active process and let that
// process write the dataset "/matrix" plus the state and property enums.
// HDF5 stores data row-major, so dimensions are given transposed (columns
// first) -- fragment.
template <typename NumberType>
  const std::string                           &filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const

#  ifndef DEAL_II_WITH_HDF5
#  else
  // Create a single-process grid and gather the matrix there.
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
  const int MB = n_rows, NB = n_columns;

  if (tmp.grid->mpi_process_is_active)
      // Create a new file using the default properties.
      H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

      // Modify the dataset creation property to enable chunking.
      hsize_t chunk_dims[2];
      chunk_dims[0] = chunk_size.second;
      chunk_dims[1] = chunk_size.first;
      hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
      status = H5Pset_chunk(data_property, 2, chunk_dims);

      // Create the dataspace and the dataset for the matrix, then write it.
      dims[0] = n_columns;
      hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
      hid_t dataset_id   = H5Dcreate2(file_id,
      status = H5Dwrite(
        dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());

      // Create the enum types describing the matrix state and property and
      // write them as additional one-element datasets.
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      hsize_t dims_state[1];
      hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
      hid_t state_enum_dataset   = H5Dcreate2(file_id,
                                              state_enum_dataspace,
      status = H5Dwrite(state_enum_dataset,

      hsize_t dims_property[1];
      dims_property[0] = 1;
      hid_t property_enum_dataspace =
        H5Screate_simple(1, dims_property, nullptr);
      hid_t property_enum_dataset = H5Dcreate2(file_id,
                                               property_enum_dataspace,
      status = H5Dwrite(property_enum_dataset,

      // Release all HDF5 resources.
      status = H5Dclose(dataset_id);
      status = H5Dclose(state_enum_dataset);
      status = H5Dclose(property_enum_dataset);
      status = H5Sclose(dataspace_id);
      status = H5Sclose(state_enum_dataspace);
      status = H5Sclose(property_enum_dataspace);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);
      status = H5Pclose(data_property);
      status = H5Fclose(file_id);
#  endif
// ScaLAPACKMatrix<NumberType>::save_parallel(filename, chunk_size) const:
// repartition the matrix to a 1 x n_processes column grid and write it with
// MPI-IO enabled HDF5; each process writes its local block of columns into a
// hyperslab of the dataset "/matrix". The state and property enums are
// appended by process 0 afterwards -- fragment.
template <typename NumberType>
  const std::string                           &filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const

#  ifndef DEAL_II_WITH_HDF5
#  else
  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;

  // Create a 1 x n_processes column grid and copy the matrix to it.
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
  const int MB = n_rows;
  // Each process should own a contiguous block of complete columns.
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),

  // Set up file access with MPI-IO and create the file collectively.
  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
    H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
  status = H5Pclose(plist_id);

  // Create the chunked dataset "/matrix" (dimensions transposed, as HDF5 is
  // row-major).
  hid_t filespace = H5Screate_simple(2, dims, nullptr);

  hsize_t chunk_dims[2];
  chunk_dims[0] = chunk_size.second;
  chunk_dims[1] = chunk_size.first;
  plist_id = H5Pcreate(H5P_DATASET_CREATE);
  H5Pset_chunk(plist_id, 2, chunk_dims);
  hid_t dset_id = H5Dcreate2(
    file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
  status = H5Sclose(filespace);
  status = H5Pclose(plist_id);

  // Gather the local block sizes of all processes to compute this process'
  // offset in the file.
  std::vector<int> proc_n_local_rows(n_mpi_processes),
    proc_n_local_columns(n_mpi_processes);
                   proc_n_local_rows.data(),
                   tmp.grid->mpi_communicator);
                   proc_n_local_columns.data(),
                   tmp.grid->mpi_communicator);

  hid_t memspace = H5Screate_simple(2, count, nullptr);

  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // Select the hyperslab in the file belonging to this process and write the
  // local data.
  filespace = H5Dget_space(dset_id);
  status = H5Sselect_hyperslab(
    filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);

  plist_id = H5Pcreate(H5P_DATASET_XFER);
  status   = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);

  if (tmp.values.size() > 0)
    status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);

  status = H5Dclose(dset_id);
  status = H5Sclose(filespace);
  status = H5Sclose(memspace);
  status = H5Pclose(plist_id);
  status = H5Fclose(file_id);

  // Wait until everybody has finished writing before process 0 appends the
  // state and property datasets.
  ierr = MPI_Barrier(tmp.grid->mpi_communicator);

  if (tmp.grid->this_mpi_process == 0)
      hid_t file_id_reopen =
        H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);

      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      hsize_t dims_state[1];
      hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
      hid_t state_enum_dataset   = H5Dcreate2(file_id_reopen,
                                              state_enum_dataspace,
      status = H5Dwrite(state_enum_dataset,

      hsize_t dims_property[1];
      dims_property[0] = 1;
      hid_t property_enum_dataspace =
        H5Screate_simple(1, dims_property, nullptr);
      hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
                                               property_enum_dataspace,
      status = H5Dwrite(property_enum_dataset,

      status = H5Dclose(state_enum_dataset);
      status = H5Dclose(property_enum_dataset);
      status = H5Sclose(state_enum_dataspace);
      status = H5Sclose(property_enum_dataspace);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);
      status = H5Fclose(file_id_reopen);
#  endif
// ScaLAPACKMatrix<NumberType>::load(filename): read the matrix from an HDF5
// file, in parallel if HDF5 was built with MPI support -- fragment.
template <typename NumberType>
#  ifndef DEAL_II_WITH_HDF5
#  else
#    ifdef H5_HAVE_PARALLEL
  load_parallel(filename);
#    else
  load_serial(filename);
#    endif
#  endif
// ScaLAPACKMatrix<NumberType>::load_serial(filename): the single active
// process of an auxiliary grid reads the dataset "/matrix" plus the stored
// state and property, and the result is redistributed -- fragment.
template <typename NumberType>
#  ifndef DEAL_II_WITH_HDF5
#  else
  // Create a grid whose single active process reads the matrix.
  const auto one_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
  const int MB = n_rows, NB = n_columns;

  int property_int = -1;

  if (tmp.grid->mpi_process_is_active)
      // Open the file and the dataset in read-only mode.
      hid_t file_id    = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
      hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

      // Check that the stored datatype matches NumberType.
      hid_t       datatype   = H5Dget_type(dataset_id);
      H5T_class_t t_class_in = H5Tget_class(datatype);
      AssertThrow(
        t_class_in == t_class,
        ExcMessage(
          "The data type of the matrix to be read does not match the archive"));

      // Check that the stored dimensions match the current matrix.
      hid_t     dataspace_id = H5Dget_space(dataset_id);
      const int ndims        = H5Sget_simple_extent_ndims(dataspace_id);
      H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
      AssertThrow(
        static_cast<int>(dims[0]) == n_columns,
        ExcMessage(
          "The number of columns of the matrix does not match the content of the archive"));
      AssertThrow(
        static_cast<int>(dims[1]) == n_rows,
        ExcMessage(
          "The number of rows of the matrix does not match the content of the archive"));

      // Read the matrix.
      status = H5Dread(dataset_id,

      // Create the enum types and read the stored state and property.
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      hid_t       dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
      hid_t       datatype_state   = H5Dget_type(dataset_state_id);
      H5T_class_t t_class_state    = H5Tget_class(datatype_state);

      hid_t       dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
      hid_t       datatype_property   = H5Dget_type(dataset_property_id);
      H5T_class_t t_class_property    = H5Tget_class(datatype_property);

      hid_t dataspace_state    = H5Dget_space(dataset_state_id);
      hid_t dataspace_property = H5Dget_space(dataset_property_id);

      const int ndims_state    = H5Sget_simple_extent_ndims(dataspace_state);
      const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);

      hsize_t dims_state[1];
      H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
      hsize_t dims_property[1];
      H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);

      status    = H5Dread(dataset_state_id,
      state_int = static_cast<int>(tmp.state);

      status       = H5Dread(dataset_property_id,
      property_int = static_cast<int>(tmp.property);

      // Release HDF5 resources.
      status = H5Sclose(dataspace_id);
      status = H5Sclose(dataspace_state);
      status = H5Sclose(dataspace_property);
      status = H5Tclose(datatype);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);
      status = H5Dclose(dataset_state_id);
      status = H5Dclose(dataset_id);
      status = H5Dclose(dataset_property_id);
      status = H5Fclose(file_id);

  // Broadcast the state and property (as ints) to the inactive processes.
  tmp.grid->send_to_inactive(&state_int, 1);
  tmp.grid->send_to_inactive(&property_int, 1);
#  endif
// ScaLAPACKMatrix<NumberType>::load_parallel(filename): collective read with
// MPI-IO enabled HDF5; the matrix is first read into a copy distributed on a
// 1 x n_processes column grid (each process reading its own hyperslab of
// complete columns) and then redistributed -- fragment.
template <typename NumberType>
#  ifndef DEAL_II_WITH_HDF5
#  else
#    ifndef H5_HAVE_PARALLEL
#    else
  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;

  // Create a 1 x n_processes column grid to read into.
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
  const int MB = n_rows;
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),

  // Open the file collectively with MPI-IO file access.
  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
  hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
  status = H5Pclose(plist_id);

  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

  // Check that the stored datatype matches NumberType.
  hid_t       datatype_inp = H5Dget_type(dataset_id);
  H5T_class_t t_class_inp  = H5Tget_class(datatype_inp);
  H5T_class_t t_class      = H5Tget_class(datatype);
  AssertThrow(
    t_class_inp == t_class,
    ExcMessage(
      "The data type of the matrix to be read does not match the archive"));

  // Check that the stored dimensions match the current matrix.
  hid_t     dataspace_id = H5Dget_space(dataset_id);
  const int ndims        = H5Sget_simple_extent_ndims(dataspace_id);
  status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
  AssertThrow(
    static_cast<int>(dims[0]) == n_columns,
    ExcMessage(
      "The number of columns of the matrix does not match the content of the archive"));
  AssertThrow(
    static_cast<int>(dims[1]) == n_rows,
    ExcMessage(
      "The number of rows of the matrix does not match the content of the archive"));

  // Gather the local block sizes of all processes to compute this process'
  // offset in the file.
  std::vector<int> proc_n_local_rows(n_mpi_processes),
    proc_n_local_columns(n_mpi_processes);
                   proc_n_local_rows.data(),
                   tmp.grid->mpi_communicator);
                   proc_n_local_columns.data(),
                   tmp.grid->mpi_communicator);

  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // Select this process' hyperslab and read it into the local array.
  status = H5Sselect_hyperslab(
    dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);

  hid_t memspace = H5Screate_simple(2, count, nullptr);

  status =
    H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);

  // Read the stored state and property.
  hid_t state_enum_id, property_enum_id;
  internal::create_HDF5_state_enum_id(state_enum_id);
  internal::create_HDF5_property_enum_id(property_enum_id);

  hid_t       dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
  hid_t       datatype_state   = H5Dget_type(dataset_state_id);
  H5T_class_t t_class_state    = H5Tget_class(datatype_state);

  hid_t       dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
  hid_t       datatype_property   = H5Dget_type(dataset_property_id);
  H5T_class_t t_class_property    = H5Tget_class(datatype_property);

  hid_t dataspace_state    = H5Dget_space(dataset_state_id);
  hid_t dataspace_property = H5Dget_space(dataset_property_id);

  const int ndims_state    = H5Sget_simple_extent_ndims(dataspace_state);
  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);

  hsize_t dims_state[1];
  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
  hsize_t dims_property[1];
  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);

  status = H5Dread(
    dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);

  status = H5Dread(dataset_property_id,

  // Release HDF5 resources.
  status = H5Sclose(memspace);
  status = H5Dclose(dataset_id);
  status = H5Dclose(dataset_state_id);
  status = H5Dclose(dataset_property_id);
  status = H5Sclose(dataspace_id);
  status = H5Sclose(dataspace_state);
  status = H5Sclose(dataspace_property);
  status = H5Tclose(state_enum_id);
  status = H5Tclose(property_enum_id);
  status = H5Fclose(file_id);
#    endif
#  endif
  template <typename NumberType>
      for (unsigned int i = 0; i < matrix.local_n(); ++i)
        {
          const NumberType s = factors[matrix.global_column(i)];
          for (unsigned int j = 0; j < matrix.local_m(); ++j)
            matrix.local_el(j, i) *= s;
        }

  template <typename NumberType>
      for (unsigned int i = 0; i < matrix.local_m(); ++i)
        {
          const NumberType s = factors[matrix.global_row(i)];
          for (unsigned int j = 0; j < matrix.local_n(); ++j)
            matrix.local_el(i, j) *= s;
        }
// ScaLAPACKMatrix<NumberType>::scale_columns(factors) and scale_rows(factors):
// on the active processes, forward to the internal scaling helpers above --
// fragments.
template <typename NumberType>
template <class InputVector>
  if (this->grid->mpi_process_is_active)



template <typename NumberType>
template <class InputVector>
  if (this->grid->mpi_process_is_active)
// explicit instantiations
#  include "scalapack.inst"

#endif // DEAL_II_WITH_SCALAPACK