#ifdef DEAL_II_WITH_SCALAPACK

# include <deal.II/base/mpi.templates.h>

# include <deal.II/lac/scalapack.templates.h>

# ifdef DEAL_II_WITH_HDF5

# ifdef DEAL_II_WITH_HDF5
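  // Helpers returning the HDF5 native type id that corresponds to a given
  // number type (H5T_NATIVE_DOUBLE for double, H5T_NATIVE_FLOAT for float,
  // and so on).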
  template <typename number>
      return H5T_NATIVE_DOUBLE;

      return H5T_NATIVE_FLOAT;

      return H5T_NATIVE_INT;

      return H5T_NATIVE_UINT;

      return H5T_NATIVE_CHAR;
# endif // DEAL_II_WITH_HDF5
  template <typename NumberType>
    const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
    , first_process_column(0)
  template <typename NumberType>
    const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  template <typename NumberType>
    const std::string &                                       filename,
    const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
    , first_process_column(0)
    , submatrix_column(1)
# ifndef DEAL_II_WITH_HDF5
        "This function is only available when deal.II is configured with HDF5"));

        hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);

        hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);

        hid_t filespace = H5Dget_space(dataset);

        int rank = H5Sget_simple_extent_ndims(filespace);
        status   = H5Sget_simple_extent_dims(filespace, dims, nullptr);

        status = H5Sclose(filespace);
        status = H5Dclose(dataset);
        status = H5Fclose(file);

    int ierr = MPI_Bcast(&n_rows,
                         Utilities::MPI::internal::mpi_type_id(&n_rows),
                         Utilities::MPI::internal::mpi_type_id(&n_columns),

    load(filename.c_str());
# endif // DEAL_II_WITH_HDF5
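  // Illustrative usage sketch (not part of the implementation; the names
  // `mpi_communicator` and the grid/block dimensions are placeholders):
  // construct a distributed matrix directly from an HDF5 archive written by
  // save().
  //
  //   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
  //     mpi_communicator, n_rows, n_columns, row_block_size, column_block_size);
  //   ScaLAPACKMatrix<double> A("matrix.h5", grid, row_block_size);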
  template <typename NumberType>
    const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
    Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
    Assert(column_block_size_ > 0,
           ExcMessage("Column block size has to be positive."));
      row_block_size_ <= n_rows_,
        "Row block size can not be greater than the number of rows of the matrix"));
      column_block_size_ <= n_columns_,
        "Column block size can not be greater than the number of columns of the matrix"));

    property          = property_;
    n_columns         = n_columns_;
    row_block_size    = row_block_size_;
    column_block_size = column_block_size_;

    if (grid->mpi_process_is_active)
        n_local_rows    = numroc_(&n_rows,
                                  &(grid->this_process_row),
                                  &(grid->n_process_rows));
        n_local_columns = numroc_(&n_columns,
                                  &(grid->this_process_column),
                                  &first_process_column,
                                  &(grid->n_process_columns));

        int lda = std::max(1, n_local_rows);

        descinit_(descriptor,
                  &first_process_column,
                  &(grid->blacs_context),
        n_local_columns = -1;
  template <typename NumberType>
    const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,

  template <typename NumberType>
    property = property_;
  template <typename NumberType>

  template <typename NumberType>

  template <typename NumberType>
    if (grid->mpi_process_is_active)
        for (int i = 0; i < n_local_rows; ++i)
            const int glob_i = global_row(i);
            for (int j = 0; j < n_local_columns; ++j)
                const int glob_j = global_column(j);
                local_el(i, j)   = matrix(glob_i, glob_j);
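  // Illustrative usage sketch (placeholder names): distribute a serial
  // FullMatrix onto the process grid via operator=, assuming `grid` is a
  // previously created Utilities::MPI::ProcessGrid.
  //
  //   ScaLAPACKMatrix<double> A(n_rows, n_columns, grid, block_size, block_size);
  //   FullMatrix<double>      serial(n_rows, n_columns);
  //   // ... fill `serial` identically on every process ...
  //   A = serial;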
  template <typename NumberType>
    const unsigned int rank)
    if (n_rows * n_columns == 0)
           ExcMessage("All processes have to call routine with identical rank"));
           ExcMessage("All processes have to call routine with identical rank"));

    MPI_Comm_group(this->grid->mpi_communicator, &group_A);
    const std::vector<int> ranks(n, rank);

    int n_proc_rows_B = 1, n_proc_cols_B = 1;
    int this_process_row_B = -1, this_process_column_B = -1;
    int blacs_context_B = -1;
    if (MPI_COMM_NULL != communicator_B)
        blacs_context_B   = Csys2blacs_handle(communicator_B);
        const char *order = "Col";
        Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
        Cblacs_gridinfo(blacs_context_B,
                        &this_process_column_B);

    const bool mpi_process_is_active_B =
      (this_process_row_B >= 0 && this_process_column_B >= 0);

    std::vector<int> descriptor_B(9, -1);
    const int first_process_row_B = 0, first_process_col_B = 0;

    if (mpi_process_is_active_B)
        int n_local_rows_B = numroc_(&n_rows,
                                     &first_process_row_B,
        int n_local_cols_B = numroc_(&n_columns,
                                     &this_process_column_B,
                                     &first_process_col_B,
        (void)n_local_cols_B;

        int lda = std::max(1, n_local_rows_B);

        descinit_(descriptor_B.data(),
                  &first_process_row_B,
                  &first_process_col_B,

    if (this->grid->mpi_process_is_active)
        NumberType *loc_vals_A =
          this->values.size() > 0 ? this->values.data() : nullptr;
        const NumberType *loc_vals_B =
          mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
                 &(this->grid->blacs_context));

    if (mpi_process_is_active_B)
      Cblacs_gridexit(blacs_context_B);

    MPI_Group_free(&group_A);
    MPI_Group_free(&group_B);
    if (MPI_COMM_NULL != communicator_B)
      MPI_Comm_free(&communicator_B);
  template <typename NumberType>
    Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
    const int i = loc_row + 1;
                   &(grid->this_process_row),
                   &(grid->n_process_rows)) -

  template <typename NumberType>
    Assert(n_local_columns >= 0 &&
             loc_column < static_cast<unsigned int>(n_local_columns),
    const int j = loc_column + 1;
                   &(grid->this_process_column),
                   &first_process_column,
                   &(grid->n_process_columns)) -
  template <typename NumberType>
    const unsigned int rank) const
    if (n_rows * n_columns == 0)
           ExcMessage("All processes have to call routine with identical rank"));
           ExcMessage("All processes have to call routine with identical rank"));

    MPI_Comm_group(this->grid->mpi_communicator, &group_A);
    const std::vector<int> ranks(n, rank);

    int n_proc_rows_B = 1, n_proc_cols_B = 1;
    int this_process_row_B = -1, this_process_column_B = -1;
    int blacs_context_B = -1;
    if (MPI_COMM_NULL != communicator_B)
        blacs_context_B   = Csys2blacs_handle(communicator_B);
        const char *order = "Col";
        Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
        Cblacs_gridinfo(blacs_context_B,
                        &this_process_column_B);

    const bool mpi_process_is_active_B =
      (this_process_row_B >= 0 && this_process_column_B >= 0);

    std::vector<int> descriptor_B(9, -1);
    const int first_process_row_B = 0, first_process_col_B = 0;

    if (mpi_process_is_active_B)
        int n_local_rows_B = numroc_(&n_rows,
                                     &first_process_row_B,
        int n_local_cols_B = numroc_(&n_columns,
                                     &this_process_column_B,
                                     &first_process_col_B,
        (void)n_local_cols_B;

        int lda = std::max(1, n_local_rows_B);

        descinit_(descriptor_B.data(),
                  &first_process_row_B,
                  &first_process_col_B,

    if (this->grid->mpi_process_is_active)
        const NumberType *loc_vals_A =
          this->values.size() > 0 ? this->values.data() : nullptr;
        NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
                 &(this->grid->blacs_context));

    if (mpi_process_is_active_B)
      Cblacs_gridexit(blacs_context_B);

    MPI_Group_free(&group_A);
    MPI_Group_free(&group_B);
    if (MPI_COMM_NULL != communicator_B)
      MPI_Comm_free(&communicator_B);
  template <typename NumberType>
    if (grid->mpi_process_is_active)
        for (int i = 0; i < n_local_rows; ++i)
            const int glob_i = global_row(i);
            for (int j = 0; j < n_local_columns; ++j)
                const int glob_j       = global_column(j);
                matrix(glob_i, glob_j) = local_el(i, j);

        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = i + 1; j < matrix.m(); ++j)
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = 0; j < i; ++j)

        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = i + 1; j < matrix.m(); ++j)
      else if (uplo == 'U')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = 0; j < i; ++j)
  template <typename NumberType>
    const std::pair<unsigned int, unsigned int> &offset_A,
    const std::pair<unsigned int, unsigned int> &offset_B,
    const std::pair<unsigned int, unsigned int> &submatrix_size) const
    if (submatrix_size.first == 0 || submatrix_size.second == 0)

    int ierr, comparison;
    ierr = MPI_Comm_compare(grid->mpi_communicator,
                            B.grid->mpi_communicator,
    Assert(comparison == MPI_IDENT,
           ExcMessage("Matrix A and B must have a common MPI Communicator"));

    int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
    const char *order = "Col";
    int union_n_process_rows =
    int union_n_process_columns = 1;
    Cblacs_gridinit(&union_blacs_context,
                    union_n_process_rows,
                    union_n_process_columns);

    int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
    Cblacs_gridinfo(this->grid->blacs_context,
    const bool in_context_A =
      (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
      (my_column_A >= 0 && my_column_A < n_grid_columns_A);

    int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
    Cblacs_gridinfo(B.grid->blacs_context,
    const bool in_context_B =
      (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
      (my_column_B >= 0 && my_column_B < n_grid_columns_B);

    const int n_rows_submatrix    = submatrix_size.first;
    const int n_columns_submatrix = submatrix_size.second;

    int ia = offset_A.first + 1, ja = offset_A.second + 1;
    int ib = offset_B.first + 1, jb = offset_B.second + 1;

    std::array<int, 9> desc_A, desc_B;

    const NumberType *loc_vals_A = nullptr;
    NumberType *      loc_vals_B = nullptr;

        if (this->values.size() != 0)
          loc_vals_A = this->values.data();
        for (unsigned int i = 0; i < desc_A.size(); ++i)
          desc_A[i] = this->descriptor[i];

        for (unsigned int i = 0; i < desc_B.size(); ++i)

      pgemr2d(&n_rows_submatrix,
              &n_columns_submatrix,
              &union_blacs_context);

    Cblacs_gridexit(union_blacs_context);
  template <typename NumberType>
    if (this->grid->mpi_process_is_active)
        this->descriptor[0] == 1,
          "Copying of ScaLAPACK matrices only implemented for dense matrices"));
    if (dest.grid->mpi_process_is_active)
          "Copying of ScaLAPACK matrices only implemented for dense matrices"));

        MPI_Group group_source, group_dest, group_union;
        ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
        ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
        ierr = MPI_Group_union(group_source, group_dest, &group_union);
                                         &mpi_communicator_union);

        int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
        const char *order = "Col";
        int union_n_process_rows =
        int union_n_process_columns = 1;
        Cblacs_gridinit(&union_blacs_context,
                        union_n_process_rows,
                        union_n_process_columns);

        const NumberType *loc_vals_source = nullptr;
        NumberType *      loc_vals_dest   = nullptr;

        if (this->grid->mpi_process_is_active && (this->values.size() > 0))
                  "source: process is active but local matrix empty"));
            loc_vals_source = this->values.data();
                  "destination: process is active but local matrix empty"));
                 &union_blacs_context);

        Cblacs_gridexit(union_blacs_context);

        if (mpi_communicator_union != MPI_COMM_NULL)
            ierr = MPI_Comm_free(&mpi_communicator_union);
        ierr = MPI_Group_free(&group_source);
        ierr = MPI_Group_free(&group_dest);
        ierr = MPI_Group_free(&group_union);
      if (this->grid->mpi_process_is_active)
        dest.values = this->values;
  template <typename NumberType>

  template <typename NumberType>
    const NumberType alpha,
    const NumberType beta,
    const bool       transpose_B)
           ExcMessage("The matrices A and B need to have the same process grid"));

    if (this->grid->mpi_process_is_active)
        char trans_b = transpose_B ? 'T' : 'N';
          (this->values.size() > 0) ? this->values.data() : nullptr;
        const NumberType *B_loc =

  template <typename NumberType>
    add(B, 1, a, false);

  template <typename NumberType>

  template <typename NumberType>
    const bool transpose_A,
    const bool transpose_B) const
           ExcMessage("The matrices A and B need to have the same process grid"));
           ExcMessage("The matrices B and C need to have the same process grid"));

    if (!transpose_A && !transpose_B)
        Assert(this->n_rows == C.n_rows,
        Assert(this->row_block_size == C.row_block_size,
    else if (transpose_A && !transpose_B)
        Assert(this->n_columns == C.n_rows,
        Assert(this->column_block_size == C.row_block_size,
    else if (!transpose_A && transpose_B)
        Assert(this->n_rows == C.n_rows,
        Assert(this->row_block_size == C.row_block_size,
        Assert(this->n_columns == C.n_rows,
        Assert(this->column_block_size == C.row_block_size,

    if (this->grid->mpi_process_is_active)
        char trans_a = transpose_A ? 'T' : 'N';
        char trans_b = transpose_B ? 'T' : 'N';

        const NumberType *A_loc =
          (this->values.size() > 0) ? this->values.data() : nullptr;
        const NumberType *B_loc =
        NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
        int n = C.n_columns;
        int k = transpose_A ? this->n_rows : this->n_columns;

              &(this->submatrix_row),
              &(this->submatrix_column),
              &C.submatrix_column,
  template <typename NumberType>
    const bool adding) const
      mult(1., B, 1., C, false, false);
      mult(1., B, 0, C, false, false);

  template <typename NumberType>
    const bool adding) const
      mult(1., B, 1., C, true, false);
      mult(1., B, 0, C, true, false);

  template <typename NumberType>
    const bool adding) const
      mult(1., B, 1., C, false, true);
      mult(1., B, 0, C, false, true);

  template <typename NumberType>
    const bool adding) const
      mult(1., B, 1., C, true, true);
      mult(1., B, 0, C, true, true);
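  // Illustrative usage sketch (placeholder names): the four wrappers above map
  // to C = A*B, C = A^T*B, C = A*B^T and C = A^T*B^T, optionally adding to C.
  //
  //   ScaLAPACKMatrix<double> C(A.m(), B.n(), grid, block_size, block_size);
  //   A.mmult(C, B);        // C  = A * B
  //   A.mmult(C, B, true);  // C += A * B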
  template <typename NumberType>
        "Cholesky factorization can be applied to symmetric matrices only."));
        "Matrix has to be in Matrix state before calling this function."));

    if (grid->mpi_process_is_active)
        NumberType *A_loc = this->values.data();

  template <typename NumberType>
        "Matrix has to be in Matrix state before calling this function."));

    if (grid->mpi_process_is_active)
        NumberType *A_loc = this->values.data();

        const int iarow = indxg2p_(&submatrix_row,
                                   &(grid->this_process_row),
                                   &(grid->n_process_rows));
        const int mp    = numroc_(&n_rows,
                                  &(grid->this_process_row),
                                  &(grid->n_process_rows));
        ipiv.resize(mp + row_block_size);

  template <typename NumberType>
        if (grid->mpi_process_is_active)
            const char uploTriangular =
            const char diag = 'N';
            NumberType *A_loc = this->values.data();
            ptrtri(&uploTriangular,

            compute_cholesky_factorization();
            compute_lu_factorization();

        if (grid->mpi_process_is_active)
            NumberType *A_loc = this->values.data();
            int lwork = -1, liwork = -1;
            lwork = static_cast<int>(work[0]);
            iwork.resize(liwork);
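  // Illustrative usage sketch (placeholder names): invert a symmetric positive
  // definite matrix in place; invert() factorizes first (Cholesky or LU,
  // depending on the stored property and state) if that has not been done yet.
  //
  //   A.set_property(LAPACKSupport::Property::symmetric);
  //   A.invert();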
  template <typename NumberType>
  std::vector<NumberType>
    const std::pair<unsigned int, unsigned int> &index_limits,
    const bool                                   compute_eigenvectors)
    std::pair<unsigned int, unsigned int> idx =
      std::make_pair(std::min(index_limits.first, index_limits.second),
                     std::max(index_limits.first, index_limits.second));

    if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
      return eigenpairs_symmetric(compute_eigenvectors);
      return eigenpairs_symmetric(compute_eigenvectors, idx);

  template <typename NumberType>
  std::vector<NumberType>
    const std::pair<NumberType, NumberType> &value_limits,
    const bool                               compute_eigenvectors)
    Assert(!std::isnan(value_limits.first),
    Assert(!std::isnan(value_limits.second),

    std::pair<unsigned int, unsigned int> indices =

    return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
  template <typename NumberType>
  std::vector<NumberType>
    const bool                                   compute_eigenvectors,
    const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
    const std::pair<NumberType, NumberType> &    eigenvalue_limits)
        "Matrix has to be in Matrix state before calling this function."));
           ExcMessage("Matrix has to be symmetric for this operation."));

    std::lock_guard<std::mutex> lock(mutex);

    const bool use_values = (std::isnan(eigenvalue_limits.first) ||
                             std::isnan(eigenvalue_limits.second)) ?
    const bool use_indices =
      !(use_values && use_indices),
        "Prescribing both the index and value range for the eigenvalues is ambiguous"));

    std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
      compute_eigenvectors ?
        std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
          grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

    std::vector<NumberType> ev(n_rows);

    if (grid->mpi_process_is_active)
        char jobz = compute_eigenvectors ? 'V' : 'N';

        bool all_eigenpairs = true;
        NumberType vl = NumberType(), vu = NumberType();
        NumberType abstol = NumberType();
        NumberType orfac = 0;
        std::vector<int> ifail;
        std::vector<int> iclustr;
        std::vector<NumberType> gap(n_local_rows * n_local_columns);

            all_eigenpairs = true;
            all_eigenpairs = false;
            vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
            vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
            all_eigenpairs = false;
            il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
            iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;

        NumberType *A_loc = this->values.data();
        NumberType *eigenvectors_loc =
          (compute_eigenvectors ? eigenvectors->values.data() : nullptr);

            char cmach = compute_eigenvectors ? 'U' : 'S';
            plamch(&(this->grid->blacs_context), &cmach, abstol);

            ifail.resize(n_rows);
            iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
            gap.resize(grid->n_process_rows * grid->n_process_columns);

        lwork = static_cast<int>(work[0]);
        iwork.resize(liwork);

        if (compute_eigenvectors)
        while (ev.size() > static_cast<size_type>(m))

    grid->send_to_inactive(&m, 1);

    if (!grid->mpi_process_is_active)

    grid->send_to_inactive(ev.data(), ev.size());

    if (compute_eigenvectors)
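  // Illustrative usage sketch (placeholder names): compute all eigenvalues and
  // eigenvectors of a symmetric matrix; on return the matrix stores the
  // eigenvectors and the returned vector the eigenvalues in ascending order.
  //
  //   A.set_property(LAPACKSupport::Property::symmetric);
  //   const std::vector<double> lambda =
  //     A.eigenpairs_symmetric_by_index(std::make_pair(0u, A.n() - 1), true);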
  template <typename NumberType>
  std::vector<NumberType>
    const std::pair<unsigned int, unsigned int> &index_limits,
    const bool                                   compute_eigenvectors)
    const std::pair<unsigned int, unsigned int> idx =
      std::make_pair(std::min(index_limits.first, index_limits.second),
                     std::max(index_limits.first, index_limits.second));

    if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
      return eigenpairs_symmetric_MRRR(compute_eigenvectors);
      return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);

  template <typename NumberType>
  std::vector<NumberType>
    const std::pair<NumberType, NumberType> &value_limits,
    const bool                               compute_eigenvectors)
    const std::pair<unsigned int, unsigned int> indices =

    return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
  template <typename NumberType>
  std::vector<NumberType>
    const bool                                   compute_eigenvectors,
    const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
    const std::pair<NumberType, NumberType> &    eigenvalue_limits)
        "Matrix has to be in Matrix state before calling this function."));
           ExcMessage("Matrix has to be symmetric for this operation."));

    std::lock_guard<std::mutex> lock(mutex);

    const bool use_values = (std::isnan(eigenvalue_limits.first) ||
                             std::isnan(eigenvalue_limits.second)) ?
    const bool use_indices =
      !(use_values && use_indices),
        "Prescribing both the index and value range for the eigenvalues is ambiguous"));

    std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
      compute_eigenvectors ?
        std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
          grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

    std::vector<NumberType> ev(n_rows);

    if (grid->mpi_process_is_active)
        char jobz = compute_eigenvectors ? 'V' : 'N';

        NumberType vl = NumberType(), vu = NumberType();
            vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
            vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
            il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
            iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;

        NumberType *A_loc = this->values.data();
        NumberType *eigenvectors_loc =
          (compute_eigenvectors ? eigenvectors->values.data() : nullptr);

        lwork = static_cast<int>(work[0]);
        iwork.resize(liwork);

        if (compute_eigenvectors)
              "psyevr failed to compute all eigenvectors for the selected eigenvalues"));

    if (compute_eigenvectors)
        while (ev.size() > static_cast<size_type>(m))

    grid->send_to_inactive(&m, 1);

    if (!grid->mpi_process_is_active)

    grid->send_to_inactive(ev.data(), ev.size());

    if (compute_eigenvectors)
  template <typename NumberType>
  std::vector<NumberType>
        "Matrix has to be in Matrix state before calling this function."));
    Assert(row_block_size == column_block_size,

    const bool left_singular_vectors  = (U != nullptr) ? true : false;
    const bool right_singular_vectors = (VT != nullptr) ? true : false;

    if (left_singular_vectors)
        Assert(row_block_size == U->row_block_size,
        Assert(column_block_size == U->column_block_size,
        Assert(grid->blacs_context == U->grid->blacs_context,
    if (right_singular_vectors)
        Assert(grid->blacs_context == VT->grid->blacs_context,
                                    VT->grid->blacs_context));

    std::lock_guard<std::mutex> lock(mutex);

    std::vector<NumberType> sv(std::min(n_rows, n_columns));

    if (grid->mpi_process_is_active)
        char jobu  = left_singular_vectors ? 'V' : 'N';
        char jobvt = right_singular_vectors ? 'V' : 'N';

        NumberType *A_loc  = this->values.data();
        NumberType *U_loc  = left_singular_vectors ? U->values.data() : nullptr;
        NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;

               &U->submatrix_column,

        lwork = static_cast<int>(work[0]);

               &U->submatrix_column,

    grid->send_to_inactive(sv.data(), sv.size());
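  // Illustrative usage sketch (placeholder names): compute the singular values
  // and, optionally, the left/right singular vectors of a distributed matrix.
  //
  //   ScaLAPACKMatrix<double> U(A.m(), A.m(), grid, block_size, block_size);
  //   ScaLAPACKMatrix<double> VT(A.n(), A.n(), grid, block_size, block_size);
  //   const std::vector<double> sigma = A.compute_SVD(&U, &VT);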
  template <typename NumberType>
           ExcMessage("The matrices A and B need to have the same process grid"));
        "Matrix has to be in Matrix state before calling this function."));
        "Matrix B has to be in Matrix state before calling this function."));

    Assert(row_block_size == column_block_size,
        "Use identical block sizes for rows and columns of matrix A"));
        "Use identical block sizes for rows and columns of matrix B"));
        "Use identical block-cyclic distribution for matrices A and B"));

    std::lock_guard<std::mutex> lock(mutex);

    if (grid->mpi_process_is_active)
        NumberType *A_loc = this->values.data();

        lwork = static_cast<int>(work[0]);
  template <typename NumberType>
        "Matrix has to be in Matrix state before calling this function."));
    Assert(row_block_size == column_block_size,
        "Use identical block sizes for rows and columns of matrix A"));
      ratio > 0. && ratio < 1.,
        "input parameter ratio has to be larger than zero and smaller than 1"));

    std::vector<NumberType> sv = this->compute_SVD(&U, &VT);

    unsigned int n_sv = 1;
    std::vector<NumberType> inv_sigma;
    inv_sigma.push_back(1 / sv[0]);

    for (unsigned int i = 1; i < sv.size(); ++i)
      if (sv[i] > sv[0] * ratio)
          inv_sigma.push_back(1 / sv[i]);

                 std::make_pair(0, 0),
                 std::make_pair(0, 0),
                 std::make_pair(n_rows, n_sv));
                 std::make_pair(0, 0),
                 std::make_pair(0, 0),
                 std::make_pair(n_sv, n_columns));

    VT_R.mult(1, U_R, 0, *this, true, true);
  template <typename NumberType>
    const NumberType a_norm) const
        "Matrix has to be in Cholesky state before calling this function."));
    std::lock_guard<std::mutex> lock(mutex);
    NumberType rcond = 0.;

    if (grid->mpi_process_is_active)
        int liwork = n_local_rows;
        iwork.resize(liwork);

        const NumberType *A_loc = this->values.data();

        lwork = static_cast<int>(std::ceil(work[0]));

    grid->send_to_inactive(&rcond);
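  // Illustrative usage sketch (placeholder names): the reciprocal condition
  // number needs the l1 norm of the original matrix and a previously computed
  // Cholesky factorization.
  //
  //   const double a_norm = A.l1_norm();
  //   A.compute_cholesky_factorization();
  //   const double rcond = A.reciprocal_condition_number(a_norm);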
  template <typename NumberType>
    const char type('O');
      return norm_symmetric(type);
      return norm_general(type);

  template <typename NumberType>
    const char type('I');
      return norm_symmetric(type);
      return norm_general(type);

  template <typename NumberType>
    const char type('F');
      return norm_symmetric(type);
      return norm_general(type);
  template <typename NumberType>
           ExcMessage("norms can be called in matrix state only."));
    std::lock_guard<std::mutex> lock(mutex);
    NumberType res = 0.;

    if (grid->mpi_process_is_active)
        const int iarow = indxg2p_(&submatrix_row,
                                   &(grid->this_process_row),
                                   &(grid->n_process_rows));
        const int iacol = indxg2p_(&submatrix_column,
                                   &(grid->this_process_column),
                                   &first_process_column,
                                   &(grid->n_process_columns));
        const int mp0   = numroc_(&n_rows,
                                  &(grid->this_process_row),
                                  &(grid->n_process_rows));
        const int nq0   = numroc_(&n_columns,
                                  &(grid->this_process_column),
                                  &(grid->n_process_columns));

        if (type == 'O' || type == '1')
        else if (type == 'I')

        const NumberType *A_loc = this->values.begin();

    grid->send_to_inactive(&res);
  template <typename NumberType>
           ExcMessage("norms can be called in matrix state only."));
           ExcMessage("Matrix has to be symmetric for this operation."));
    std::lock_guard<std::mutex> lock(mutex);
    NumberType res = 0.;

    if (grid->mpi_process_is_active)
          ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
        const int v2 = lcm / (grid->n_process_rows);

        const int IAROW = indxg2p_(&submatrix_row,
                                   &(grid->this_process_row),
                                   &(grid->n_process_rows));
        const int IACOL = indxg2p_(&submatrix_column,
                                   &(grid->this_process_column),
                                   &first_process_column,
                                   &(grid->n_process_columns));
        const int Np0   = numroc_(&n_columns,
                                  &(grid->this_process_row),
                                  &(grid->n_process_rows));
        const int Nq0   = numroc_(&n_columns,
                                  &(grid->this_process_column),
                                  &(grid->n_process_columns));

        const int v1  = iceil_(&Np0, &row_block_size);
        const int ldw = (n_local_rows == n_local_columns) ?
                          row_block_size * iceil_(&v1, &v2);

          (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;

        const NumberType *A_loc = this->values.begin();

    grid->send_to_inactive(&res);
# ifdef DEAL_II_WITH_HDF5

    create_HDF5_state_enum_id(hid_t &state_enum_id)
      herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
      status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
      status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
      status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
      status = H5Tenum_insert(state_enum_id, "lu", &val);
      status = H5Tenum_insert(state_enum_id, "matrix", &val);
      status = H5Tenum_insert(state_enum_id, "svd", &val);
      status = H5Tenum_insert(state_enum_id, "unusable", &val);

    create_HDF5_property_enum_id(hid_t &property_enum_id)
      herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
      status = H5Tenum_insert(property_enum_id, "general", &prop);
      status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
      status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
      status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
      status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
  template <typename NumberType>
    const std::string &                          filename,
    const std::pair<unsigned int, unsigned int> &chunk_size) const
# ifndef DEAL_II_WITH_HDF5

    std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
        chunks_size_.first  = n_rows;
        chunks_size_.second = 1;
    Assert(chunks_size_.first > 0,
           ExcMessage("The row chunk size must be larger than 0."));
    Assert(chunks_size_.second > 0,
           ExcMessage("The column chunk size must be larger than 0."));

# ifdef H5_HAVE_PARALLEL
    save_parallel(filename, chunks_size_);
    save_serial(filename, chunks_size_);
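  // Illustrative usage sketch (placeholder file name): write the distributed
  // matrix, together with its state and property, to an HDF5 archive and read
  // it back into another matrix of the same size.
  //
  //   A.save("matrix.h5");
  //   ScaLAPACKMatrix<double> B(A.m(), A.n(), grid, block_size, block_size);
  //   B.load("matrix.h5");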
  template <typename NumberType>
    const std::string &                          filename,
    const std::pair<unsigned int, unsigned int> &chunk_size) const
# ifndef DEAL_II_WITH_HDF5

    const auto column_grid =
      std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,

    const int MB = n_rows, NB = n_columns;

    if (tmp.grid->mpi_process_is_active)
          H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

        hsize_t chunk_dims[2];
        hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
        status = H5Pset_chunk(data_property, 2, chunk_dims);

        dims[0] = n_columns;
        hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);

        hid_t dataset_id = H5Dcreate2(file_id,
          dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());

        hid_t state_enum_id, property_enum_id;
        internal::create_HDF5_state_enum_id(state_enum_id);
        internal::create_HDF5_property_enum_id(property_enum_id);

        hsize_t dims_state[1];
        hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
        hid_t state_enum_dataset = H5Dcreate2(file_id,
                                              state_enum_dataspace,
        status = H5Dwrite(state_enum_dataset,

        hsize_t dims_property[1];
        dims_property[0] = 1;
        hid_t property_enum_dataspace =
          H5Screate_simple(1, dims_property, nullptr);
        hid_t property_enum_dataset = H5Dcreate2(file_id,
                                                 property_enum_dataspace,
        status = H5Dwrite(property_enum_dataset,

        status = H5Dclose(dataset_id);
        status = H5Dclose(state_enum_dataset);
        status = H5Dclose(property_enum_dataset);
        status = H5Sclose(dataspace_id);
        status = H5Sclose(state_enum_dataspace);
        status = H5Sclose(property_enum_dataspace);
        status = H5Tclose(state_enum_id);
        status = H5Tclose(property_enum_id);
        status = H5Pclose(data_property);
        status = H5Fclose(file_id);
  template <typename NumberType>
    const std::string &                          filename,
    const std::pair<unsigned int, unsigned int> &chunk_size) const
# ifndef DEAL_II_WITH_HDF5

    MPI_Info info = MPI_INFO_NULL;

    const auto column_grid =
      std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,

    const int MB = n_rows;

    hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
    status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
      H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
    status = H5Pclose(plist_id);

    hid_t filespace = H5Screate_simple(2, dims, nullptr);

    hsize_t chunk_dims[2];
    plist_id = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(plist_id, 2, chunk_dims);
    hid_t dset_id = H5Dcreate2(
      file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
    status = H5Sclose(filespace);
    status = H5Pclose(plist_id);

                  proc_n_local_rows.data(),
                  tmp.grid->mpi_communicator);
                  proc_n_local_columns.data(),
                  tmp.grid->mpi_communicator);

    const unsigned int my_rank(

    hid_t memspace = H5Screate_simple(2, count, nullptr);

    hsize_t offset[2] = {0};
    for (unsigned int i = 0; i < my_rank; ++i)
      offset[0] += proc_n_local_columns[i];

    filespace = H5Dget_space(dset_id);
    status    = H5Sselect_hyperslab(
      filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);

    plist_id = H5Pcreate(H5P_DATASET_XFER);
    status   = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);

    status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);

    status = H5Dclose(dset_id);
    status = H5Sclose(filespace);
    status = H5Sclose(memspace);
    status = H5Pclose(plist_id);
    status = H5Fclose(file_id);

    ierr = MPI_Barrier(tmp.grid->mpi_communicator);

    if (tmp.grid->this_mpi_process == 0)
        hid_t file_id_reopen =
          H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);

        hid_t state_enum_id, property_enum_id;
        internal::create_HDF5_state_enum_id(state_enum_id);
        internal::create_HDF5_property_enum_id(property_enum_id);

        hsize_t dims_state[1];
        hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
        hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
                                              state_enum_dataspace,
        status = H5Dwrite(state_enum_dataset,

        hsize_t dims_property[1];
        dims_property[0] = 1;
        hid_t property_enum_dataspace =
          H5Screate_simple(1, dims_property, nullptr);
        hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
                                                 property_enum_dataspace,
        status = H5Dwrite(property_enum_dataset,

        status = H5Dclose(state_enum_dataset);
        status = H5Dclose(property_enum_dataset);
        status = H5Sclose(state_enum_dataspace);
        status = H5Sclose(property_enum_dataspace);
        status = H5Tclose(state_enum_id);
        status = H5Tclose(property_enum_id);
        status = H5Fclose(file_id_reopen);
  template <typename NumberType>
# ifndef DEAL_II_WITH_HDF5
# ifdef H5_HAVE_PARALLEL
    load_parallel(filename);
    load_serial(filename);
  template <typename NumberType>
# ifndef DEAL_II_WITH_HDF5

    const auto one_grid =
      std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,

    const int MB = n_rows, NB = n_columns;

    int property_int = -1;

    if (tmp.grid->mpi_process_is_active)
        hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);

        hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

        hid_t datatype         = H5Dget_type(dataset_id);
        H5T_class_t t_class_in = H5Tget_class(datatype);
          t_class_in == t_class,
            "The data type of the matrix to be read does not match the archive"));

        hid_t dataspace_id = H5Dget_space(dataset_id);
        const int ndims    = H5Sget_simple_extent_ndims(dataspace_id);
        H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
          static_cast<int>(dims[0]) == n_columns,
            "The number of columns of the matrix does not match the content of the archive"));
          static_cast<int>(dims[1]) == n_rows,
            "The number of rows of the matrix does not match the content of the archive"));

        status = H5Dread(dataset_id,

        hid_t state_enum_id, property_enum_id;
        internal::create_HDF5_state_enum_id(state_enum_id);
        internal::create_HDF5_property_enum_id(property_enum_id);

        hid_t dataset_state_id    = H5Dopen2(file_id, "/state", H5P_DEFAULT);
        hid_t datatype_state      = H5Dget_type(dataset_state_id);
        H5T_class_t t_class_state = H5Tget_class(datatype_state);

        hid_t dataset_property_id    = H5Dopen2(file_id, "/property", H5P_DEFAULT);
        hid_t datatype_property      = H5Dget_type(dataset_property_id);
        H5T_class_t t_class_property = H5Tget_class(datatype_property);

        hid_t dataspace_state    = H5Dget_space(dataset_state_id);
        hid_t dataspace_property = H5Dget_space(dataset_property_id);

        const int ndims_state    = H5Sget_simple_extent_ndims(dataspace_state);
        const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);

        hsize_t dims_state[1];
        H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
        hsize_t dims_property[1];
        H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);

        status = H5Dread(dataset_state_id,
        state_int = static_cast<int>(tmp.state);

        status = H5Dread(dataset_property_id,
        property_int = static_cast<int>(tmp.property);

        status = H5Sclose(dataspace_id);
        status = H5Sclose(dataspace_state);
        status = H5Sclose(dataspace_property);
        status = H5Tclose(datatype);
        status = H5Tclose(state_enum_id);
        status = H5Tclose(property_enum_id);
        status = H5Dclose(dataset_state_id);
        status = H5Dclose(dataset_id);
        status = H5Dclose(dataset_property_id);
        status = H5Fclose(file_id);

    tmp.grid->send_to_inactive(&state_int, 1);
    tmp.grid->send_to_inactive(&property_int, 1);
# endif // DEAL_II_WITH_HDF5
  template <typename NumberType>
# ifndef DEAL_II_WITH_HDF5
# ifndef H5_HAVE_PARALLEL

    MPI_Info info = MPI_INFO_NULL;

    const auto column_grid =
      std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,

    const int MB = n_rows;

    hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
    status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);

    hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
    status        = H5Pclose(plist_id);

    hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

    hid_t datatype_inp      = H5Dget_type(dataset_id);
    H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
    H5T_class_t t_class     = H5Tget_class(datatype);
      t_class_inp == t_class,
        "The data type of the matrix to be read does not match the archive"));

    hid_t dataspace_id = H5Dget_space(dataset_id);
    const int ndims    = H5Sget_simple_extent_ndims(dataspace_id);
    status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
      static_cast<int>(dims[0]) == n_columns,
        "The number of columns of the matrix does not match the content of the archive"));
      static_cast<int>(dims[1]) == n_rows,
        "The number of rows of the matrix does not match the content of the archive"));

                  proc_n_local_rows.data(),
                  tmp.grid->mpi_communicator);
                  proc_n_local_columns.data(),
                  tmp.grid->mpi_communicator);

    const unsigned int my_rank(

    hsize_t offset[2] = {0};
    for (unsigned int i = 0; i < my_rank; ++i)
      offset[0] += proc_n_local_columns[i];

    status = H5Sselect_hyperslab(
      dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);

    hid_t memspace = H5Screate_simple(2, count, nullptr);

      H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);

    hid_t state_enum_id, property_enum_id;
    internal::create_HDF5_state_enum_id(state_enum_id);
    internal::create_HDF5_property_enum_id(property_enum_id);

    hid_t dataset_state_id    = H5Dopen2(file_id, "/state", H5P_DEFAULT);
    hid_t datatype_state      = H5Dget_type(dataset_state_id);
    H5T_class_t t_class_state = H5Tget_class(datatype_state);

    hid_t dataset_property_id    = H5Dopen2(file_id, "/property", H5P_DEFAULT);
    hid_t datatype_property      = H5Dget_type(dataset_property_id);
    H5T_class_t t_class_property = H5Tget_class(datatype_property);

    hid_t dataspace_state    = H5Dget_space(dataset_state_id);
    hid_t dataspace_property = H5Dget_space(dataset_property_id);

    const int ndims_state    = H5Sget_simple_extent_ndims(dataspace_state);
    const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);

    hsize_t dims_state[1];
    H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
    hsize_t dims_property[1];
    H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);

      dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);

    status = H5Dread(dataset_property_id,

    status = H5Sclose(memspace);
    status = H5Dclose(dataset_id);
    status = H5Dclose(dataset_state_id);
    status = H5Dclose(dataset_property_id);
    status = H5Sclose(dataspace_id);
    status = H5Sclose(dataspace_state);
    status = H5Sclose(dataspace_property);
    status = H5Tclose(state_enum_id);
    status = H5Tclose(property_enum_id);
    status = H5Fclose(file_id);
# endif // H5_HAVE_PARALLEL
# endif // DEAL_II_WITH_HDF5
    template <typename NumberType>
      for (unsigned int i = 0; i < matrix.local_n(); ++i)
          const NumberType s = factors[matrix.global_column(i)];

          for (unsigned int j = 0; j < matrix.local_m(); ++j)
            matrix.local_el(j, i) *= s;

    template <typename NumberType>
      for (unsigned int i = 0; i < matrix.local_m(); ++i)
          const NumberType s = factors[matrix.global_row(i)];

          for (unsigned int j = 0; j < matrix.local_n(); ++j)
            matrix.local_el(i, j) *= s;
  template <typename NumberType>
  template <class InputVector>
    if (this->grid->mpi_process_is_active)

  template <typename NumberType>
  template <class InputVector>
    if (this->grid->mpi_process_is_active)
# include "scalapack.inst"

#endif // DEAL_II_WITH_SCALAPACK