#include <deal.II/lac/scalapack.h>

#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/base/array_view.h>
#  include <deal.II/base/mpi.h>
#  include <deal.II/base/mpi.templates.h>
#  include <deal.II/base/std_cxx14/memory.h>

#  include <deal.II/lac/scalapack.templates.h>

#  ifdef DEAL_II_WITH_HDF5
#    include <hdf5.h>
#  endif

DEAL_II_NAMESPACE_OPEN
#  ifdef DEAL_II_WITH_HDF5

// Map a C++ number type to the corresponding native HDF5 type id.
template <typename number>
inline hid_t
hdf5_type_id(const number *)
{
  Assert(false, ExcNotImplemented());
  return -1;
}

inline hid_t
hdf5_type_id(const double *)
{
  return H5T_NATIVE_DOUBLE;
}

inline hid_t
hdf5_type_id(const float *)
{
  return H5T_NATIVE_FLOAT;
}

inline hid_t
hdf5_type_id(const int *)
{
  return H5T_NATIVE_INT;
}

inline hid_t
hdf5_type_id(const unsigned int *)
{
  return H5T_NATIVE_UINT;
}

inline hid_t
hdf5_type_id(const char *)
{
  return H5T_NATIVE_CHAR;
}
#  endif // DEAL_II_WITH_HDF5



template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const size_type                                           n_rows_,
  const size_type                                           n_columns_,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           row_block_size_,
  const size_type                                           column_block_size_,
  const LAPACKSupport::Property                             property_)
  : uplo('L')
  , first_process_row(0)
  , first_process_column(0)
  , submatrix_row(1)
  , submatrix_column(1)
{
  reinit(n_rows_,
         n_columns_,
         process_grid,
         row_block_size_,
         column_block_size_,
         property_);
}
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const size_type                                           size,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           block_size,
  const LAPACKSupport::Property                             property)
  : ScaLAPACKMatrix<NumberType>(size,
                                size,
                                process_grid,
                                block_size,
                                block_size,
                                property)
{}
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const std::string &                                       filename,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           row_block_size,
  const size_type                                           column_block_size)
  : uplo('L')
  , first_process_row(0)
  , first_process_column(0)
  , submatrix_row(1)
  , submatrix_column(1)
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)process_grid;
  (void)row_block_size;
  (void)column_block_size;
  Assert(
    false,
    ExcMessage(
      "This function is only available when deal.II is configured with HDF5"));
#  else
  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));

  // The root process reads the matrix dimensions from the archive and
  // broadcasts them; the matrix content is read afterwards via load().
  if (this_mpi_process == 0)
    {
      herr_t status = 0;

      // open the archive read-only and access the "/matrix" data set
      hid_t file    = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
      hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);

      // query the dimensions of the data set
      hid_t filespace = H5Dget_space(dataset);
      int   rank      = H5Sget_simple_extent_ndims(filespace);
      AssertThrow(rank == 2, ExcIO());
      hsize_t dims[2];
      status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
      AssertThrow(status >= 0, ExcIO());

      // due to ScaLAPACK's column-major storage the dimensions are swapped
      n_rows    = dims[1];
      n_columns = dims[0];

      // release resources
      status = H5Sclose(filespace);
      status = H5Dclose(dataset);
      status = H5Fclose(file);
      AssertThrow(status >= 0, ExcIO());
    }

  int ierr = MPI_Bcast(&n_rows,
                       1,
                       Utilities::MPI::internal::mpi_type_id(&n_rows),
                       0 /*root*/,
                       process_grid->mpi_communicator);
  AssertThrowMPI(ierr);

  ierr = MPI_Bcast(&n_columns,
                   1,
                   Utilities::MPI::internal::mpi_type_id(&n_columns),
                   0 /*root*/,
                   process_grid->mpi_communicator);
  AssertThrowMPI(ierr);

  // allocate the matrix and read its content from the archive
  reinit(n_rows,
         n_columns,
         process_grid,
         row_block_size,
         column_block_size,
         LAPACKSupport::Property::general);

  load(filename.c_str());
#  endif // DEAL_II_WITH_HDF5
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(
  const size_type                                           n_rows_,
  const size_type                                           n_columns_,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           row_block_size_,
  const size_type                                           column_block_size_,
  const LAPACKSupport::Property                             property_)
{
  Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
  Assert(column_block_size_ > 0,
         ExcMessage("Column block size has to be positive."));
  Assert(
    row_block_size_ <= n_rows_,
    ExcMessage(
      "Row block size can not be greater than the number of rows of the matrix"));
  Assert(
    column_block_size_ <= n_columns_,
    ExcMessage(
      "Column block size can not be greater than the number of columns of the matrix"));

  state             = LAPACKSupport::State::matrix;
  property          = property_;
  grid              = process_grid;
  n_rows            = n_rows_;
  n_columns         = n_columns_;
  row_block_size    = row_block_size_;
  column_block_size = column_block_size_;

  if (grid->mpi_process_is_active)
    {
      // determine the size of the process-local block-cyclic storage
      n_local_rows    = numroc_(&n_rows,
                             &row_block_size,
                             &(grid->this_process_row),
                             &first_process_row,
                             &(grid->n_process_rows));
      n_local_columns = numroc_(&n_columns,
                                &column_block_size,
                                &(grid->this_process_column),
                                &first_process_column,
                                &(grid->n_process_columns));

      // the local leading dimension has to be at least one
      int lda = std::max(1, n_local_rows);

      int info = 0;
      descinit_(descriptor,
                &n_rows,
                &n_columns,
                &row_block_size,
                &column_block_size,
                &first_process_row,
                &first_process_column,
                &(grid->blacs_context),
                &lda,
                &info);
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));

      this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
    }
  else
    {
      // this process does not participate in the process grid
      n_local_rows    = -1;
      n_local_columns = -1;
      std::fill(std::begin(descriptor), std::end(descriptor), -1);
    }
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(
  const size_type                                           size,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type                                           block_size,
  const LAPACKSupport::Property                             property)
{
  reinit(size, size, process_grid, block_size, block_size, property);
}
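// A minimal usage sketch (illustration only, not part of this translation
// unit): distribute a square matrix over an assumed 2x2 process grid; the
// communicator <tt>mpi_communicator</tt> is hypothetical.
//
// @code
// const auto grid =
//   std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator, 2, 2);
// ScaLAPACKMatrix<double> A(1000, 1000, grid, 32, 32);
// @endcode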
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::set_property(
  const LAPACKSupport::Property property_)
{
  property = property_;
}
template <typename NumberType>
LAPACKSupport::Property
ScaLAPACKMatrix<NumberType>::get_property() const
{
  return property;
}
template <typename NumberType>
LAPACKSupport::State
ScaLAPACKMatrix<NumberType>::get_state() const
{
  return state;
}
template <typename NumberType>
ScaLAPACKMatrix<NumberType> &
ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
{
  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert(n_columns == int(matrix.n()),
         ExcDimensionMismatch(n_columns, matrix.n()));

  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              local_el(i, j)   = matrix(glob_i, glob_j);
            }
        }
    }
  state = LAPACKSupport::matrix;
  return *this;
}
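// Usage sketch (illustration only): fill a distributed matrix from a
// replicated FullMatrix and gather it back; <tt>full</tt> and <tt>grid</tt>
// are hypothetical objects with consistent content on all ranks.
//
// @code
// ScaLAPACKMatrix<double> A(full.m(), full.n(), grid, 32, 32);
// A = full;                              // scatter into block-cyclic layout
// FullMatrix<double> copy(full.m(), full.n());
// A.copy_to(copy);                       // gather entries on every process
// @endcode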
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
                                       const unsigned int                  rank)
{
  if (n_rows * n_columns == 0)
    return;

  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));

  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));
  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));

  if (this_mpi_process == rank)
    {
      // consistency checks on B are performed only on the owning rank
      // ...
    }

  // Create a 1x1 process grid that only contains process #rank; this grid is
  // used to redistribute the serial matrix B into the block-cyclic layout of
  // the present matrix with pgemr2d.
  MPI_Group group_A;
  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
  const int              n = 1;
  const std::vector<int> ranks(n, rank);
  MPI_Group              group_B;
  MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
  MPI_Comm communicator_B;
  // create communicator_B, consisting only of process #rank, via
  // Utilities::MPI::create_group() (the tag used for this is elided here)
  // ...

  int n_proc_rows_B = 1, n_proc_cols_B = 1;
  int this_process_row_B = -1, this_process_column_B = -1;
  int blacs_context_B = -1;
  if (MPI_COMM_NULL != communicator_B)
    {
      // initialize a BLACS context from the provided communicator
      blacs_context_B   = Csys2blacs_handle(communicator_B);
      const char *order = "Col";
      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
      Cblacs_gridinfo(blacs_context_B,
                      &n_proc_rows_B,
                      &n_proc_cols_B,
                      &this_process_row_B,
                      &this_process_column_B);
    }
  const bool mpi_process_is_active_B =
    (this_process_row_B >= 0 && this_process_column_B >= 0);

  // create a descriptor for matrix B
  std::vector<int> descriptor_B(9, -1);
  const int        first_process_row_B = 0, first_process_col_B = 0;

  if (mpi_process_is_active_B)
    {
      // local sizes on the 1x1 grid equal the global sizes
      int n_local_rows_B = numroc_(&n_rows,
                                   &n_rows,
                                   &this_process_row_B,
                                   &first_process_row_B,
                                   &n_proc_rows_B);
      int n_local_cols_B = numroc_(&n_columns,
                                   &n_columns,
                                   &this_process_column_B,
                                   &first_process_col_B,
                                   &n_proc_cols_B);
      (void)n_local_cols_B;

      int lda  = std::max(1, n_local_rows_B);
      int info = 0;
      descinit_(descriptor_B.data(),
                &n_rows,
                &n_columns,
                &n_rows,
                &n_columns,
                &first_process_row_B,
                &first_process_col_B,
                &blacs_context_B,
                &lda,
                &info);
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
    }

  // pgemr2d has to be called only by processes active on the grid of A
  if (this->grid->mpi_process_is_active)
    {
      NumberType *loc_vals_A =
        this->values.size() > 0 ? this->values.data() : nullptr;
      const NumberType *loc_vals_B =
        mpi_process_is_active_B ? &(B(0, 0)) : nullptr;

      pgemr2d(&n_rows,
              &n_columns,
              loc_vals_B,
              &submatrix_row,
              &submatrix_column,
              descriptor_B.data(),
              loc_vals_A,
              &submatrix_row,
              &submatrix_column,
              this->descriptor,
              &(this->grid->blacs_context));
    }
  if (mpi_process_is_active_B)
    Cblacs_gridexit(blacs_context_B);

  MPI_Group_free(&group_A);
  MPI_Group_free(&group_B);
  if (MPI_COMM_NULL != communicator_B)
    MPI_Comm_free(&communicator_B);

  state = LAPACKSupport::matrix;
}
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
{
  Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
         ExcIndexRange(loc_row, 0, n_local_rows));
  const int i = loc_row + 1;
  return indxl2g_(&i,
                  &row_block_size,
                  &(grid->this_process_row),
                  &first_process_row,
                  &(grid->n_process_rows)) -
         1;
}
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
{
  Assert(n_local_columns >= 0 &&
           loc_column < static_cast<unsigned int>(n_local_columns),
         ExcIndexRange(loc_column, 0, n_local_columns));
  const int j = loc_column + 1;
  return indxl2g_(&j,
                  &column_block_size,
                  &(grid->this_process_column),
                  &first_process_column,
                  &(grid->n_process_columns)) -
         1;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
                                     const unsigned int            rank) const
{
  if (n_rows * n_columns == 0)
    return;

  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));

  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));
  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));

  if (this_mpi_process == rank)
    {
      // consistency checks on B are performed only on the owning rank
      // ...
    }

  // Create a 1x1 process grid that only contains process #rank, which will
  // receive the content of the distributed matrix via pgemr2d.
  MPI_Group group_A;
  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
  const int              n = 1;
  const std::vector<int> ranks(n, rank);
  MPI_Group              group_B;
  MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
  MPI_Comm communicator_B;
  // create communicator_B, consisting only of process #rank, via
  // Utilities::MPI::create_group() (the tag used for this is elided here)
  // ...

  int n_proc_rows_B = 1, n_proc_cols_B = 1;
  int this_process_row_B = -1, this_process_column_B = -1;
  int blacs_context_B = -1;
  if (MPI_COMM_NULL != communicator_B)
    {
      // initialize a BLACS context from the provided communicator
      blacs_context_B   = Csys2blacs_handle(communicator_B);
      const char *order = "Col";
      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
      Cblacs_gridinfo(blacs_context_B,
                      &n_proc_rows_B,
                      &n_proc_cols_B,
                      &this_process_row_B,
                      &this_process_column_B);
    }
  const bool mpi_process_is_active_B =
    (this_process_row_B >= 0 && this_process_column_B >= 0);

  // create a descriptor for matrix B
  std::vector<int> descriptor_B(9, -1);
  const int        first_process_row_B = 0, first_process_col_B = 0;

  if (mpi_process_is_active_B)
    {
      // local sizes on the 1x1 grid equal the global sizes
      int n_local_rows_B = numroc_(&n_rows,
                                   &n_rows,
                                   &this_process_row_B,
                                   &first_process_row_B,
                                   &n_proc_rows_B);
      int n_local_cols_B = numroc_(&n_columns,
                                   &n_columns,
                                   &this_process_column_B,
                                   &first_process_col_B,
                                   &n_proc_cols_B);
      (void)n_local_cols_B;

      int lda  = std::max(1, n_local_rows_B);
      int info = 0;
      descinit_(descriptor_B.data(),
                &n_rows,
                &n_columns,
                &n_rows,
                &n_columns,
                &first_process_row_B,
                &first_process_col_B,
                &blacs_context_B,
                &lda,
                &info);
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
    }
  // pgemr2d has to be called only by processes active on the grid of A
  if (this->grid->mpi_process_is_active)
    {
      const NumberType *loc_vals_A =
        this->values.size() > 0 ? this->values.data() : nullptr;
      NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;

      pgemr2d(&n_rows,
              &n_columns,
              loc_vals_A,
              &submatrix_row,
              &submatrix_column,
              this->descriptor,
              loc_vals_B,
              &submatrix_row,
              &submatrix_column,
              descriptor_B.data(),
              &(this->grid->blacs_context));
    }
  if (mpi_process_is_active_B)
    Cblacs_gridexit(blacs_context_B);

  MPI_Group_free(&group_A);
  MPI_Group_free(&group_B);
  if (MPI_COMM_NULL != communicator_B)
    MPI_Comm_free(&communicator_B);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
{
  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert(n_columns == int(matrix.n()),
         ExcDimensionMismatch(n_columns, matrix.n()));

  matrix = 0.;
  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              matrix(glob_i, glob_j) = local_el(i, j);
            }
        }
    }
  Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);

  // After a Cholesky factorization only the triangle referenced by uplo is
  // meaningful; set the other triangle to zero.
  if (state == LAPACKSupport::cholesky)
    {
      if (uplo == 'L')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = i + 1; j < matrix.m(); ++j)
            matrix(i, j) = 0.;
      else
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = 0; j < i; ++j)
            matrix(i, j) = 0.;
    }
  // For a symmetric inverse only one triangle is stored; mirror it into the
  // other one.
  else if (property == LAPACKSupport::symmetric &&
           state == LAPACKSupport::inverse_matrix)
    {
      if (uplo == 'L')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = i + 1; j < matrix.m(); ++j)
            matrix(i, j) = matrix(j, i);
      else if (uplo == 'U')
        for (unsigned int i = 0; i < matrix.n(); ++i)
          for (unsigned int j = 0; j < i; ++j)
            matrix(i, j) = matrix(j, i);
    }
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(
  ScaLAPACKMatrix<NumberType> &                B,
  const std::pair<unsigned int, unsigned int> &offset_A,
  const std::pair<unsigned int, unsigned int> &offset_B,
  const std::pair<unsigned int, unsigned int> &submatrix_size) const
{
  // nothing to do if the submatrix is empty
  if (submatrix_size.first == 0 || submatrix_size.second == 0)
    return;

  // range checks on the offsets and the submatrix size
  // ...

  // Both matrices have to be attached to the same MPI communicator.
  int ierr, comparison;
  ierr = MPI_Comm_compare(grid->mpi_communicator,
                          B.grid->mpi_communicator,
                          &comparison);
  AssertThrowMPI(ierr);
  Assert(comparison == MPI_IDENT,
         ExcMessage("Matrix A and B must have a common MPI Communicator"));

  // Create a virtual BLACS grid containing all processes of the common
  // communicator; it is used for the call to pgemr2d below.
  int         union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
  const char *order               = "Col";
  int         union_n_process_rows =
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
  int union_n_process_columns = 1;
  Cblacs_gridinit(&union_blacs_context,
                  order,
                  union_n_process_rows,
                  union_n_process_columns);

  int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
  Cblacs_gridinfo(this->grid->blacs_context,
                  &n_grid_rows_A,
                  &n_grid_columns_A,
                  &my_row_A,
                  &my_column_A);

  // check whether this process is in the BLACS context of matrix A
  const bool in_context_A =
    (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
    (my_column_A >= 0 && my_column_A < n_grid_columns_A);

  int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
  Cblacs_gridinfo(B.grid->blacs_context,
                  &n_grid_rows_B,
                  &n_grid_columns_B,
                  &my_row_B,
                  &my_column_B);

  // check whether this process is in the BLACS context of matrix B
  const bool in_context_B =
    (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
    (my_column_B >= 0 && my_column_B < n_grid_columns_B);

  const int n_rows_submatrix    = submatrix_size.first;
  const int n_columns_submatrix = submatrix_size.second;

  // ScaLAPACK indexing starts at 1
  int ia = offset_A.first + 1, ja = offset_A.second + 1;
  int ib = offset_B.first + 1, jb = offset_B.second + 1;

  std::array<int, 9> desc_A, desc_B;

  const NumberType *loc_vals_A = nullptr;
  NumberType *      loc_vals_B = nullptr;

  // Set the local values and descriptors only on processes belonging to the
  // respective BLACS context.
  if (in_context_A)
    {
      if (this->values.size() != 0)
        loc_vals_A = this->values.data();

      for (unsigned int i = 0; i < desc_A.size(); ++i)
        desc_A[i] = this->descriptor[i];
    }
  if (in_context_B)
    {
      if (B.values.size() != 0)
        loc_vals_B = B.values.data();

      for (unsigned int i = 0; i < desc_B.size(); ++i)
        desc_B[i] = B.descriptor[i];
    }

  pgemr2d(&n_rows_submatrix,
          &n_columns_submatrix,
          loc_vals_A,
          &ia,
          &ja,
          desc_A.data(),
          loc_vals_B,
          &ib,
          &jb,
          desc_B.data(),
          &union_blacs_context);

  // release the virtual BLACS grid
  Cblacs_gridexit(union_blacs_context);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
{
  if (this->grid->mpi_process_is_active)
    Assert(
      this->descriptor[0] == 1,
      ExcMessage(
        "Copying of ScaLAPACK matrices only implemented for dense matrices"));
  if (dest.grid->mpi_process_is_active)
    Assert(
      dest.descriptor[0] == 1,
      ExcMessage(
        "Copying of ScaLAPACK matrices only implemented for dense matrices"));

  if (this->grid != dest.grid)
    {
      // The matrices live on different process grids; use a communicator
      // that covers the union of both grids and redistribute with pgemr2d.
      int       ierr = 0;
      MPI_Group group_source, group_dest, group_union;
      ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
      AssertThrowMPI(ierr);
      ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_union(group_source, group_dest, &group_union);
      AssertThrowMPI(ierr);
      MPI_Comm mpi_communicator_union;
      // create the union communicator from group_union via
      // Utilities::MPI::create_group() (communicator/tag arguments elided)
      // ...

      // create a virtual BLACS grid covering the union communicator
      int         union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
      const char *order               = "Col";
      int         union_n_process_rows =
        Utilities::MPI::n_mpi_processes(mpi_communicator_union);
      int union_n_process_columns = 1;
      Cblacs_gridinit(&union_blacs_context,
                      order,
                      union_n_process_rows,
                      union_n_process_columns);

      const NumberType *loc_vals_source = nullptr;
      NumberType *      loc_vals_dest   = nullptr;

      if (this->grid->mpi_process_is_active && (this->values.size() > 0))
        {
          AssertThrow(this->values.size() > 0,
                      ExcMessage(
                        "source: process is active but local matrix empty"));
          loc_vals_source = this->values.data();
        }
      if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
        {
          AssertThrow(dest.values.size() > 0,
                      ExcMessage(
                        "destination: process is active but local matrix empty"));
          loc_vals_dest = dest.values.data();
        }
      pgemr2d(&n_rows,
              &n_columns,
              loc_vals_source,
              &submatrix_row,
              &submatrix_column,
              descriptor,
              loc_vals_dest,
              &dest.submatrix_row,
              &dest.submatrix_column,
              dest.descriptor,
              &union_blacs_context);

      Cblacs_gridexit(union_blacs_context);

      if (mpi_communicator_union != MPI_COMM_NULL)
        {
          ierr = MPI_Comm_free(&mpi_communicator_union);
          AssertThrowMPI(ierr);
        }
      ierr = MPI_Group_free(&group_source);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&group_dest);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&group_union);
      AssertThrowMPI(ierr);
    }
  else
    // both matrices are defined on the same process grid
    if (this->grid->mpi_process_is_active)
      dest.values = this->values;

  dest.state    = state;
  dest.property = property;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_transposed(
  const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 0, 1, true);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
                                 const NumberType                   alpha,
                                 const NumberType                   beta,
                                 const bool                         transpose_B)
{
  // dimension and block-size checks on A and B
  // ...
  Assert(this->grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));

  if (this->grid->mpi_process_is_active)
    {
      char        trans_b = transpose_B ? 'T' : 'N';
      NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =
        (B.values.size() > 0) ? B.values.data() : nullptr;

      // A := alpha*A + beta*op(B)
      pgeadd(&trans_b,
             &n_rows,
             &n_columns,
             &beta,
             B_loc,
             &B.submatrix_row,
             &B.submatrix_column,
             B.descriptor,
             &alpha,
             A_loc,
             &submatrix_row,
             &submatrix_column,
             descriptor);
    }
  state = LAPACKSupport::matrix;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const NumberType                   a,
                                 const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 1, a, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tadd(const NumberType                   a,
                                  const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 1, a, true);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mult(const NumberType                   b,
                                  const ScaLAPACKMatrix<NumberType> &B,
                                  const NumberType                   c,
                                  ScaLAPACKMatrix<NumberType> &      C,
                                  const bool                         transpose_A,
                                  const bool                         transpose_B) const
{
  Assert(this->grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));
  Assert(C.grid == B.grid,
         ExcMessage("The matrices B and C need to have the same process grid"));

  // Consistency checks on the dimensions and block sizes of A, B and C for
  // the four combinations of op(A) and op(B).
  if (!transpose_A && !transpose_B)
    {
      Assert(this->n_rows == C.n_rows,
             ExcDimensionMismatch(this->n_rows, C.n_rows));
      Assert(this->row_block_size == C.row_block_size,
             ExcDimensionMismatch(this->row_block_size, C.row_block_size));
      // ...
    }
  else if (transpose_A && !transpose_B)
    {
      Assert(this->n_columns == C.n_rows,
             ExcDimensionMismatch(this->n_columns, C.n_rows));
      Assert(this->column_block_size == C.row_block_size,
             ExcDimensionMismatch(this->column_block_size, C.row_block_size));
      // ...
    }
  else if (!transpose_A && transpose_B)
    {
      Assert(this->n_rows == C.n_rows,
             ExcDimensionMismatch(this->n_rows, C.n_rows));
      Assert(this->row_block_size == C.row_block_size,
             ExcDimensionMismatch(this->row_block_size, C.row_block_size));
      // ...
    }
  else // transpose_A && transpose_B
    {
      Assert(this->n_columns == C.n_rows,
             ExcDimensionMismatch(this->n_columns, C.n_rows));
      Assert(this->column_block_size == C.row_block_size,
             ExcDimensionMismatch(this->column_block_size, C.row_block_size));
      // ...
    }

  if (this->grid->mpi_process_is_active)
    {
      char trans_a = transpose_A ? 'T' : 'N';
      char trans_b = transpose_B ? 'T' : 'N';

      const NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =
        (B.values.size() > 0) ? B.values.data() : nullptr;
      NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
      int         m     = C.n_rows;
      int         n     = C.n_columns;
      int         k     = transpose_A ? this->n_rows : this->n_columns;

      // C := b * op(A) * op(B) + c * C
      pgemm(&trans_a,
            &trans_b,
            &m,
            &n,
            &k,
            &b,
            A_loc,
            &(this->submatrix_row),
            &(this->submatrix_column),
            this->descriptor,
            B_loc,
            &B.submatrix_row,
            &B.submatrix_column,
            B.descriptor,
            &c,
            C_loc,
            &C.submatrix_row,
            &C.submatrix_column,
            C.descriptor);
    }
  C.state = LAPACKSupport::matrix;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> &      C,
                                   const ScaLAPACKMatrix<NumberType> &B,
                                   const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, false, false);
  else
    mult(1., B, 0, C, false, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> &      C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, true, false);
  else
    mult(1., B, 0, C, true, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> &      C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, false, true);
  else
    mult(1., B, 0, C, false, true);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> &      C,
                                     const ScaLAPACKMatrix<NumberType> &B,
                                     const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, true, true);
  else
    mult(1., B, 0, C, true, true);
}
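// Usage sketch (illustration only): form C = A^T * B for matrices sharing
// the same (hypothetical) process grid <tt>grid</tt> with consistent
// dimensions.
//
// @code
// ScaLAPACKMatrix<double> A(100, 50, grid, 16, 16);
// ScaLAPACKMatrix<double> B(100, 80, grid, 16, 16);
// ScaLAPACKMatrix<double> C( 50, 80, grid, 16, 16);
// A.Tmmult(C, B); // C = A^T B
// @endcode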
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
{
  Assert(
    n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
    ExcMessage(
      "Cholesky factorization can be applied to symmetric matrices only."));
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      int         info  = 0;
      NumberType *A_loc = this->values.data();
      // factorize the referenced triangle in place (p_potrf)
      ppotrf(&uplo,
             &n_columns,
             A_loc,
             &submatrix_row,
             &submatrix_column,
             descriptor,
             &info);
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
    }
  state = LAPACKSupport::cholesky;
}
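// Usage sketch (illustration only): factor a symmetric positive definite
// matrix and estimate its reciprocal condition number. Note that l1_norm()
// has to be called while the matrix is still in the LAPACKSupport::matrix
// state.
//
// @code
// A.set_property(LAPACKSupport::Property::symmetric);
// const double a_norm = A.l1_norm();
// A.compute_cholesky_factorization();
// const double rcond = A.reciprocal_condition_number(a_norm);
// @endcode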
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      int         info  = 0;
      NumberType *A_loc = this->values.data();

      const int iarow = indxg2p_(&submatrix_row,
                                 &row_block_size,
                                 &(grid->this_process_row),
                                 &first_process_row,
                                 &(grid->n_process_rows));
      const int mp    = numroc_(&n_rows,
                             &row_block_size,
                             &(grid->this_process_row),
                             &iarow,
                             &(grid->n_process_rows));
      ipiv.resize(mp + row_block_size);

      // LU factorization with partial pivoting (p_getrf)
      // ...
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
    }
  state = LAPACKSupport::State::lu;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::invert()
{
  // A matrix that went through a Cholesky factorization was symmetric.
  const bool is_symmetric = (property == LAPACKSupport::symmetric ||
                             state == LAPACKSupport::State::cholesky);

  // An unfactorized triangular matrix is inverted in place.
  const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
                              property == LAPACKSupport::lower_triangular) &&
                             (state == LAPACKSupport::State::matrix ||
                              state == LAPACKSupport::State::inverse_matrix);

  if (is_triangular)
    {
      if (grid->mpi_process_is_active)
        {
          const char uploTriangular =
            property == LAPACKSupport::upper_triangular ? 'U' : 'L';
          const char  diag  = 'N';
          int         info  = 0;
          NumberType *A_loc = this->values.data();
          ptrtri(&uploTriangular,
                 &diag,
                 &n_columns,
                 A_loc,
                 &submatrix_row,
                 &submatrix_column,
                 descriptor,
                 &info);
          AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
        }
    }
  else
    {
      // If necessary, compute the factorization first.
      if (!(state == LAPACKSupport::State::lu ||
            state == LAPACKSupport::State::cholesky))
        {
          if (is_symmetric)
            compute_cholesky_factorization();
          else
            compute_lu_factorization();
        }

      if (grid->mpi_process_is_active)
        {
          int         info  = 0;
          NumberType *A_loc = this->values.data();

          if (is_symmetric)
            {
              // compute the inverse from the Cholesky factorization (p_potri)
              // ...
              property = LAPACKSupport::Property::symmetric;
            }
          else
            {
              // compute the inverse from the LU factorization (p_getri),
              // including the usual two-step workspace query
              int lwork = -1, liwork = -1;
              work.resize(1);
              iwork.resize(1);
              // ... first call with lwork == -1 performs the workspace query
              lwork  = static_cast<int>(work[0]);
              liwork = iwork[0];
              work.resize(lwork);
              iwork.resize(liwork);
              // ... second call computes the inverse
              AssertThrow(info == 0,
                          LAPACKSupport::ExcErrorCode("pgetri", info));
            }
        }
    }
  state = LAPACKSupport::State::inverse_matrix;
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)
{
  // check the validity of the index limits
  AssertIndexRange(index_limits.first, n_rows);
  AssertIndexRange(index_limits.second, n_rows);

  std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  // compute all eigenvalues/eigenvectors
  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric(compute_eigenvectors);
  else
    return eigenpairs_symmetric(compute_eigenvectors, idx);
}
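// Usage sketch (illustration only): compute the five smallest eigenvalues of
// a symmetric matrix without forming eigenvectors.
//
// @code
// A.set_property(LAPACKSupport::Property::symmetric);
// const std::vector<double> eigenvalues =
//   A.eigenpairs_symmetric_by_index(std::make_pair(0, 4), false);
// @endcode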
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)
{
  Assert(!std::isnan(value_limits.first),
         ExcMessage("value_limits.first is NaN"));
  Assert(!std::isnan(value_limits.second),
         ExcMessage("value_limits.second is NaN"));

  std::pair<unsigned int, unsigned int> indices =
    std::make_pair(numbers::invalid_unsigned_int,
                   numbers::invalid_unsigned_int);

  return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType> &    eigenvalue_limits)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));

  std::lock_guard<std::mutex> lock(mutex);

  const bool use_values  = (std::isnan(eigenvalue_limits.first) ||
                           std::isnan(eigenvalue_limits.second)) ?
                             false :
                             true;
  const bool use_indices =
    ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
     (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
      false :
      true;

  Assert(
    !(use_values && use_indices),
    ExcMessage(
      "Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // If eigenvectors are not requested, a sufficiently small dummy matrix is
  // used instead of a full copy.
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
      std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
                                                          grid,
                                                          row_block_size) :
      std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(
        grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

  // number of eigenvalues computed; upon successful exit ev contains the m
  // selected eigenvalues in ascending order
  int                     m = n_rows;
  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      // for jobz == 'N' only eigenvalues are computed, for 'V' also the
      // eigenvectors
      char jobz  = compute_eigenvectors ? 'V' : 'N';
      char range = 'A';
      // by default all eigenpairs are computed
      bool       all_eigenpairs = true;
      NumberType vl = NumberType(), vu = NumberType();
      int        il = 1, iu = 1;
      // number of eigenvectors to be returned
      int        nz     = 0;
      NumberType abstol = NumberType();
      // orfac decides which eigenvectors should be reorthogonalized
      NumberType orfac = 0;
      // indices of eigenvectors that failed to converge
      std::vector<int> ifail;
      // indices of eigenvectors belonging to a cluster of eigenvalues that
      // could not be reorthogonalized due to insufficient workspace
      std::vector<int> iclustr;
      // gaps between eigenvalue clusters
      std::vector<NumberType> gap(n_local_rows * n_local_columns);

      if (!use_indices)
        {
          if (!use_values)
            {
              range          = 'A';
              all_eigenpairs = true;
            }
          else
            {
              range          = 'V';
              all_eigenpairs = false;
              vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
              vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
            }
        }
      else
        {
          range          = 'I';
          all_eigenpairs = false;
          // Fortran uses 1-based indexing
          il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
        }
      NumberType *A_loc = this->values.data();
      // setting lwork to -1 performs a workspace query
      int         lwork  = -1;
      int         liwork = -1;
      NumberType *eigenvectors_loc =
        (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
      work.resize(1);
      iwork.resize(1);

      if (all_eigenpairs)
        {
          // workspace query and computation with p_syev
          // ...
        }
      else
        {
          char cmach = compute_eigenvectors ? 'U' : 'S';
          plamch(&(this->grid->blacs_context), &cmach, abstol);

          ifail.resize(n_rows);
          iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
          gap.resize(grid->n_process_rows * grid->n_process_columns);

          // workspace query and computation with p_syevx
          // ...
        }
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      iwork.resize(liwork);
      // second solver call performing the actual computation
      // ...
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev/psyevx", info));

      if (compute_eigenvectors)
        {
          // ... move the eigenvectors from the temporary matrix into this
          // matrix
        }

      // adapt the size of ev to fit m upon return
      while (ev.size() > static_cast<size_type>(m))
        ev.pop_back();
    }
  // send the number of computed eigenvalues to inactive processes
  grid->send_to_inactive(&m, 1);

  // inactive processes have to resize the array of eigenvalues
  if (!grid->mpi_process_is_active)
    ev.resize(m);
  // send the eigenvalues to processes not being part of the process grid
  grid->send_to_inactive(ev.data(), ev.size());

  // If only eigenvalues were requested the content of the matrix is
  // destroyed; otherwise the matrix now stores the eigenvectors.
  if (compute_eigenvectors)
    {
      property = LAPACKSupport::Property::general;
      state    = LAPACKSupport::eigenvalues;
    }
  else
    state = LAPACKSupport::unusable;

  return ev;
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)
{
  // check the validity of the index limits
  AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
  AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));

  const std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  // compute all eigenvalues/eigenvectors
  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric_MRRR(compute_eigenvectors);
  else
    return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)
{
  AssertIsFinite(value_limits.first);
  AssertIsFinite(value_limits.second);

  const std::pair<unsigned int, unsigned int> indices =
    std::make_pair(numbers::invalid_unsigned_int,
                   numbers::invalid_unsigned_int);

  return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType> &    eigenvalue_limits)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));

  std::lock_guard<std::mutex> lock(mutex);

  const bool use_values  = (std::isnan(eigenvalue_limits.first) ||
                           std::isnan(eigenvalue_limits.second)) ?
                             false :
                             true;
  const bool use_indices =
    ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
     (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
      false :
      true;

  Assert(
    !(use_values && use_indices),
    ExcMessage(
      "Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // If eigenvectors are not requested, a sufficiently small dummy matrix is
  // used instead of a full copy.
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
      std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
                                                          grid,
                                                          row_block_size) :
      std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(
        grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

  // number of eigenvalues and eigenvectors computed by p_syevr
  int                     m  = n_rows;
  int                     nz = 0;
  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      // for jobz == 'N' only eigenvalues are computed, for 'V' also the
      // eigenvectors
      char       jobz  = compute_eigenvectors ? 'V' : 'N';
      char       range = 'A';
      NumberType vl = NumberType(), vu = NumberType();
      int        il = 1, iu = 1;

      if (!use_indices)
        {
          if (use_values)
            {
              range = 'V';
              vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
              vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
            }
        }
      else
        {
          range = 'I';
          // Fortran uses 1-based indexing
          il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
        }
      NumberType *A_loc = this->values.data();

      // setting lwork to -1 performs a workspace query
      int         lwork  = -1;
      int         liwork = -1;
      NumberType *eigenvectors_loc =
        (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
      work.resize(1);
      iwork.resize(1);

      // workspace query of p_syevr
      // ...
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      liwork = iwork[0];
      iwork.resize(liwork);
      // second call of p_syevr performing the actual computation
      // ...
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));

      if (compute_eigenvectors)
        AssertThrow(
          m == nz,
          ExcMessage(
            "psyevr failed to compute all eigenvectors for the selected eigenvalues"));

      if (compute_eigenvectors)
        {
          // ... move the eigenvectors from the temporary matrix into this
          // matrix
        }

      // adapt the size of ev to fit m upon return
      while (ev.size() > static_cast<size_type>(m))
        ev.pop_back();
    }
  // send the number of computed eigenvalues to inactive processes
  grid->send_to_inactive(&m, 1);

  // inactive processes have to resize the array of eigenvalues
  if (!grid->mpi_process_is_active)
    ev.resize(m);
  // send the eigenvalues to processes not being part of the process grid
  grid->send_to_inactive(ev.data(), ev.size());

  // If only eigenvalues were requested the content of the matrix is
  // destroyed; otherwise the matrix now stores the eigenvectors.
  if (compute_eigenvectors)
    {
      property = LAPACKSupport::Property::general;
      state    = LAPACKSupport::eigenvalues;
    }
  else
    state = LAPACKSupport::unusable;

  return ev;
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
                                         ScaLAPACKMatrix<NumberType> *VT)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size == column_block_size,
         ExcDimensionMismatch(row_block_size, column_block_size));

  const bool left_singular_vectors  = (U != nullptr) ? true : false;
  const bool right_singular_vectors = (VT != nullptr) ? true : false;

  if (left_singular_vectors)
    {
      // size and distribution checks on U
      // ...
      Assert(grid->blacs_context == U->grid->blacs_context,
             ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
    }
  if (right_singular_vectors)
    {
      // size and distribution checks on VT
      // ...
      Assert(grid->blacs_context == VT->grid->blacs_context,
             ExcDimensionMismatch(grid->blacs_context,
                                  VT->grid->blacs_context));
    }
  std::lock_guard<std::mutex> lock(mutex);

  std::vector<NumberType> sv(std::min(n_rows, n_columns));

  if (grid->mpi_process_is_active)
    {
      char        jobu   = left_singular_vectors ? 'V' : 'N';
      char        jobvt  = right_singular_vectors ? 'V' : 'N';
      NumberType *A_loc  = this->values.data();
      NumberType *U_loc  = left_singular_vectors ? U->values.data() : nullptr;
      NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;
      int         info   = 0;

      // workspace query for p_gesvd (lwork == -1), followed by the actual
      // computation of the singular value decomposition
      int lwork = -1;
      work.resize(1);
      // ...
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      // ...
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
    }

  // send the singular values to processes not being part of the process grid
  grid->send_to_inactive(sv.data(), sv.size());

  state = LAPACKSupport::State::unusable;

  return sv;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
                                           const bool                   transpose)
{
  Assert(grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(B.state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix B has to be in Matrix state before calling this function."));

  // dimension checks on A and B depending on transpose
  // ...

  Assert(row_block_size == column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix A"));
  Assert(B.row_block_size == B.column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix B"));
  Assert(row_block_size == B.row_block_size,
         ExcMessage(
           "Use identical block-cyclic distribution for matrices A and B"));

  std::lock_guard<std::mutex> lock(mutex);

  if (grid->mpi_process_is_active)
    {
      char        trans = transpose ? 'T' : 'N';
      NumberType *A_loc = this->values.data();
      NumberType *B_loc = B.values.data();
      int         info  = 0;

      // workspace query for p_gels (lwork == -1), followed by the solution of
      // the least-squares problem
      int lwork = -1;
      work.resize(1);
      // ...
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      // ...
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
    }
  state = LAPACKSupport::State::unusable;
}
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size == column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix A"));
  Assert(
    ratio > 0. && ratio < 1.,
    ExcMessage(
      "input parameter ratio has to be larger than zero and smaller than 1"));

  // compute the SVD A = U * Sigma * VT
  ScaLAPACKMatrix<NumberType> U(n_rows, n_rows, grid, row_block_size,
                                row_block_size);
  ScaLAPACKMatrix<NumberType> VT(n_columns, n_columns, grid, row_block_size,
                                 row_block_size);
  std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
  AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
              ExcMessage("Matrix has rank 0"));

  // Keep all singular values larger than ratio times the largest one and
  // store their inverses.
  unsigned int            n_sv = 1;
  std::vector<NumberType> inv_sigma;
  inv_sigma.push_back(1 / sv[0]);

  for (unsigned int i = 1; i < sv.size(); ++i)
    if (sv[i] > sv[0] * ratio)
      {
        ++n_sv;
        inv_sigma.push_back(1 / sv[i]);
      }
    else
      break;

  // Only the columns of U and rows of VT belonging to the kept singular
  // values are needed for the product.
  ScaLAPACKMatrix<NumberType> U_R(n_rows, n_sv, grid, row_block_size,
                                  row_block_size);
  ScaLAPACKMatrix<NumberType> VT_R(n_sv, n_columns, grid, row_block_size,
                                   row_block_size);
  U.copy_to(U_R,
            std::make_pair(0, 0),
            std::make_pair(0, 0),
            std::make_pair(n_rows, n_sv));
  VT.copy_to(VT_R,
             std::make_pair(0, 0),
             std::make_pair(0, 0),
             std::make_pair(n_sv, n_columns));

  // scale the retained singular vectors by the inverse singular values
  // ...

  // The pseudoinverse A^+ = V * Sigma^+ * U^T has the transposed dimensions.
  this->reinit(n_columns,
               n_rows,
               this->grid,
               row_block_size,
               column_block_size,
               LAPACKSupport::Property::general);
  VT_R.mult(1, U_R, 0, *this, true, true);
  state = LAPACKSupport::State::inverse_matrix;
  return n_sv;
}
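// Usage sketch (illustration only): replace A by its pseudoinverse, keeping
// all singular values larger than 1e-8 times the largest one.
//
// @code
// const unsigned int rank = A.pseudoinverse(1e-8);
// @endcode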
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
  const NumberType a_norm) const
{
  Assert(state == LAPACKSupport::cholesky,
         ExcMessage(
           "Matrix has to be in Cholesky state before calling this function."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType rcond = 0.;

  if (grid->mpi_process_is_active)
    {
      int liwork = n_local_rows;
      iwork.resize(liwork);

      int               info  = 0;
      const NumberType *A_loc = this->values.data();

      // workspace query for p_pocon (lwork == -1), followed by the
      // computation of the condition number estimate
      int lwork = -1;
      work.resize(1);
      // ...
      lwork = static_cast<int>(std::ceil(work[0]));
      work.resize(lwork);
      // ...
      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppocon", info));
    }
  grid->send_to_inactive(&rcond);
  return rcond;
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::l1_norm() const
{
  const char type('O');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::linfty_norm() const
{
  const char type('I');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::frobenius_norm() const
{
  const char type('F');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
{
  Assert(state == LAPACKSupport::matrix ||
           state == LAPACKSupport::inverse_matrix,
         ExcMessage("norms can be called in matrix state only."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      const int iarow = indxg2p_(&submatrix_row,
                                 &row_block_size,
                                 &(grid->this_process_row),
                                 &first_process_row,
                                 &(grid->n_process_rows));
      const int iacol = indxg2p_(&submatrix_column,
                                 &column_block_size,
                                 &(grid->this_process_column),
                                 &first_process_column,
                                 &(grid->n_process_columns));
      const int mp0   = numroc_(&n_rows,
                              &row_block_size,
                              &(grid->this_process_row),
                              &iarow,
                              &(grid->n_process_rows));
      const int nq0   = numroc_(&n_columns,
                              &column_block_size,
                              &(grid->this_process_column),
                              &iacol,
                              &(grid->n_process_columns));

      // the required size of the work array depends on the requested norm
      int lwork = 0;
      if (type == 'O' || type == '1')
        lwork = nq0;
      else if (type == 'I')
        lwork = mp0;

      work.resize(lwork);
      const NumberType *A_loc = this->values.begin();
      res                     = plange(&type,
                   &n_rows,
                   &n_columns,
                   A_loc,
                   &submatrix_row,
                   &submatrix_column,
                   descriptor,
                   work.data());
    }
  grid->send_to_inactive(&res);
  return res;
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
{
  Assert(state == LAPACKSupport::matrix ||
           state == LAPACKSupport::inverse_matrix,
         ExcMessage("norms can be called in matrix state only."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));
  std::lock_guard<std::mutex> lock(mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      // the work array size follows the documentation of p_lansy
      const int lcm =
        ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
      const int v2 = lcm / (grid->n_process_rows);

      const int IAROW = indxg2p_(&submatrix_row,
                                 &row_block_size,
                                 &(grid->this_process_row),
                                 &first_process_row,
                                 &(grid->n_process_rows));
      const int IACOL = indxg2p_(&submatrix_column,
                                 &column_block_size,
                                 &(grid->this_process_column),
                                 &first_process_column,
                                 &(grid->n_process_columns));
      const int Np0   = numroc_(&n_columns /*+ IROFFA*/,
                              &row_block_size,
                              &(grid->this_process_row),
                              &IAROW,
                              &(grid->n_process_rows));
      const int Nq0   = numroc_(&n_columns /*+ ICOFFA*/,
                              &column_block_size,
                              &(grid->this_process_column),
                              &IACOL,
                              &(grid->n_process_columns));

      const int v1  = iceil_(&Np0, &row_block_size);
      const int ldw = (n_local_rows == n_local_columns) ?
                        0 :
                        row_block_size * iceil_(&v1, &v2);

      const int lwork =
        (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
      work.resize(lwork);
      const NumberType *A_loc = this->values.begin();
      res                     = plansy(&type,
                   &uplo,
                   &n_columns,
                   A_loc,
                   &submatrix_row,
                   &submatrix_column,
                   descriptor,
                   work.data());
    }
  grid->send_to_inactive(&res);
  return res;
}
#  ifdef DEAL_II_WITH_HDF5
namespace internal
{
  namespace
  {
    // Create an HDF5 enum type mirroring LAPACKSupport::State.
    void
    create_HDF5_state_enum_id(hid_t &state_enum_id)
    {
      LAPACKSupport::State val;
      state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));

      val           = LAPACKSupport::State::cholesky;
      herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
      AssertThrow(status >= 0, ExcInternalError());
      val    = LAPACKSupport::State::eigenvalues;
      status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
      val    = LAPACKSupport::State::inverse_matrix;
      status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
      val    = LAPACKSupport::State::inverse_svd;
      status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
      val    = LAPACKSupport::State::lu;
      status = H5Tenum_insert(state_enum_id, "lu", &val);
      val    = LAPACKSupport::State::matrix;
      status = H5Tenum_insert(state_enum_id, "matrix", &val);
      val    = LAPACKSupport::State::svd;
      status = H5Tenum_insert(state_enum_id, "svd", &val);
      val    = LAPACKSupport::State::unusable;
      status = H5Tenum_insert(state_enum_id, "unusable", &val);
      AssertThrow(status >= 0, ExcInternalError());
    }

    // Create an HDF5 enum type mirroring LAPACKSupport::Property.
    void
    create_HDF5_property_enum_id(hid_t &property_enum_id)
    {
      property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));

      LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
      herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
      AssertThrow(status >= 0, ExcInternalError());
      prop   = LAPACKSupport::Property::general;
      status = H5Tenum_insert(property_enum_id, "general", &prop);
      prop   = LAPACKSupport::Property::hessenberg;
      status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
      prop   = LAPACKSupport::Property::lower_triangular;
      status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
      prop   = LAPACKSupport::Property::symmetric;
      status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
      prop   = LAPACKSupport::Property::upper_triangular;
      status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
      AssertThrow(status >= 0, ExcInternalError());
    }
  } // namespace
} // namespace internal
#  endif // DEAL_II_WITH_HDF5
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save(
  const std::string &                          filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)chunk_size;
  Assert(
    false,
    ExcMessage(
      "This function is only available when deal.II is configured with HDF5"));
#  else

  std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;

  if (chunks_size_.first == numbers::invalid_unsigned_int ||
      chunks_size_.second == numbers::invalid_unsigned_int)
    {
      // default: store the matrix in chunks of columns
      chunks_size_.first  = n_rows;
      chunks_size_.second = 1;
    }
  Assert(chunks_size_.first > 0,
         ExcMessage("The row chunk size must be larger than 0."));
  Assert(chunks_size_.second > 0,
         ExcMessage("The column chunk size must be larger than 0."));

#    ifdef H5_HAVE_PARALLEL
  // implementation for configurations equipped with a parallel file system
  save_parallel(filename, chunks_size_);
#    else
  // implementation for configurations with no parallel file system
  save_serial(filename, chunks_size_);
#    endif
#  endif
}
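// Usage sketch (illustration only): write a distributed matrix to an HDF5
// archive and construct a new matrix from that archive later; the file name
// "matrix.h5" and the grid object are hypothetical.
//
// @code
// A.save("matrix.h5");
// ScaLAPACKMatrix<double> B("matrix.h5", grid, 32, 32);
// @endcode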
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_serial(
  const std::string &                          filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)chunk_size;
  Assert(false, ExcInternalError());
#  else

  // The content of the distributed matrix is copied to a matrix using a 1x1
  // process grid; that single process then writes the data to the file.
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  1);

  const int                   MB = n_rows, NB = n_columns;
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
  copy_to(tmp);

  // the 1x1 grid has only one active process and this one writes the content
  // of the matrix to the HDF5 file
  if (tmp.grid->mpi_process_is_active)
    {
      herr_t status;

      // create a new file using default properties
      hid_t file_id =
        H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

      // modify dataset creation properties, i.e. enable chunking
      hsize_t chunk_dims[2];
      // invert order of rows and columns as ScaLAPACK uses column-major
      // ordering
      chunk_dims[0]       = chunk_size.second;
      chunk_dims[1]       = chunk_size.first;
      hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
      status              = H5Pset_chunk(data_property, 2, chunk_dims);
      AssertThrow(status >= 0, ExcIO());

      // create the data space for the dataset
      hsize_t dims[2];
      dims[0]            = n_columns;
      dims[1]            = n_rows;
      hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);

      // create the dataset within the file using chunk creation properties
      hid_t type_id    = hdf5_type_id(tmp.values.data());
      hid_t dataset_id = H5Dcreate2(file_id,
                                    "/matrix",
                                    type_id,
                                    dataspace_id,
                                    H5P_DEFAULT,
                                    data_property,
                                    H5P_DEFAULT);

      // write the dataset
      status = H5Dwrite(
        dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
      AssertThrow(status >= 0, ExcIO());

      // create HDF5 enum types for LAPACKSupport::State and
      // LAPACKSupport::Property
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      // create and write the dataset for the state enum
      hsize_t dims_state[1];
      dims_state[0]              = 1;
      hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
      hid_t state_enum_dataset   = H5Dcreate2(file_id,
                                            "/state",
                                            state_enum_id,
                                            state_enum_dataspace,
                                            H5P_DEFAULT,
                                            H5P_DEFAULT,
                                            H5P_DEFAULT);
      status = H5Dwrite(state_enum_dataset,
                        state_enum_id,
                        H5S_ALL,
                        H5S_ALL,
                        H5P_DEFAULT,
                        &state);
      AssertThrow(status >= 0, ExcIO());

      // create and write the dataset for the property enum
      hsize_t dims_property[1];
      dims_property[0] = 1;
      hid_t property_enum_dataspace =
        H5Screate_simple(1, dims_property, nullptr);
      hid_t property_enum_dataset = H5Dcreate2(file_id,
                                               "/property",
                                               property_enum_id,
                                               property_enum_dataspace,
                                               H5P_DEFAULT,
                                               H5P_DEFAULT,
                                               H5P_DEFAULT);
      status = H5Dwrite(property_enum_dataset,
                        property_enum_id,
                        H5S_ALL,
                        H5S_ALL,
                        H5P_DEFAULT,
                        &property);
      AssertThrow(status >= 0, ExcIO());

      // close/release resources
      status = H5Dclose(dataset_id);
      status = H5Dclose(state_enum_dataset);
      status = H5Dclose(property_enum_dataset);
      status = H5Sclose(dataspace_id);
      status = H5Sclose(state_enum_dataspace);
      status = H5Sclose(property_enum_dataspace);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);
      status = H5Pclose(data_property);
      status = H5Fclose(file_id);
      AssertThrow(status >= 0, ExcIO());
    }
#  endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_parallel(
  const std::string &                          filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)chunk_size;
  Assert(false, ExcInternalError());
#  else

  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;

  // The content of the distributed matrix is copied to a matrix using a
  // 1 x n_processes process grid; every process then holds contiguous columns
  // that it can write to the file in parallel.
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  n_mpi_processes);

  const int MB = n_rows;
  // If n_columns/n_mpi_processes is smaller than the column block size of the
  // original matrix, redistributing and saving the matrix would require a
  // significant amount of MPI communication; therefore a minimum block size
  // NB is enforced.
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),
                          column_block_size);

  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
  copy_to(tmp);

  // get pointer to data held by the process
  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;

  herr_t status;
  // dataset dimensions
  hsize_t dims[2];

  // set up file access property list with parallel I/O access
  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
  status         = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
  AssertThrow(status >= 0, ExcIO());

  // create a new file collectively and release the property list identifier
  hid_t file_id =
    H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
  status = H5Pclose(plist_id);
  AssertThrow(status >= 0, ExcIO());

  // ScaLAPACK uses column-major ordering, so the dimensions are swapped
  dims[0] = tmp.n_columns;
  dims[1] = tmp.n_rows;

  // create the dataspace for the dataset
  hid_t filespace = H5Screate_simple(2, dims, nullptr);

  // create the chunked dataset with default properties and close filespace
  hsize_t chunk_dims[2];
  // invert order of rows and columns as ScaLAPACK uses column-major ordering
  chunk_dims[0] = chunk_size.second;
  chunk_dims[1] = chunk_size.first;
  plist_id      = H5Pcreate(H5P_DATASET_CREATE);
  H5Pset_chunk(plist_id, 2, chunk_dims);
  hid_t type_id = hdf5_type_id(data);
  hid_t dset_id = H5Dcreate2(
    file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);

  status = H5Sclose(filespace);
  AssertThrow(status >= 0, ExcIO());
  status = H5Pclose(plist_id);
  AssertThrow(status >= 0, ExcIO());

  // gather the number of local rows and columns from all processes
  std::vector<int> proc_n_local_rows(n_mpi_processes),
    proc_n_local_columns(n_mpi_processes);
  int ierr = MPI_Allgather(&tmp.n_local_rows,
                           1,
                           MPI_INT,
                           proc_n_local_rows.data(),
                           1,
                           MPI_INT,
                           tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);
  ierr = MPI_Allgather(&tmp.n_local_columns,
                       1,
                       MPI_INT,
                       proc_n_local_columns.data(),
                       1,
                       MPI_INT,
                       tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);

  const unsigned int my_rank(
    Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

  // hyperslab selection parameters: each process defines its portion of the
  // dataset in memory and writes it to a hyperslab in the file
  hsize_t count[2];
  count[0]       = tmp.n_local_columns;
  count[1]       = tmp.n_rows;
  hid_t memspace = H5Screate_simple(2, count, nullptr);

  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // select the hyperslab in the file
  filespace = H5Dget_space(dset_id);
  status    = H5Sselect_hyperslab(
    filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
  AssertThrow(status >= 0, ExcIO());

  // create a property list for independent dataset write
  plist_id = H5Pcreate(H5P_DATASET_XFER);
  status   = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
  AssertThrow(status >= 0, ExcIO());

  // processes without data do not participate in writing to the file
  if (tmp.values.size() > 0)
    {
      status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
      AssertThrow(status >= 0, ExcIO());
    }
  // close/release resources
  status = H5Dclose(dset_id);
  status = H5Sclose(filespace);
  status = H5Sclose(memspace);
  status = H5Pclose(plist_id);
  status = H5Fclose(file_id);
  AssertThrow(status >= 0, ExcIO());

  // wait for all processes to finish writing the matrix content before the
  // state and property are appended to the file
  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);

  // only the root process writes the state and property to the file
  if (tmp.grid->this_mpi_process == 0)
    {
      // reopen the file using default properties
      hid_t file_id_reopen =
        H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);

      // create HDF5 enum types for LAPACKSupport::State and
      // LAPACKSupport::Property
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      // create and write the dataset for the state enum
      hsize_t dims_state[1];
      dims_state[0]              = 1;
      hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
      hid_t state_enum_dataset   = H5Dcreate2(file_id_reopen,
                                            "/state",
                                            state_enum_id,
                                            state_enum_dataspace,
                                            H5P_DEFAULT,
                                            H5P_DEFAULT,
                                            H5P_DEFAULT);
      status = H5Dwrite(state_enum_dataset,
                        state_enum_id,
                        H5S_ALL,
                        H5S_ALL,
                        H5P_DEFAULT,
                        &state);
      AssertThrow(status >= 0, ExcIO());

      // create and write the dataset for the property enum
      hsize_t dims_property[1];
      dims_property[0] = 1;
      hid_t property_enum_dataspace =
        H5Screate_simple(1, dims_property, nullptr);
      hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
                                               "/property",
                                               property_enum_id,
                                               property_enum_dataspace,
                                               H5P_DEFAULT,
                                               H5P_DEFAULT,
                                               H5P_DEFAULT);
      status = H5Dwrite(property_enum_dataset,
                        property_enum_id,
                        H5S_ALL,
                        H5S_ALL,
                        H5P_DEFAULT,
                        &property);
      AssertThrow(status >= 0, ExcIO());

      // close/release resources
      status = H5Dclose(state_enum_dataset);
      status = H5Dclose(property_enum_dataset);
      status = H5Sclose(state_enum_dataspace);
      status = H5Sclose(property_enum_dataspace);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);
      status = H5Fclose(file_id_reopen);
      AssertThrow(status >= 0, ExcIO());
    }

#  endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  Assert(
    false,
    ExcMessage(
      "This function is only available when deal.II is configured with HDF5"));
#  else
#    ifdef H5_HAVE_PARALLEL
  // implementation for configurations equipped with a parallel file system
  load_parallel(filename);
#    else
  // implementation for configurations with no parallel file system
  load_serial(filename);
#    endif
#  endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  Assert(false, ExcInternalError());
#  else

  // The content of the archive is read by one process on a 1x1 grid and then
  // redistributed to the block-cyclic layout of this matrix.
  const auto one_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  1);

  const int                   MB = n_rows, NB = n_columns;
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);

  int state_int    = -1;
  int property_int = -1;

  // the 1x1 grid has only one active process and this one reads the content
  // of the matrix from the HDF5 file
  if (tmp.grid->mpi_process_is_active)
    {
      herr_t status;

      // open the file in read-only mode
      hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);

      // open the dataset in the file
      hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

      // the datatype of the data in the file and of the class have to match
      hid_t       datatype   = H5Dget_type(dataset_id);
      H5T_class_t t_class_in = H5Tget_class(datatype);
      H5T_class_t t_class    = H5Tget_class(hdf5_type_id(tmp.values.data()));
      AssertThrow(
        t_class_in == t_class,
        ExcMessage(
          "The data type of the matrix to be read does not match the archive"));

      // get the dimensions of the matrix stored in the file
      hid_t     dataspace_id = H5Dget_space(dataset_id);
      const int ndims        = H5Sget_simple_extent_ndims(dataspace_id);
      AssertThrow(ndims == 2, ExcIO());
      hsize_t dims[2];
      H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
      AssertThrow(
        static_cast<int>(dims[0]) == n_columns,
        ExcMessage(
          "The number of columns of the matrix does not match the content of the archive"));
      AssertThrow(
        static_cast<int>(dims[1]) == n_rows,
        ExcMessage(
          "The number of rows of the matrix does not match the content of the archive"));

      // read the data
      status = H5Dread(dataset_id,
                       hdf5_type_id(tmp.values.data()),
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       tmp.values.data());
      AssertThrow(status >= 0, ExcIO());

      // create HDF5 enum types for LAPACKSupport::State and
      // LAPACKSupport::Property
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      // open the datasets for the state and property enums in the file
      hid_t       dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
      hid_t       datatype_state   = H5Dget_type(dataset_state_id);
      H5T_class_t t_class_state    = H5Tget_class(datatype_state);
      AssertThrow(t_class_state == H5T_ENUM, ExcIO());

      hid_t       dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
      hid_t       datatype_property   = H5Dget_type(dataset_property_id);
      H5T_class_t t_class_property    = H5Tget_class(datatype_property);
      AssertThrow(t_class_property == H5T_ENUM, ExcIO());

      // get the dimensions of the state/property datasets
      hid_t dataspace_state    = H5Dget_space(dataset_state_id);
      hid_t dataspace_property = H5Dget_space(dataset_property_id);

      const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
      AssertThrow(ndims_state == 1, ExcIO());
      const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
      AssertThrow(ndims_property == 1, ExcIO());

      hsize_t dims_state[1];
      H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
      AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
      hsize_t dims_property[1];
      H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
      AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());

      // read state and property; they are broadcast to the other processes as
      // plain integers below
      status = H5Dread(dataset_state_id,
                       state_enum_id,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       &tmp.state);
      AssertThrow(status >= 0, ExcIO());
      state_int = static_cast<int>(tmp.state);

      status = H5Dread(dataset_property_id,
                       property_enum_id,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       &tmp.property);
      AssertThrow(status >= 0, ExcIO());
      property_int = static_cast<int>(tmp.property);

      // close/release resources
      status = H5Sclose(dataspace_id);
      status = H5Sclose(dataspace_state);
      status = H5Sclose(dataspace_property);
      status = H5Tclose(datatype);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);
      status = H5Dclose(dataset_state_id);
      status = H5Dclose(dataset_id);
      status = H5Dclose(dataset_property_id);
      status = H5Fclose(file_id);
      AssertThrow(status >= 0, ExcIO());
    }
  // broadcast state and property to the processes that did not read the file
  tmp.grid->send_to_inactive(&state_int, 1);
  state = static_cast<LAPACKSupport::State>(state_int);

  tmp.grid->send_to_inactive(&property_int, 1);
  property = static_cast<LAPACKSupport::Property>(property_int);

  // redistribute the content of the temporary matrix to this matrix
  tmp.copy_to(*this);
#  endif // DEAL_II_WITH_HDF5
}



template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
  (void)filename;
  Assert(false, ExcInternalError());
#  else
#    ifndef H5_HAVE_PARALLEL
  (void)filename;
  Assert(false, ExcInternalError());
#    else

  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;

  // The archive is read into a matrix using a 1 x n_processes process grid;
  // every process reads a contiguous block of columns in parallel.
  const auto column_grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
                                                  1,
                                                  n_mpi_processes);

  const int MB = n_rows;
  // see save_parallel() for an explanation of the choice of the block size NB
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),
                          column_block_size);

  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);

  // get pointer to data held by the process
  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;

  herr_t status;

  // set up file access property list with parallel I/O access
  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
  status         = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
  AssertThrow(status >= 0, ExcIO());

  // open the file collectively in read-only mode and release the property
  // list identifier
  hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
  status        = H5Pclose(plist_id);
  AssertThrow(status >= 0, ExcIO());

  // open the dataset in the file collectively
  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

  // the datatype of the data in the file and of the class have to match
  hid_t       datatype     = hdf5_type_id(data);
  hid_t       datatype_inp = H5Dget_type(dataset_id);
  H5T_class_t t_class_inp  = H5Tget_class(datatype_inp);
  H5T_class_t t_class      = H5Tget_class(datatype);
  AssertThrow(
    t_class_inp == t_class,
    ExcMessage(
      "The data type of the matrix to be read does not match the archive"));

  // get the dimensions of the matrix stored in the file
  hid_t     dataspace_id = H5Dget_space(dataset_id);
  const int ndims        = H5Sget_simple_extent_ndims(dataspace_id);
  AssertThrow(ndims == 2, ExcIO());
  hsize_t dims[2];
  status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
  AssertThrow(status >= 0, ExcIO());
  AssertThrow(
    static_cast<int>(dims[0]) == n_columns,
    ExcMessage(
      "The number of columns of the matrix does not match the content of the archive"));
  AssertThrow(
    static_cast<int>(dims[1]) == n_rows,
    ExcMessage(
      "The number of rows of the matrix does not match the content of the archive"));

  // gather the number of local rows and columns from all processes
  std::vector<int> proc_n_local_rows(n_mpi_processes),
    proc_n_local_columns(n_mpi_processes);
  int ierr = MPI_Allgather(&tmp.n_local_rows,
                           1,
                           MPI_INT,
                           proc_n_local_rows.data(),
                           1,
                           MPI_INT,
                           tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);
  ierr = MPI_Allgather(&tmp.n_local_columns,
                       1,
                       MPI_INT,
                       proc_n_local_columns.data(),
                       1,
                       MPI_INT,
                       tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);

  const unsigned int my_rank(
    Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

  // hyperslab selection parameters: each process reads its portion of the
  // dataset from a hyperslab in the file
  hsize_t count[2];
  count[0] = tmp.n_local_columns;
  count[1] = tmp.n_local_rows;

  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // select the hyperslab in the file
  status = H5Sselect_hyperslab(
    dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
  AssertThrow(status >= 0, ExcIO());

  // create a memory dataspace and read the data independently
  hid_t memspace = H5Screate_simple(2, count, nullptr);

  status =
    H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
  AssertThrow(status >= 0, ExcIO());

  // create HDF5 enum types for LAPACKSupport::State and
  // LAPACKSupport::Property
  hid_t state_enum_id, property_enum_id;
  internal::create_HDF5_state_enum_id(state_enum_id);
  internal::create_HDF5_property_enum_id(property_enum_id);

  // open the datasets for the state and property enums in the file
  hid_t       dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
  hid_t       datatype_state   = H5Dget_type(dataset_state_id);
  H5T_class_t t_class_state    = H5Tget_class(datatype_state);
  AssertThrow(t_class_state == H5T_ENUM, ExcIO());

  hid_t       dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
  hid_t       datatype_property   = H5Dget_type(dataset_property_id);
  H5T_class_t t_class_property    = H5Tget_class(datatype_property);
  AssertThrow(t_class_property == H5T_ENUM, ExcIO());

  // get the dimensions of the state/property datasets
  hid_t dataspace_state    = H5Dget_space(dataset_state_id);
  hid_t dataspace_property = H5Dget_space(dataset_property_id);

  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
  AssertThrow(ndims_state == 1, ExcIO());
  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
  AssertThrow(ndims_property == 1, ExcIO());

  hsize_t dims_state[1];
  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
  AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
  hsize_t dims_property[1];
  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
  AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());

  // read the state and property enums
  status = H5Dread(
    dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
  AssertThrow(status >= 0, ExcIO());

  status = H5Dread(dataset_property_id,
                   property_enum_id,
                   H5S_ALL,
                   H5S_ALL,
                   H5P_DEFAULT,
                   &tmp.property);
  AssertThrow(status >= 0, ExcIO());

  // close/release resources
  status = H5Sclose(memspace);
  status = H5Dclose(dataset_id);
  status = H5Dclose(dataset_state_id);
  status = H5Dclose(dataset_property_id);
  status = H5Sclose(dataspace_id);
  status = H5Sclose(dataspace_state);
  status = H5Sclose(dataspace_property);
  status = H5Tclose(state_enum_id);
  status = H5Tclose(property_enum_id);
  status = H5Fclose(file_id);
  AssertThrow(status >= 0, ExcIO());

  // redistribute the content of the temporary matrix to this matrix
  tmp.copy_to(*this);
#    endif // H5_HAVE_PARALLEL
#  endif   // DEAL_II_WITH_HDF5
}



namespace internal
{
  namespace
  {
    template <typename NumberType>
    void
    scale_columns(ScaLAPACKMatrix<NumberType> &      matrix,
                  const ArrayView<const NumberType> &factors)
    {
      for (unsigned int i = 0; i < matrix.local_n(); ++i)
        {
          const NumberType s = factors[matrix.global_column(i)];

          for (unsigned int j = 0; j < matrix.local_m(); ++j)
            matrix.local_el(j, i) *= s;
        }
    }

    template <typename NumberType>
    void
    scale_rows(ScaLAPACKMatrix<NumberType> &      matrix,
               const ArrayView<const NumberType> &factors)
    {
      for (unsigned int i = 0; i < matrix.local_m(); ++i)
        {
          const NumberType s = factors[matrix.global_row(i)];

          for (unsigned int j = 0; j < matrix.local_n(); ++j)
            matrix.local_el(i, j) *= s;
        }
    }
  } // namespace
} // namespace internal
template <typename NumberType>
template <class InputVector>
void
ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_columns(*this, make_array_view(factors));
}



template <typename NumberType>
template <class InputVector>
void
ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_rows(*this, make_array_view(factors));
}



// instantiations
#  include "scalapack.inst"


DEAL_II_NAMESPACE_CLOSE
#endif // DEAL_II_WITH_SCALAPACK