#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/base/mpi.templates.h>
#  include <deal.II/lac/scalapack.templates.h>

#  ifdef DEAL_II_WITH_HDF5
#    include <hdf5.h>
#  endif

DEAL_II_NAMESPACE_OPEN

#  ifdef DEAL_II_WITH_HDF5
// Map C++ number types to the matching HDF5 native type id.
template <typename number>
inline hid_t
hdf5_type_id(const number *)
{
  Assert(false, dealii::ExcNotImplemented());
  return -1;
}

inline hid_t hdf5_type_id(const double *)       { return H5T_NATIVE_DOUBLE; }
inline hid_t hdf5_type_id(const float *)        { return H5T_NATIVE_FLOAT; }
inline hid_t hdf5_type_id(const int *)          { return H5T_NATIVE_INT; }
inline hid_t hdf5_type_id(const unsigned int *) { return H5T_NATIVE_UINT; }
inline hid_t hdf5_type_id(const char *)         { return H5T_NATIVE_CHAR; }
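// Usage sketch (illustrative, not part of this file): the overload set above lets
// templated HDF5 I/O code pick the native type id matching NumberType without
// switching on the value type explicitly, e.g.
//
//   const NumberType *tag     = nullptr;           // used for overload dispatch only
//   const hid_t       type_id = hdf5_type_id(tag); // H5T_NATIVE_DOUBLE for double
//   hid_t dataset = H5Dcreate2(file_id, "/matrix", type_id, dataspace_id,
//                              H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);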
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const size_type n_rows_, const size_type n_columns_,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type row_block_size_, const size_type column_block_size_,
  const LAPACKSupport::Property property_)
  : ...
  , first_process_column(0)
  ...

template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const size_type size,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type block_size, const LAPACKSupport::Property property)
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
  const std::string &                                       filename,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  ...)
  : ...
  , first_process_column(0)
  ...
  , submatrix_column(1)
{
#  ifndef DEAL_II_WITH_HDF5
  AssertThrow(
    false,
    ExcMessage(
      "This function is only available when deal.II is configured with HDF5"));
#  else
  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));

  if (this_mpi_process == 0)
    {
      hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);

      hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);

      hid_t filespace = H5Dget_space(dataset);

      int rank = H5Sget_simple_extent_ndims(filespace);
      status   = H5Sget_simple_extent_dims(filespace, dims, nullptr);

      status = H5Sclose(filespace);
      status = H5Dclose(dataset);
      status = H5Fclose(file);
    }
  int ierr = MPI_Bcast(&n_rows, /* ... */ process_grid->mpi_communicator);
  ierr     = MPI_Bcast(&n_columns, /* ... */ process_grid->mpi_communicator);

  load(filename.c_str());
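// Usage sketch (illustrative, assuming MPI is initialized and deal.II was built
// with HDF5): restore a previously saved matrix directly via this constructor.
//
//   auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(MPI_COMM_WORLD, 2, 2);
//   ScaLAPACKMatrix<double> A("matrix.h5", grid, 32);
//   // A now has the dimensions stored in "matrix.h5" and its content is loaded.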
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(
  const size_type n_rows_, const size_type n_columns_,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type row_block_size_, const size_type column_block_size_,
  const LAPACKSupport::Property property_)
{
  Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
  Assert(column_block_size_ > 0,
         ExcMessage("Column block size has to be positive."));
  Assert(
    row_block_size_ <= n_rows_,
    ExcMessage(
      "Row block size can not be greater than the number of rows of the matrix"));
  Assert(
    column_block_size_ <= n_columns_,
    ExcMessage(
      "Column block size can not be greater than the number of columns of the matrix"));

  property          = property_;
  grid              = process_grid;
  n_rows            = n_rows_;
  n_columns         = n_columns_;
  row_block_size    = row_block_size_;
  column_block_size = column_block_size_;

  if (grid->mpi_process_is_active)
    {
      // process-local number of rows and columns
      n_local_rows    = numroc_(&n_rows,
                             &row_block_size,
                             &(grid->this_process_row),
                             &first_process_row,
                             &(grid->n_process_rows));
      n_local_columns = numroc_(&n_columns,
                                &column_block_size,
                                &(grid->this_process_column),
                                &first_process_column,
                                &(grid->n_process_columns));

      // local leading dimension of the distributed storage
      int lda = std::max(1, n_local_rows);

      int info = 0;
      descinit_(descriptor,
                &n_rows,
                &n_columns,
                &row_block_size,
                &column_block_size,
                &first_process_row,
                &first_process_column,
                &(grid->blacs_context),
                &lda,
                &info);
    }
  else
    {
      n_local_rows    = -1;
      n_local_columns = -1;
      std::fill(std::begin(descriptor), std::end(descriptor), -1);
    }
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(
  const size_type size,
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
  const size_type block_size, const LAPACKSupport::Property property)
{
  reinit(size, size, process_grid, block_size, block_size, property);
}

template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::set_property(const LAPACKSupport::Property property_)
{
  property = property_;
}
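// Usage sketch (illustrative, not part of this file): set up a 2d block-cyclic
// process grid and a distributed matrix with 32x32 blocks.
//
//   const unsigned int n = 1000, block = 32;
//   auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//     MPI_COMM_WORLD, n, n, block, block);
//   ScaLAPACKMatrix<double> A(n, n, grid, block, block,
//                             LAPACKSupport::Property::symmetric);
//   // or, equivalently, for an existing object:
//   A.reinit(n, n, grid, block, block, LAPACKSupport::Property::symmetric);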
template <typename NumberType>
LAPACKSupport::Property
ScaLAPACKMatrix<NumberType>::get_property() const
{ return property; }

template <typename NumberType>
LAPACKSupport::State
ScaLAPACKMatrix<NumberType>::get_state() const
{ return state; }
template <typename NumberType>
ScaLAPACKMatrix<NumberType> &
ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
{
  Assert(n_columns == int(matrix.n()),
         ExcDimensionMismatch(n_columns, matrix.n()));

  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              local_el(i, j)   = matrix(glob_i, glob_j);
            }
        }
    }
  return *this;
}
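// Usage sketch (illustrative, assuming `grid` is a ProcessGrid as above):
// distribute a serial FullMatrix that is replicated on every process and
// gather the result back after some operation.
//
//   FullMatrix<double> serial(n);       // identical on all MPI ranks
//   // ... fill serial ...
//   ScaLAPACKMatrix<double> A(n, grid, 32);
//   A = serial;                         // scatter into the block-cyclic layout
//   // ... operate on A ...
//   A.copy_to(serial);                  // gather the result on every process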
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
                                       const unsigned int                  rank)
{
  if (n_rows * n_columns == 0)
    return;

  const unsigned int this_mpi_process(
    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));

  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));
  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
         ExcMessage("All processes have to call routine with identical rank"));

  if (this_mpi_process == rank)
391 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
393 const std::vector<int> ranks(n, rank);
395 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
399 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
404 int n_proc_rows_B = 1, n_proc_cols_B = 1;
405 int this_process_row_B = -1, this_process_column_B = -1;
406 int blacs_context_B = -1;
407 if (MPI_COMM_NULL != communicator_B)
      blacs_context_B   = Csys2blacs_handle(communicator_B);
      const char *order = "Col";
      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
      Cblacs_gridinfo(blacs_context_B,
                      &n_proc_rows_B,
                      &n_proc_cols_B,
                      &this_process_row_B,
                      &this_process_column_B);
423 const bool mpi_process_is_active_B =
424 (this_process_row_B >= 0 && this_process_column_B >= 0);
427 std::vector<int> descriptor_B(9, -1);
428 const int first_process_row_B = 0, first_process_col_B = 0;
430 if (mpi_process_is_active_B)
433 int n_local_rows_B = numroc_(&n_rows,
436 &first_process_row_B,
438 int n_local_cols_B = numroc_(&n_columns,
440 &this_process_column_B,
441 &first_process_col_B,
445 (void)n_local_cols_B;
      int lda = std::max(1, n_local_rows_B);
449 descinit_(descriptor_B.data(),
454 &first_process_row_B,
455 &first_process_col_B,
461 if (this->grid->mpi_process_is_active)
      NumberType *loc_vals_A =
        this->values.size() > 0 ? this->values.data() : nullptr;
      const NumberType *loc_vals_B =
        mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
481 &(this->grid->blacs_context));
483 if (mpi_process_is_active_B)
484 Cblacs_gridexit(blacs_context_B);
486 MPI_Group_free(&group_A);
487 MPI_Group_free(&group_B);
488 if (MPI_COMM_NULL != communicator_B)
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
{
  Assert(n_local_rows >= 0 &&
           loc_row < static_cast<unsigned int>(n_local_rows),
         ExcIndexRange(loc_row, 0, n_local_rows));
  const int i = loc_row + 1;
  return indxl2g_(&i, &row_block_size,
                  &(grid->this_process_row), &first_process_row,
                  &(grid->n_process_rows)) -
         1;
}
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
{
  Assert(n_local_columns >= 0 &&
           loc_column < static_cast<unsigned int>(n_local_columns),
         ExcIndexRange(loc_column, 0, n_local_columns));
  const int j = loc_column + 1;
  return indxl2g_(&j, &column_block_size,
                  &(grid->this_process_column), &first_process_column,
                  &(grid->n_process_columns)) -
         1;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
                                     const unsigned int            rank) const
{
536 if (n_rows * n_columns == 0)
539 const unsigned int this_mpi_process(
544 ExcMessage(
"All processes have to call routine with identical rank"));
546 ExcMessage(
"All processes have to call routine with identical rank"));
549 if (this_mpi_process == rank)
562 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
564 const std::vector<int> ranks(n, rank);
566 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
570 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
575 int n_proc_rows_B = 1, n_proc_cols_B = 1;
576 int this_process_row_B = -1, this_process_column_B = -1;
577 int blacs_context_B = -1;
578 if (MPI_COMM_NULL != communicator_B)
      blacs_context_B   = Csys2blacs_handle(communicator_B);
      const char *order = "Col";
      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
      Cblacs_gridinfo(blacs_context_B,
                      &n_proc_rows_B,
                      &n_proc_cols_B,
                      &this_process_row_B,
                      &this_process_column_B);
594 const bool mpi_process_is_active_B =
595 (this_process_row_B >= 0 && this_process_column_B >= 0);
598 std::vector<int> descriptor_B(9, -1);
599 const int first_process_row_B = 0, first_process_col_B = 0;
601 if (mpi_process_is_active_B)
604 int n_local_rows_B = numroc_(&n_rows,
607 &first_process_row_B,
609 int n_local_cols_B = numroc_(&n_columns,
611 &this_process_column_B,
612 &first_process_col_B,
616 (void)n_local_cols_B;
      int lda = std::max(1, n_local_rows_B);
621 descinit_(descriptor_B.data(),
626 &first_process_row_B,
627 &first_process_col_B,
635 if (this->grid->mpi_process_is_active)
      const NumberType *loc_vals_A =
        this->values.size() > 0 ? this->values.data() : nullptr;
      NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
652 &(this->grid->blacs_context));
654 if (mpi_process_is_active_B)
655 Cblacs_gridexit(blacs_context_B);
657 MPI_Group_free(&group_A);
658 MPI_Group_free(&group_B);
659 if (MPI_COMM_NULL != communicator_B)
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
{
  Assert(n_columns == int(matrix.n()),
         ExcDimensionMismatch(n_columns, matrix.n()));

  if (grid->mpi_process_is_active)
    {
      for (int i = 0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              matrix(glob_i, glob_j) = local_el(i, j);
            }
        }
    }
  for (unsigned int i = 0; i < matrix.n(); ++i)
    for (unsigned int j = i + 1; j < matrix.m(); ++j)
      ...
  for (unsigned int i = 0; i < matrix.n(); ++i)
    for (unsigned int j = 0; j < i; ++j)
      ...

  if (uplo == 'L')
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = i + 1; j < matrix.m(); ++j)
        matrix(i, j) = matrix(j, i);
  else if (uplo == 'U')
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = 0; j < i; ++j)
        matrix(i, j) = matrix(j, i);
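// Usage sketch (illustrative): move data between a serial LAPACKFullMatrix that
// lives on a single rank and the distributed matrix.
//
//   LAPACKFullMatrix<double> local(n, n);   // only meaningful on rank 0
//   ScaLAPACKMatrix<double>  A(n, grid, 32);
//   A.copy_from(local, 0);                  // rank 0 scatters its matrix
//   // ... work on A in parallel ...
//   A.copy_to(local, 0);                    // gather the result on rank 0 only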
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(
  ScaLAPACKMatrix<NumberType> &                B,
  const std::pair<unsigned int, unsigned int> &offset_A,
  const std::pair<unsigned int, unsigned int> &offset_B,
  const std::pair<unsigned int, unsigned int> &submatrix_size) const
{
731 if (submatrix_size.first == 0 || submatrix_size.second == 0)
  int ierr, comparison;
  ierr = MPI_Comm_compare(grid->mpi_communicator,
                          B.grid->mpi_communicator,
                          &comparison);
  AssertThrowMPI(ierr);
  Assert(comparison == MPI_IDENT,
         ExcMessage("Matrix A and B must have a common MPI Communicator"));
  int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
  const char *order       = "Col";
  int union_n_process_rows =
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
  int union_n_process_columns = 1;
  Cblacs_gridinit(&union_blacs_context,
                  order,
                  union_n_process_rows,
                  union_n_process_columns);
  int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
  Cblacs_gridinfo(this->grid->blacs_context,
                  &n_grid_rows_A, &n_grid_columns_A, &my_row_A, &my_column_A);
  const bool in_context_A =
    (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
    (my_column_A >= 0 && my_column_A < n_grid_columns_A);

  int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
  Cblacs_gridinfo(B.grid->blacs_context,
                  &n_grid_rows_B, &n_grid_columns_B, &my_row_B, &my_column_B);
  const bool in_context_B =
    (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
    (my_column_B >= 0 && my_column_B < n_grid_columns_B);
  const int n_rows_submatrix    = submatrix_size.first;
  const int n_columns_submatrix = submatrix_size.second;

  int ia = offset_A.first + 1, ja = offset_A.second + 1;
  int ib = offset_B.first + 1, jb = offset_B.second + 1;

  std::array<int, 9> desc_A, desc_B;

  const NumberType *loc_vals_A = nullptr;
  NumberType *      loc_vals_B = nullptr;
  if (in_context_A)
    {
      if (this->values.size() != 0)
        loc_vals_A = this->values.data();
      for (unsigned int i = 0; i < desc_A.size(); ++i)
        desc_A[i] = this->descriptor[i];
    }
  if (in_context_B)
    {
      if (B.values.size() != 0)
        loc_vals_B = B.values.data();
      for (unsigned int i = 0; i < desc_B.size(); ++i)
        desc_B[i] = B.descriptor[i];
    }

  pgemr2d(&n_rows_submatrix,
          &n_columns_submatrix,
          loc_vals_A, &ia, &ja, desc_A.data(),
          loc_vals_B, &ib, &jb, desc_B.data(),
          &union_blacs_context);

  Cblacs_gridexit(union_blacs_context);
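// Usage sketch (illustrative): copy a sub-block between two distributed matrices,
// possibly living on different process grids that share the same communicator.
//
//   // copy a 100x100 block starting at (0,0) of A into B starting at (50,50):
//   A.copy_to(B,
//             std::make_pair(0u, 0u),     // offset in A
//             std::make_pair(50u, 50u),   // offset in B
//             std::make_pair(100u, 100u));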
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
{
859 if (this->grid->mpi_process_is_active)
861 this->descriptor[0] == 1,
863 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
  if (dest.grid->mpi_process_is_active)
868 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
884 MPI_Group group_source, group_dest, group_union;
885 ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
  ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
889 ierr = MPI_Group_union(group_source, group_dest, &group_union);
906 ierr = MPI_Comm_create_group(MPI_COMM_WORLD,
909 &mpi_communicator_union);
917 int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
      const char *order = "Col";
      int union_n_process_rows =
        Utilities::MPI::n_mpi_processes(mpi_communicator_union);
      int union_n_process_columns = 1;
922 Cblacs_gridinit(&union_blacs_context,
924 union_n_process_rows,
925 union_n_process_columns);
      const NumberType *loc_vals_source = nullptr;
      NumberType *      loc_vals_dest   = nullptr;
930 if (this->grid->mpi_process_is_active && (this->values.size() > 0))
934 "source: process is active but local matrix empty"));
935 loc_vals_source = this->values.data();
      if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
942 "destination: process is active but local matrix empty"));
          loc_vals_dest = dest.values.data();
955 &union_blacs_context);
957 Cblacs_gridexit(union_blacs_context);
959 if (mpi_communicator_union != MPI_COMM_NULL)
961 ierr = MPI_Group_free(&group_source);
963 ierr = MPI_Group_free(&group_dest);
965 ierr = MPI_Group_free(&group_union);
970 if (this->grid->mpi_process_is_active)
    dest.values = this->values;
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_transposed(const ScaLAPACKMatrix<NumberType> &B)
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
                                 const NumberType                   alpha,
                                 const NumberType                   beta,
                                 const bool                         transpose_B)
{
1016 ExcMessage(
"The matrices A and B need to have the same process grid"));
1018 if (this->grid->mpi_process_is_active)
      char trans_b = transpose_B ? 'T' : 'N';
      const NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const NumberType a, const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 1, a, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tadd(const NumberType b, const ScaLAPACKMatrix<NumberType> &B)
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mult(const NumberType                   b,
                                  const ScaLAPACKMatrix<NumberType> &B,
                                  const NumberType                   c,
                                  ScaLAPACKMatrix<NumberType> &      C,
                                  const bool                         transpose_A,
                                  const bool                         transpose_B) const
1075 ExcMessage(
"The matrices A and B need to have the same process grid"));
1077 ExcMessage(
"The matrices B and C need to have the same process grid"));
1081 if (!transpose_A && !transpose_B)
1085 Assert(this->n_rows == C.n_rows,
1089 Assert(this->row_block_size == C.row_block_size,
1096 else if (transpose_A && !transpose_B)
1100 Assert(this->n_columns == C.n_rows,
1104 Assert(this->column_block_size == C.row_block_size,
1111 else if (!transpose_A && transpose_B)
1115 Assert(this->n_rows == C.n_rows,
1119 Assert(this->row_block_size == C.row_block_size,
1131 Assert(this->n_columns == C.n_rows,
1135 Assert(this->column_block_size == C.row_block_size,
1143 if (this->grid->mpi_process_is_active)
      char trans_a = transpose_A ? 'T' : 'N';
      char trans_b = transpose_B ? 'T' : 'N';
      const NumberType *A_loc =
        (this->values.size() > 0) ? this->values.data() : nullptr;
      const NumberType *B_loc =
        (B.values.size() > 0) ? B.values.data() : nullptr;
      NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
1154 int n = C.n_columns;
1155 int k = transpose_A ? this->n_rows : this->n_columns;
1164 &(this->submatrix_row),
1165 &(this->submatrix_column),
1174 &C.submatrix_column,
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> &      C,
                                   const ScaLAPACKMatrix<NumberType> &B,
                                   const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, false, false);
  else
    mult(1., B, 0, C, false, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> &      C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, true, false);
  else
    mult(1., B, 0, C, true, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> &      C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, false, true);
  else
    mult(1., B, 0, C, false, true);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> &      C,
                                     const ScaLAPACKMatrix<NumberType> &B,
                                     const bool                         adding) const
{
  if (adding)
    mult(1., B, 1., C, true, true);
  else
    mult(1., B, 0, C, true, true);
}
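// Usage sketch (illustrative): the four wrappers above map onto
// C = A*B, C = A^T*B, C = A*B^T and C = A^T*B^T, optionally accumulating into C.
// With square n x n matrices all four variants are dimensionally valid:
//
//   ScaLAPACKMatrix<double> A(n, grid, 32), B(n, grid, 32), C(n, grid, 32);
//   A.mmult(C, B);           // C  = A * B
//   A.Tmmult(C, B);          // C  = A^T * B
//   A.mTmult(C, B);          // C  = A * B^T
//   A.TmTmult(C, B, true);   // C += A^T * B^T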
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
{
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage(
           "Cholesky factorization can be applied to symmetric matrices only."));
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
1250 if (grid->mpi_process_is_active)
1253 NumberType *A_loc = this->values.data();
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
1279 if (grid->mpi_process_is_active)
1282 NumberType *A_loc = this->values.data();
1284 const int iarow = indxg2p_(&submatrix_row,
1286 &(grid->this_process_row),
1288 &(grid->n_process_rows));
1289 const int mp = numroc_(&n_rows,
1291 &(grid->this_process_row),
1293 &(grid->n_process_rows));
1294 ipiv.resize(mp + row_block_size);
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::invert()
1330 if (grid->mpi_process_is_active)
1332 const char uploTriangular =
1334 const char diag =
'N';
1336 NumberType *A_loc = this->values.data();
1337 ptrtri(&uploTriangular,
1359 compute_cholesky_factorization();
1361 compute_lu_factorization();
1363 if (grid->mpi_process_is_active)
1366 NumberType *A_loc = this->values.data();
1383 int lwork = -1, liwork = -1;
      lwork = static_cast<int>(work[0]);
1404 iwork.resize(liwork);
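// Usage sketch (illustrative): factor a symmetric positive definite matrix,
// estimate its condition number, and invert it in place.
//
//   ScaLAPACKMatrix<double> A(n, grid, 32, LAPACKSupport::Property::symmetric);
//   // ... fill A with an s.p.d. matrix ...
//   const double l1 = A.l1_norm();            // needed before factorization
//   A.compute_cholesky_factorization();
//   const double rcond = A.reciprocal_condition_number(l1);
//   A.invert();                               // reuses the Cholesky factors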
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)
{
  std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric(compute_eigenvectors);
  else
    return eigenpairs_symmetric(compute_eigenvectors, idx);
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)
{
1457 Assert(!std::isnan(value_limits.first),
1459 Assert(!std::isnan(value_limits.second),
1462 std::pair<unsigned int, unsigned int> indices =
1466 return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType> &    eigenvalue_limits)
{
1480 "Matrix has to be in Matrix state before calling this function."));
1482 ExcMessage(
"Matrix has to be symmetric for this operation."));
1484 std::lock_guard<std::mutex> lock(mutex);
1486 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1487 std::isnan(eigenvalue_limits.second)) ?
1490 const bool use_indices =
1497 !(use_values && use_indices),
1499 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1503 std::unique_ptr<ScaLAPACKMatrix<NumberType>>
eigenvectors =
1504 compute_eigenvectors ?
1505 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1508 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1509 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1516 std::vector<NumberType> ev(n_rows);
1518 if (grid->mpi_process_is_active)
      char jobz           = compute_eigenvectors ? 'V' : 'N';
      bool all_eigenpairs = true;
1529 NumberType vl = NumberType(), vu = NumberType();
1535 NumberType abstol = NumberType();
1542 NumberType orfac = 0;
1544 std::vector<int> ifail;
1551 std::vector<int> iclustr;
1557 std::vector<NumberType> gap(n_local_rows * n_local_columns);
          all_eigenpairs = true;

          all_eigenpairs = false;
          vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
          vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);

          all_eigenpairs = false;
          il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1586 NumberType *A_loc = this->values.data();
      NumberType *eigenvectors_loc =
        (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
          char cmach = compute_eigenvectors ? 'U' : 'S';
          plamch(&(this->grid->blacs_context), &cmach, abstol);
1632 ifail.resize(n_rows);
1633 iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
1634 gap.resize(grid->n_process_rows * grid->n_process_columns);
      lwork = static_cast<int>(work[0]);
1694 iwork.resize(liwork);
1731 if (compute_eigenvectors)
      while (ev.size() > static_cast<size_type>(m))
1741 grid->send_to_inactive(&m, 1);
1746 if (!grid->mpi_process_is_active)
1751 grid->send_to_inactive(ev.data(), ev.size());
1758 if (compute_eigenvectors)
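// Usage sketch (illustrative): compute the five smallest eigenvalues (and the
// corresponding eigenvectors) of a symmetric matrix via the by-index interface.
//
//   ScaLAPACKMatrix<double> A(n, grid, 32, LAPACKSupport::Property::symmetric);
//   // ... fill A symmetrically ...
//   const std::vector<double> ev =
//     A.eigenpairs_symmetric_by_index(std::make_pair(0u, 4u),
//                                     /*compute_eigenvectors=*/true);
//   // ev holds the eigenvalues in ascending order; on return the columns of A
//   // hold the computed eigenvectors.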
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
  const std::pair<unsigned int, unsigned int> &index_limits,
  const bool                                   compute_eigenvectors)
{
  const std::pair<unsigned int, unsigned int> idx =
    std::make_pair(std::min(index_limits.first, index_limits.second),
                   std::max(index_limits.first, index_limits.second));

  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
    return eigenpairs_symmetric_MRRR(compute_eigenvectors);
  else
    return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
  const std::pair<NumberType, NumberType> &value_limits,
  const bool                               compute_eigenvectors)
{
1803 const std::pair<unsigned int, unsigned int> indices =
1807 return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
  const bool                                   compute_eigenvectors,
  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
  const std::pair<NumberType, NumberType> &    eigenvalue_limits)
{
1821 "Matrix has to be in Matrix state before calling this function."));
1823 ExcMessage(
"Matrix has to be symmetric for this operation."));
1825 std::lock_guard<std::mutex> lock(mutex);
1827 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1828 std::isnan(eigenvalue_limits.second)) ?
1831 const bool use_indices =
1838 !(use_values && use_indices),
1840 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1844 std::unique_ptr<ScaLAPACKMatrix<NumberType>>
eigenvectors =
1845 compute_eigenvectors ?
1846 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1849 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1850 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1856 std::vector<NumberType> ev(n_rows);
1863 if (grid->mpi_process_is_active)
      char jobz = compute_eigenvectors ? 'V' : 'N';
1874 NumberType vl = NumberType(), vu = NumberType();
          vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
          vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);

          il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1901 NumberType *A_loc = this->values.data();
      NumberType *eigenvectors_loc =
        (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
      lwork = static_cast<int>(work[0]);
1944 iwork.resize(liwork);
1973 if (compute_eigenvectors)
1977 "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
1982 if (compute_eigenvectors)
      while (ev.size() > static_cast<size_type>(m))
1992 grid->send_to_inactive(&m, 1);
1997 if (!grid->mpi_process_is_active)
2002 grid->send_to_inactive(ev.data(), ev.size());
2009 if (compute_eigenvectors)
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
                                         ScaLAPACKMatrix<NumberType> *VT)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
2030 Assert(row_block_size == column_block_size,
  const bool left_singular_vectors  = (U != nullptr) ? true : false;
  const bool right_singular_vectors = (VT != nullptr) ? true : false;
2036 if (left_singular_vectors)
      Assert(grid->blacs_context == U->grid->blacs_context,
2048 if (right_singular_vectors)
      Assert(grid->blacs_context == VT->grid->blacs_context,
             ExcDimensionMismatch(grid->blacs_context, VT->grid->blacs_context));
2062 std::lock_guard<std::mutex> lock(mutex);
  std::vector<NumberType> sv(std::min(n_rows, n_columns));
2066 if (grid->mpi_process_is_active)
      char jobu  = left_singular_vectors ? 'V' : 'N';
      char jobvt = right_singular_vectors ? 'V' : 'N';
      NumberType *A_loc  = this->values.data();
      NumberType *U_loc  = left_singular_vectors ? U->values.data() : nullptr;
      NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;
      lwork = static_cast<int>(work[0]);
2132 grid->send_to_inactive(sv.data(), sv.size());
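// Usage sketch (illustrative): compute a full SVD  A = U * Sigma * VT. The
// matrices U and VT must use the same process grid and block sizes as A.
//
//   ScaLAPACKMatrix<double> A(m, n, grid, 32, 32);
//   ScaLAPACKMatrix<double> U(m, m, grid, 32, 32), VT(n, n, grid, 32, 32);
//   // ... fill A ...
//   const std::vector<double> sigma = A.compute_SVD(&U, &VT);
//   // sigma holds the min(m,n) singular values in descending order.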
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
                                           const bool                   transpose)
{
  Assert(grid == B.grid,
         ExcMessage("The matrices A and B need to have the same process grid"));
2151 "Matrix has to be in Matrix state before calling this function."));
2154 "Matrix B has to be in Matrix state before calling this function."));
2167 Assert(row_block_size == column_block_size,
2169 "Use identical block sizes for rows and columns of matrix A"));
2172 "Use identical block sizes for rows and columns of matrix B"));
2175 "Use identical block-cyclic distribution for matrices A and B"));
2177 std::lock_guard<std::mutex> lock(mutex);
2179 if (grid->mpi_process_is_active)
2182 NumberType *A_loc = this->values.data();
      NumberType *B_loc = B.values.data();
      lwork = static_cast<int>(work[0]);
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage(
           "Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size == column_block_size,
         ExcMessage(
           "Use identical block sizes for rows and columns of matrix A"));
  Assert(ratio > 0. && ratio < 1.,
         ExcMessage(
           "input parameter ratio has to be larger than zero and smaller than 1"));
2261 std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
2262 AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
2269 unsigned int n_sv = 1;
2270 std::vector<NumberType> inv_sigma;
2271 inv_sigma.push_back(1 / sv[0]);
2273 for (
unsigned int i = 1; i < sv.size(); ++i)
2274 if (sv[i] > sv[0] * ratio)
2277 inv_sigma.push_back(1 / sv[i]);
2299 std::make_pair(0, 0),
2300 std::make_pair(0, 0),
2301 std::make_pair(n_rows, n_sv));
2303 std::make_pair(0, 0),
2304 std::make_pair(0, 0),
2305 std::make_pair(n_sv, n_columns));
2308 this->reinit(n_columns,
  VT_R.mult(1, U_R, 0, *this, true, true);
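// Usage sketch (illustrative): replace A by its Moore-Penrose pseudoinverse,
// dropping singular values below ratio * sigma_max.
//
//   ScaLAPACKMatrix<double> A(m, n, grid, 32, 32);
//   // ... fill A ...
//   const unsigned int rank = A.pseudoinverse(1e-8);
//   // A now holds the (n x m) pseudoinverse; rank is the number of kept
//   // singular values.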
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
  const NumberType a_norm) const
{
  Assert(state == LAPACKSupport::cholesky,
         ExcMessage(
           "Matrix has to be in Cholesky state before calling this function."));
2329 std::lock_guard<std::mutex> lock(mutex);
2330 NumberType rcond = 0.;
2332 if (grid->mpi_process_is_active)
2334 int liwork = n_local_rows;
2335 iwork.resize(liwork);
2338 const NumberType *A_loc = this->values.data();
      lwork = static_cast<int>(std::ceil(work[0]));
2377 grid->send_to_inactive(&rcond);
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::l1_norm() const
{
  const char type('O');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::linfty_norm() const
{
  const char type('I');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::frobenius_norm() const
{
  const char type('F');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
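// Usage sketch (illustrative): the three public norms dispatch to the symmetric
// or general ScaLAPACK norm routine depending on the stored property flag.
//
//   A.set_property(LAPACKSupport::Property::symmetric); // if A is symmetric
//   const double n1   = A.l1_norm();        // 'O': maximum column sum
//   const double ninf = A.linfty_norm();    // 'I': maximum row sum
//   const double nfro = A.frobenius_norm(); // 'F'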
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
{
  Assert(state == LAPACKSupport::matrix ||
           state == LAPACKSupport::inverse_matrix,
         ExcMessage("norms can be called in matrix state only."));
  std::lock_guard<std::mutex> lock(mutex);
2433 NumberType res = 0.;
2435 if (grid->mpi_process_is_active)
2437 const int iarow = indxg2p_(&submatrix_row,
2439 &(grid->this_process_row),
2441 &(grid->n_process_rows));
2442 const int iacol = indxg2p_(&submatrix_column,
2444 &(grid->this_process_column),
2445 &first_process_column,
2446 &(grid->n_process_columns));
2447 const int mp0 = numroc_(&n_rows,
2449 &(grid->this_process_row),
2451 &(grid->n_process_rows));
2452 const int nq0 = numroc_(&n_columns,
2454 &(grid->this_process_column),
2456 &(grid->n_process_columns));
2462 if (type ==
'O' || type ==
'1')
2464 else if (type ==
'I')
2468 const NumberType *A_loc = this->values.begin();
2478 grid->send_to_inactive(&res);
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
{
  Assert(state == LAPACKSupport::matrix ||
           state == LAPACKSupport::inverse_matrix,
         ExcMessage("norms can be called in matrix state only."));
  Assert(property == LAPACKSupport::symmetric,
         ExcMessage("Matrix has to be symmetric for this operation."));
2493 std::lock_guard<std::mutex> lock(mutex);
2494 NumberType res = 0.;
2496 if (grid->mpi_process_is_active)
2501 ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
2502 const int v2 = lcm / (grid->n_process_rows);
2504 const int IAROW = indxg2p_(&submatrix_row,
2506 &(grid->this_process_row),
2508 &(grid->n_process_rows));
2509 const int IACOL = indxg2p_(&submatrix_column,
2511 &(grid->this_process_column),
2512 &first_process_column,
2513 &(grid->n_process_columns));
      const int Np0 = numroc_(&n_columns,
                              &(grid->this_process_row),
                              &(grid->n_process_rows));
      const int Nq0 = numroc_(&n_columns,
                              &(grid->this_process_column),
                              &(grid->n_process_columns));

      const int v1  = iceil_(&Np0, &row_block_size);
      const int ldw = (n_local_rows == n_local_columns) ?
                        0 :
                        row_block_size * iceil_(&v1, &v2);
        (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
2533 const NumberType *A_loc = this->values.begin();
2543 grid->send_to_inactive(&res);
#  ifdef DEAL_II_WITH_HDF5
  void
  create_HDF5_state_enum_id(hid_t &state_enum_id)
  {
    // HDF5 enum type describing LAPACKSupport::State
    herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
    status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
    status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
    status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
    status = H5Tenum_insert(state_enum_id, "lu", &val);
    status = H5Tenum_insert(state_enum_id, "matrix", &val);
    status = H5Tenum_insert(state_enum_id, "svd", &val);
    status = H5Tenum_insert(state_enum_id, "unusable", &val);
  void
  create_HDF5_property_enum_id(hid_t &property_enum_id)
  {
    // HDF5 enum type describing LAPACKSupport::Property
    herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
    status = H5Tenum_insert(property_enum_id, "general", &prop);
    status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
    status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
    status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
    status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save(
  const std::string &                          filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
2622# ifndef DEAL_II_WITH_HDF5
2628 std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
2634 chunks_size_.first = n_rows;
2635 chunks_size_.second = 1;
  Assert(chunks_size_.first > 0,
         ExcMessage("The row chunk size must be larger than 0."));
  Assert(chunks_size_.second > 0,
         ExcMessage("The column chunk size must be larger than 0."));
2644# ifdef H5_HAVE_PARALLEL
2646 save_parallel(filename, chunks_size_);
2650 save_serial(filename, chunks_size_);
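// Usage sketch (illustrative): write the matrix to an HDF5 file with a chosen
// chunk size and read it back later (possibly with a different number of ranks).
//
//   A.save("matrix.h5", std::make_pair(64u, 64u));
//   ScaLAPACKMatrix<double> B(n_rows, n_columns, grid, 32, 32);
//   B.load("matrix.h5");   // dimensions stored in the file must match B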
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_serial(
  const std::string &                          filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
2664# ifndef DEAL_II_WITH_HDF5
2679 const auto column_grid =
2680 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2684 const int MB = n_rows, NB = n_columns;
2690 if (tmp.
grid->mpi_process_is_active)
2696 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
2699 hsize_t chunk_dims[2];
2702 chunk_dims[0] = chunk_size.second;
2703 chunk_dims[1] = chunk_size.first;
2704 hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
2705 status = H5Pset_chunk(data_property, 2, chunk_dims);
2712 dims[0] = n_columns;
      hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
2718 hid_t dataset_id = H5Dcreate2(file_id,
        dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
2733 hid_t state_enum_id, property_enum_id;
2734 internal::create_HDF5_state_enum_id(state_enum_id);
2735 internal::create_HDF5_property_enum_id(property_enum_id);
2738 hsize_t dims_state[1];
2740 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state,
nullptr);
2742 hid_t state_enum_dataset = H5Dcreate2(file_id,
2745 state_enum_dataspace,
2750 status = H5Dwrite(state_enum_dataset,
2759 hsize_t dims_property[1];
2760 dims_property[0] = 1;
2761 hid_t property_enum_dataspace =
2762 H5Screate_simple(1, dims_property,
nullptr);
2764 hid_t property_enum_dataset = H5Dcreate2(file_id,
2767 property_enum_dataspace,
2772 status = H5Dwrite(property_enum_dataset,
2781 status = H5Dclose(dataset_id);
2783 status = H5Dclose(state_enum_dataset);
2785 status = H5Dclose(property_enum_dataset);
2789 status = H5Sclose(dataspace_id);
2791 status = H5Sclose(state_enum_dataspace);
2793 status = H5Sclose(property_enum_dataspace);
2797 status = H5Tclose(state_enum_id);
2799 status = H5Tclose(property_enum_id);
2803 status = H5Pclose(data_property);
2807 status = H5Fclose(file_id);
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_parallel(
  const std::string &                          filename,
  const std::pair<unsigned int, unsigned int> &chunk_size) const
{
2821# ifndef DEAL_II_WITH_HDF5
  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;
2837 const auto column_grid =
2838 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2842 const int MB = n_rows;
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),
                          column_block_size);
  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
2870 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2876 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
2877 status = H5Pclose(plist_id);
  hid_t filespace = H5Screate_simple(2, dims, nullptr);
2889 hsize_t chunk_dims[2];
2891 chunk_dims[0] = chunk_size.second;
2892 chunk_dims[1] = chunk_size.first;
2893 plist_id = H5Pcreate(H5P_DATASET_CREATE);
2894 H5Pset_chunk(plist_id, 2, chunk_dims);
  hid_t dset_id = H5Dcreate2(
    file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
2899 status = H5Sclose(filespace);
2902 status = H5Pclose(plist_id);
2906 std::vector<int> proc_n_local_rows(n_mpi_processes),
2907 proc_n_local_columns(n_mpi_processes);
2911 proc_n_local_rows.data(),
2914 tmp.
grid->mpi_communicator);
2919 proc_n_local_columns.data(),
2922 tmp.
grid->mpi_communicator);
  const unsigned int my_rank(
    Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

  hid_t memspace = H5Screate_simple(2, count, nullptr);
  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];
2941 filespace = H5Dget_space(dset_id);
  status = H5Sselect_hyperslab(
    filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2947 plist_id = H5Pcreate(H5P_DATASET_XFER);
2948 status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
2952 if (tmp.
values.size() > 0)
2954 status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
2958 status = H5Dclose(dset_id);
2960 status = H5Sclose(filespace);
2962 status = H5Sclose(memspace);
2964 status = H5Pclose(plist_id);
2966 status = H5Fclose(file_id);
  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
  if (tmp.grid->this_mpi_process == 0)
2978 hid_t file_id_reopen =
2979 H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
2983 hid_t state_enum_id, property_enum_id;
2984 internal::create_HDF5_state_enum_id(state_enum_id);
2985 internal::create_HDF5_property_enum_id(property_enum_id);
2988 hsize_t dims_state[1];
2990 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state,
nullptr);
2992 hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
2995 state_enum_dataspace,
3000 status = H5Dwrite(state_enum_dataset,
3009 hsize_t dims_property[1];
3010 dims_property[0] = 1;
3011 hid_t property_enum_dataspace =
3012 H5Screate_simple(1, dims_property,
nullptr);
3014 hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
3017 property_enum_dataspace,
3022 status = H5Dwrite(property_enum_dataset,
3030 status = H5Dclose(state_enum_dataset);
3032 status = H5Dclose(property_enum_dataset);
3034 status = H5Sclose(state_enum_dataspace);
3036 status = H5Sclose(property_enum_dataspace);
3038 status = H5Tclose(state_enum_id);
3040 status = H5Tclose(property_enum_id);
3042 status = H5Fclose(file_id_reopen);
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
{
3055# ifndef DEAL_II_WITH_HDF5
3059# ifdef H5_HAVE_PARALLEL
3061 load_parallel(filename);
3065 load_serial(filename);
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
3087 const auto one_grid =
3088 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3092 const int MB = n_rows, NB = n_columns;
3096 int property_int = -1;
3100 if (tmp.
grid->mpi_process_is_active)
3105 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
      hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3114 hid_t datatype = H5Dget_type(dataset_id);
3115 H5T_class_t t_class_in = H5Tget_class(datatype);
      AssertThrow(
        t_class_in == t_class,
        ExcMessage(
          "The data type of the matrix to be read does not match the archive"));
3123 hid_t dataspace_id = H5Dget_space(dataset_id);
3125 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
      H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
      AssertThrow(
        static_cast<int>(dims[0]) == n_columns,
        ExcMessage(
          "The number of columns of the matrix does not match the content of the archive"));
      AssertThrow(
        static_cast<int>(dims[1]) == n_rows,
        ExcMessage(
          "The number of rows of the matrix does not match the content of the archive"));
3140 status = H5Dread(dataset_id,
3150 hid_t state_enum_id, property_enum_id;
3151 internal::create_HDF5_state_enum_id(state_enum_id);
3152 internal::create_HDF5_property_enum_id(property_enum_id);
      hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3156 hid_t datatype_state = H5Dget_type(dataset_state_id);
3157 H5T_class_t t_class_state = H5Tget_class(datatype_state);
      hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3161 hid_t datatype_property = H5Dget_type(dataset_property_id);
3162 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3166 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3167 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3169 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3171 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3174 hsize_t dims_state[1];
3175 H5Sget_simple_extent_dims(dataspace_state, dims_state,
nullptr);
3177 hsize_t dims_property[1];
3178 H5Sget_simple_extent_dims(dataspace_property, dims_property,
nullptr);
3182 status = H5Dread(dataset_state_id,
      state_int = static_cast<int>(tmp.state);
3194 status = H5Dread(dataset_property_id,
      property_int = static_cast<int>(tmp.property);
3207 status = H5Sclose(dataspace_id);
3209 status = H5Sclose(dataspace_state);
3211 status = H5Sclose(dataspace_property);
3215 status = H5Tclose(datatype);
3217 status = H5Tclose(state_enum_id);
3219 status = H5Tclose(property_enum_id);
3223 status = H5Dclose(dataset_state_id);
3225 status = H5Dclose(dataset_id);
3227 status = H5Dclose(dataset_property_id);
3231 status = H5Fclose(file_id);
  tmp.grid->send_to_inactive(&state_int, 1);
  tmp.grid->send_to_inactive(&property_int, 1);
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
{
#  ifndef DEAL_II_WITH_HDF5
#  ifndef H5_HAVE_PARALLEL
  const unsigned int n_mpi_processes(
    Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;
3271 const auto column_grid =
3272 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3276 const int MB = n_rows;
  const int NB = std::max(static_cast<int>(std::ceil(
                            static_cast<double>(n_columns) / n_mpi_processes)),
                          column_block_size);
  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
3290 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
3296 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
3297 status = H5Pclose(plist_id);
  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3308 hid_t datatype_inp = H5Dget_type(dataset_id);
3309 H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
3310 H5T_class_t t_class = H5Tget_class(datatype);
3312 t_class_inp == t_class,
3314 "The data type of the matrix to be read does not match the archive"));
3318 hid_t dataspace_id = H5Dget_space(dataset_id);
3320 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
  status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
  AssertThrow(
    static_cast<int>(dims[0]) == n_columns,
    ExcMessage(
      "The number of columns of the matrix does not match the content of the archive"));
  AssertThrow(
    static_cast<int>(dims[1]) == n_rows,
    ExcMessage(
      "The number of rows of the matrix does not match the content of the archive"));
3336 std::vector<int> proc_n_local_rows(n_mpi_processes),
3337 proc_n_local_columns(n_mpi_processes);
3341 proc_n_local_rows.data(),
3344 tmp.
grid->mpi_communicator);
3349 proc_n_local_columns.data(),
3352 tmp.
grid->mpi_communicator);
  const unsigned int my_rank(
    Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

  hsize_t offset[2] = {0};
  for (unsigned int i = 0; i < my_rank; ++i)
    offset[0] += proc_n_local_columns[i];
  status = H5Sselect_hyperslab(
    dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
  hid_t memspace = H5Screate_simple(2, count, nullptr);
3379 H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
3383 hid_t state_enum_id, property_enum_id;
3384 internal::create_HDF5_state_enum_id(state_enum_id);
3385 internal::create_HDF5_property_enum_id(property_enum_id);
  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3389 hid_t datatype_state = H5Dget_type(dataset_state_id);
3390 H5T_class_t t_class_state = H5Tget_class(datatype_state);
  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3394 hid_t datatype_property = H5Dget_type(dataset_property_id);
3395 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3399 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3400 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3402 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3404 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
  hsize_t dims_state[1];
  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
  hsize_t dims_property[1];
  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
    dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
3419 status = H5Dread(dataset_property_id,
3428 status = H5Sclose(memspace);
3430 status = H5Dclose(dataset_id);
3432 status = H5Dclose(dataset_state_id);
3434 status = H5Dclose(dataset_property_id);
3436 status = H5Sclose(dataspace_id);
3438 status = H5Sclose(dataspace_state);
3440 status = H5Sclose(dataspace_property);
3444 status = H5Tclose(state_enum_id);
3446 status = H5Tclose(property_enum_id);
3448 status = H5Fclose(file_id);
namespace internal
{
  template <typename NumberType>
  void
  scale_columns(ScaLAPACKMatrix<NumberType> &      matrix,
                const ArrayView<const NumberType> &factors)
  {
    for (unsigned int i = 0; i < matrix.local_n(); ++i)
      {
        const NumberType s = factors[matrix.global_column(i)];
        for (unsigned int j = 0; j < matrix.local_m(); ++j)
          matrix.local_el(j, i) *= s;
      }
  }
  template <typename NumberType>
  void
  scale_rows(ScaLAPACKMatrix<NumberType> &      matrix,
             const ArrayView<const NumberType> &factors)
  {
    for (unsigned int i = 0; i < matrix.local_m(); ++i)
      {
        const NumberType s = factors[matrix.global_row(i)];
        for (unsigned int j = 0; j < matrix.local_n(); ++j)
          matrix.local_el(i, j) *= s;
      }
  }
} // namespace internal
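// Usage sketch (illustrative): the public scale_rows()/scale_columns() members
// below forward to these helpers; `factors` is any vector-like object with one
// entry per global row or column.
//
//   std::vector<double> col_factors(n_columns, 2.0);
//   A.scale_columns(col_factors);  // multiplies every column j by col_factors[j]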
template <typename NumberType>
template <class InputVector>
void
ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_columns(*this, make_array_view(factors));
}

template <typename NumberType>
template <class InputVector>
void
ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_rows(*this, make_array_view(factors));
}

// explicit instantiations
#  include "scalapack.inst"

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK