#include <deal.II/lac/scalapack.h>

#ifdef DEAL_II_WITH_SCALAPACK

#include <deal.II/base/std_cxx14/memory.h>

#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/array_view.h>
#include <deal.II/lac/scalapack.templates.h>

#ifdef DEAL_II_WITH_HDF5
#include <hdf5.h>
#endif

DEAL_II_NAMESPACE_OPEN
#ifdef DEAL_II_WITH_HDF5

template <typename number>
inline hid_t hdf5_type_id (const number *)
{
  Assert (false, dealii::ExcNotImplemented());
  return -1;
}

inline hid_t hdf5_type_id (const double *)
{
  return H5T_NATIVE_DOUBLE;
}

inline hid_t hdf5_type_id (const float *)
{
  return H5T_NATIVE_FLOAT;
}

inline hid_t hdf5_type_id (const int *)
{
  return H5T_NATIVE_INT;
}

inline hid_t hdf5_type_id (const unsigned int *)
{
  return H5T_NATIVE_UINT;
}

inline hid_t hdf5_type_id (const char *)
{
  return H5T_NATIVE_CHAR;
}
#endif // DEAL_II_WITH_HDF5



template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type n_rows_,
                                             const size_type n_columns_,
                                             const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
                                             const size_type row_block_size_,
                                             const size_type column_block_size_,
                                             const LAPACKSupport::Property property_)
  :
  uplo('L'), // for non-symmetric matrices this is not needed
  first_process_row(0),
  first_process_column(0),
  submatrix_row(1),
  submatrix_column(1)
{
  reinit(n_rows_, n_columns_, process_grid, row_block_size_, column_block_size_, property_);
}
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
                                             const std::shared_ptr<const Utilities::MPI::ProcessGrid> process_grid,
                                             const size_type block_size,
                                             const LAPACKSupport::Property property)
  :
  ScaLAPACKMatrix<NumberType>(size, size, process_grid, block_size, block_size, property)
{}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(const size_type n_rows_,
                                    const size_type n_columns_,
                                    const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
                                    const size_type row_block_size_,
                                    const size_type column_block_size_,
                                    const LAPACKSupport::Property property_)
{
  Assert (row_block_size_ > 0,
          ExcMessage("Row block size has to be positive."));
  Assert (column_block_size_ > 0,
          ExcMessage("Column block size has to be positive."));
  Assert (row_block_size_ <= n_rows_,
          ExcMessage("Row block size can not be greater than the number of rows of the matrix"));
  Assert (column_block_size_ <= n_columns_,
          ExcMessage("Column block size can not be greater than the number of columns of the matrix"));

  state = LAPACKSupport::State::matrix;
  property = property_;
  grid = process_grid;
  n_rows = n_rows_;
  n_columns = n_columns_;
  row_block_size = row_block_size_;
  column_block_size = column_block_size_;

  if (grid->mpi_process_is_active)
    {
      // Get local sizes:
      n_local_rows = numroc_(&n_rows, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
      n_local_columns = numroc_(&n_columns, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));

      // the descriptor initialization requires a positive leading dimension:
      int lda = std::max(1, n_local_rows);

      int info = 0;
      descinit_(descriptor, &n_rows, &n_columns,
                &row_block_size, &column_block_size,
                &first_process_row, &first_process_column,
                &(grid->blacs_context), &lda, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("descinit_", info));

      this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
    }
  else
    {
      // set process-local variables to something telling:
      n_local_rows = -1;
      n_local_columns = -1;
      for (unsigned int i = 0; i < 9; ++i)
        descriptor[i] = -1;
    }
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(const size_type size,
                                    const std::shared_ptr<const Utilities::MPI::ProcessGrid> process_grid,
                                    const size_type block_size,
                                    const LAPACKSupport::Property property)
{
  reinit(size, size, process_grid, block_size, block_size, property);
}
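// Usage sketch (illustrative only, not part of this translation unit): create
// a process grid and a block-cyclically distributed matrix. The communicator,
// grid shape, matrix size, and block sizes below are assumptions for the
// example; a 2x2 grid needs at least four MPI processes.
//
//   const auto grid =
//     std::make_shared<Utilities::MPI::ProcessGrid>(MPI_COMM_WORLD, 2, 2);
//   ScaLAPACKMatrix<double> A(1000, 1000, grid, 32, 32);
//   // equivalently: A.reinit(1000, 1000, grid, 32, 32);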
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::set_property(const LAPACKSupport::Property property_)
{
  property = property_;
}
template <typename NumberType>
LAPACKSupport::Property
ScaLAPACKMatrix<NumberType>::get_property() const
{
  return property;
}
template <typename NumberType>
LAPACKSupport::State
ScaLAPACKMatrix<NumberType>::get_state() const
{
  return state;
}
template <typename NumberType>
ScaLAPACKMatrix<NumberType> &
ScaLAPACKMatrix<NumberType>::operator = (const FullMatrix<NumberType> &matrix)
{
  // inefficient, but straightforward: distribute the serial matrix
  Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));

  if (grid->mpi_process_is_active)
    {
      for (int i=0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              local_el(i,j) = matrix(glob_i, glob_j);
            }
        }
    }
  state = LAPACKSupport::matrix;
  return *this;
}
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
{
  Assert (n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
          ExcIndexRange(loc_row, 0, n_local_rows));
  const int i = loc_row+1;
  return indxl2g_ (&i, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows)) - 1;
}
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
{
  Assert (n_local_columns >= 0 && loc_column < static_cast<unsigned int>(n_local_columns),
          ExcIndexRange(loc_column, 0, n_local_columns));
  const int j = loc_column+1;
  return indxl2g_ (&j, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns)) - 1;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to (FullMatrix<NumberType> &matrix) const
{
  Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));

  matrix = 0.;
  if (grid->mpi_process_is_active)
    {
      for (int i=0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              matrix(glob_i, glob_j) = local_el(i,j);
            }
        }
    }
  Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);

  // зero out the triangle that is not stored, depending on the property
  if (property == LAPACKSupport::lower_triangular)
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = i+1; j < matrix.m(); ++j)
        matrix(i,j) = 0.;
  else if (property == LAPACKSupport::upper_triangular)
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = 0; j < i; ++j)
        matrix(i,j) = 0.;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &B,
                                     const std::pair<unsigned int,unsigned int> &offset_A,
                                     const std::pair<unsigned int,unsigned int> &offset_B,
                                     const std::pair<unsigned int,unsigned int> &submatrix_size) const
{
  // submatrix is empty
  if (submatrix_size.first == 0 || submatrix_size.second == 0)
    return;

  // range checking for matrix A
  Assert (offset_A.first < (unsigned int)(n_rows-submatrix_size.first+1),
          ExcIndexRange(offset_A.first, 0, n_rows-submatrix_size.first+1));
  Assert (offset_A.second < (unsigned int)(n_columns-submatrix_size.second+1),
          ExcIndexRange(offset_A.second, 0, n_columns-submatrix_size.second+1));

  // range checking for matrix B
  Assert (offset_B.first < (unsigned int)(B.n_rows-submatrix_size.first+1),
          ExcIndexRange(offset_B.first, 0, B.n_rows-submatrix_size.first+1));
  Assert (offset_B.second < (unsigned int)(B.n_columns-submatrix_size.second+1),
          ExcIndexRange(offset_B.second, 0, B.n_columns-submatrix_size.second+1));

  // matrices A and B must have the same MPI communicator
  int ierr, comparison;
  ierr = MPI_Comm_compare(grid->mpi_communicator, B.grid->mpi_communicator, &comparison);
  AssertThrowMPI(ierr);
  Assert (comparison == MPI_IDENT,
          ExcMessage("Matrix A and B must have a common MPI Communicator"));

  // The routine pgemr2d requires a BLACS context resembling at least the union
  // of the process grids described by the BLACS contexts of matrices A and B.
  int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
  const char *order = "Col";
  int union_n_process_rows = Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
  int union_n_process_columns = 1;
  Cblacs_gridinit(&union_blacs_context, order, union_n_process_rows, union_n_process_columns);

  int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
  Cblacs_gridinfo(this->grid->blacs_context, &n_grid_rows_A, &n_grid_columns_A, &my_row_A, &my_column_A);

  // check whether this process is in the BLACS context of matrix A
  const bool in_context_A = (my_row_A>=0 && my_row_A<n_grid_rows_A) &&
                            (my_column_A>=0 && my_column_A<n_grid_columns_A);

  int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
  Cblacs_gridinfo(B.grid->blacs_context, &n_grid_rows_B, &n_grid_columns_B, &my_row_B, &my_column_B);

  // check whether this process is in the BLACS context of matrix B
  const bool in_context_B = (my_row_B>=0 && my_row_B<n_grid_rows_B) &&
                            (my_column_B>=0 && my_column_B<n_grid_columns_B);

  const int n_rows_submatrix = submatrix_size.first;
  const int n_columns_submatrix = submatrix_size.second;

  // due to Fortran indexing one has to be added
  int ia = offset_A.first+1, ja = offset_A.second+1;
  int ib = offset_B.first+1, jb = offset_B.second+1;

  std::array<int,9> desc_A, desc_B;

  const NumberType *loc_vals_A = nullptr;
  NumberType *loc_vals_B = nullptr;

  // pgemr2d has to be called by all processes in the union BLACS context;
  // processes outside the context of A or B pass a descriptor filled with -1
  if (in_context_A)
    {
      if (this->values.size() != 0)
        loc_vals_A = & this->values[0];

      for (unsigned int i=0; i<desc_A.size(); ++i)
        desc_A[i] = this->descriptor[i];
    }
  else
    desc_A.fill(-1);

  if (in_context_B)
    {
      if (B.values.size() != 0)
        loc_vals_B = & B.values[0];

      for (unsigned int i=0; i<desc_B.size(); ++i)
        desc_B[i] = B.descriptor[i];
    }
  else
    desc_B.fill(-1);

  pgemr2d(&n_rows_submatrix, &n_columns_submatrix,
          loc_vals_A, &ia, &ja, desc_A.data(),
          loc_vals_B, &ib, &jb, desc_B.data(),
          &union_blacs_context);

  B.state = LAPACKSupport::matrix;

  // releasing the union BLACS context
  Cblacs_gridexit(union_blacs_context);
}
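// Usage sketch (assumed sizes): copy a 100x100 block of A, starting at global
// index (10,10), into B at offset (0,0); both matrices must share an MPI
// communicator.
//
//   A.copy_to(B, std::make_pair(10u,10u), std::make_pair(0u,0u),
//             std::make_pair(100u,100u));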
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to (ScaLAPACKMatrix<NumberType> &dest) const
{
  Assert (n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
  Assert (n_columns == dest.n_columns, ExcDimensionMismatch(n_columns, dest.n_columns));

  if (this->grid->mpi_process_is_active)
    AssertThrow (this->descriptor[0]==1,
                 ExcMessage("Copying of ScaLAPACK matrices only implemented for dense matrices"));
  if (dest.grid->mpi_process_is_active)
    AssertThrow (dest.descriptor[0]==1,
                 ExcMessage("Copying of ScaLAPACK matrices only implemented for dense matrices"));

  if (this->grid != dest.grid)
    {
      // The routine pgemr2d requires a BLACS context resembling at least the
      // union of the process grids of source and destination. Therefore, build
      // a communicator containing the union of the processes of both grids.
      int ierr = 0;
      MPI_Group group_source, group_dest, group_union;
      ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
      AssertThrowMPI(ierr);
      ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_union(group_source, group_dest, &group_union);
      AssertThrowMPI(ierr);
      MPI_Comm mpi_communicator_union;

      // we need a communicator that is guaranteed to contain all desired
      // processes, i.e. MPI_COMM_WORLD, to create the union communicator
      ierr = Utilities::MPI::create_group(MPI_COMM_WORLD, group_union, 5, &mpi_communicator_union);
      AssertThrowMPI(ierr);

      int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
      const char *order = "Col";
      int union_n_process_rows = Utilities::MPI::n_mpi_processes(mpi_communicator_union);
      int union_n_process_columns = 1;
      Cblacs_gridinit(&union_blacs_context, order, union_n_process_rows, union_n_process_columns);

      const NumberType *loc_vals_source = nullptr;
      NumberType *loc_vals_dest = nullptr;

      if (this->grid->mpi_process_is_active && (this->values.size()>0))
        loc_vals_source = &this->values[0];
      if (dest.grid->mpi_process_is_active && (dest.values.size()>0))
        loc_vals_dest = &dest.values[0];

      pgemr2d(&n_rows, &n_columns, loc_vals_source, &submatrix_row, &submatrix_column, descriptor,
              loc_vals_dest, &dest.submatrix_row, &dest.submatrix_column, dest.descriptor,
              &union_blacs_context);

      Cblacs_gridexit(union_blacs_context);

      if (mpi_communicator_union != MPI_COMM_NULL)
        {
          ierr = MPI_Comm_free(&mpi_communicator_union);
          AssertThrowMPI(ierr);
        }
      ierr = MPI_Group_free(&group_source);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&group_dest);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&group_union);
      AssertThrowMPI(ierr);
    }
  else
    // both matrices live on the same process grid
    if (this->grid->mpi_process_is_active)
      dest.values = this->values;

  dest.state = state;
  dest.property = property;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_transposed(const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 0, 1, true);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
                                 const NumberType alpha,
                                 const NumberType beta,
                                 const bool transpose_B)
{
  Assert (this->grid == B.grid,
          ExcMessage("The matrices A and B need to have the same process grid"));

  if (this->grid->mpi_process_is_active)
    {
      char trans_b = transpose_B ? 'T' : 'N';
      NumberType *A_loc = (this->values.size()>0) ? &this->values[0] : nullptr;
      const NumberType *B_loc = (B.values.size()>0) ? &B.values[0] : nullptr;

      // A = alpha*A + beta*op(B)
      pgeadd(&trans_b, &n_rows, &n_columns,
             &beta, B_loc, &B.submatrix_row, &B.submatrix_column, B.descriptor,
             &alpha, A_loc, &submatrix_row, &submatrix_column, descriptor);
    }
  state = LAPACKSupport::matrix;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::add(const NumberType b,
                                 const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 1, b, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tadd(const NumberType b,
                                  const ScaLAPACKMatrix<NumberType> &B)
{
  add(B, 1, b, true);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
                                  const ScaLAPACKMatrix<NumberType> &B,
                                  const NumberType c,
                                  ScaLAPACKMatrix<NumberType> &C,
                                  const bool transpose_A,
                                  const bool transpose_B) const
{
  Assert (this->grid == B.grid,
          ExcMessage("The matrices A and B need to have the same process grid"));
  Assert (C.grid == B.grid,
          ExcMessage("The matrices B and C need to have the same process grid"));

  if (!transpose_A && !transpose_B)
    {
      // C = b A B + c C
      Assert (this->n_columns == B.n_rows, ExcDimensionMismatch(this->n_columns, B.n_rows));
      Assert (this->n_rows == C.n_rows, ExcDimensionMismatch(this->n_rows, C.n_rows));
      Assert (B.n_columns == C.n_columns, ExcDimensionMismatch(B.n_columns, C.n_columns));
    }
  else if (transpose_A && !transpose_B)
    {
      // C = b A^T B + c C
      Assert (this->n_rows == B.n_rows, ExcDimensionMismatch(this->n_rows, B.n_rows));
      Assert (this->n_columns == C.n_rows, ExcDimensionMismatch(this->n_columns, C.n_rows));
      Assert (B.n_columns == C.n_columns, ExcDimensionMismatch(B.n_columns, C.n_columns));
    }
  else if (!transpose_A && transpose_B)
    {
      // C = b A B^T + c C
      Assert (this->n_columns == B.n_columns, ExcDimensionMismatch(this->n_columns, B.n_columns));
      Assert (this->n_rows == C.n_rows, ExcDimensionMismatch(this->n_rows, C.n_rows));
      Assert (B.n_rows == C.n_columns, ExcDimensionMismatch(B.n_rows, C.n_columns));
    }
  else // transpose_A && transpose_B
    {
      // C = b A^T B^T + c C
      Assert (this->n_rows == B.n_columns, ExcDimensionMismatch(this->n_rows, B.n_columns));
      Assert (this->n_columns == C.n_rows, ExcDimensionMismatch(this->n_columns, C.n_rows));
      Assert (B.n_rows == C.n_columns, ExcDimensionMismatch(B.n_rows, C.n_columns));
    }

  if (this->grid->mpi_process_is_active)
    {
      char trans_a = transpose_A ? 'T' : 'N';
      char trans_b = transpose_B ? 'T' : 'N';

      const NumberType *A_loc = (this->values.size()>0) ? (&(this->values[0])) : nullptr;
      const NumberType *B_loc = (B.values.size()>0) ? (&(B.values[0])) : nullptr;
      NumberType *C_loc = (C.values.size()>0) ? (&(C.values[0])) : nullptr;
      int m = C.n_rows;
      int n = C.n_columns;
      int k = transpose_A ? this->n_rows : this->n_columns;

      pgemm(&trans_a, &trans_b, &m, &n, &k,
            &b, A_loc, &(this->submatrix_row), &(this->submatrix_column), this->descriptor,
            B_loc, &B.submatrix_row, &B.submatrix_column, B.descriptor,
            &c, C_loc, &C.submatrix_row, &C.submatrix_column, C.descriptor);
    }
  C.state = LAPACKSupport::matrix;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> &C,
                                   const ScaLAPACKMatrix<NumberType> &B,
                                   const bool adding) const
{
  if (adding)
    mult(1., B, 1., C, false, false);
  else
    mult(1., B, 0, C, false, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> &C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool adding) const
{
  if (adding)
    mult(1., B, 1., C, true, false);
  else
    mult(1., B, 0, C, true, false);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> &C,
                                    const ScaLAPACKMatrix<NumberType> &B,
                                    const bool adding) const
{
  if (adding)
    mult(1., B, 1., C, false, true);
  else
    mult(1., B, 0, C, false, true);
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> &C,
                                     const ScaLAPACKMatrix<NumberType> &B,
                                     const bool adding) const
{
  if (adding)
    mult(1., B, 1., C, true, true);
  else
    mult(1., B, 0, C, true, true);
}
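// Usage sketch: the convenience wrappers all forward to mult(); matrices are
// assumed to live on the same process grid with matching dimensions.
//
//   A.Tmmult(C, B);         // C = A^T B
//   A.mult(2., B, 1., C);   // general form: C = 2 A B + C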
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
{
  Assert (n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
          ExcMessage("Cholesky factorization can be applied to symmetric matrices only."));
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      NumberType *A_loc = &this->values[0];
      ppotrf(&uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("ppotrf", info));
    }
  property = (uplo=='L' ? LAPACKSupport::lower_triangular : LAPACKSupport::upper_triangular);
  state = LAPACKSupport::cholesky;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      NumberType *A_loc = &this->values[0];

      const int iarow = indxg2p_(&submatrix_row, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
      const int mp = numroc_(&n_rows, &row_block_size, &(grid->this_process_row), &iarow, &(grid->n_process_rows));
      ipiv.resize(mp+row_block_size);

      pgetrf(&n_rows, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, ipiv.data(), &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgetrf", info));
    }
  state = LAPACKSupport::State::lu;
  property = LAPACKSupport::Property::general;
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::invert()
{
  // Check whether the matrix is symmetric and save the flag. If a Cholesky
  // factorization has been applied previously, the original matrix was symmetric.
  const bool is_symmetric = (property == LAPACKSupport::symmetric ||
                             state == LAPACKSupport::State::cholesky);

  // If the matrix is neither in Cholesky nor LU state, compute the required
  // factorization based on the property of the matrix.
  if ( !(state==LAPACKSupport::State::lu || state==LAPACKSupport::State::cholesky) )
    {
      if (is_symmetric)
        compute_cholesky_factorization();
      else
        compute_lu_factorization();
    }
  if (grid->mpi_process_is_active)
    {
      int info = 0;
      NumberType *A_loc = &this->values[0];

      if (is_symmetric)
        {
          ppotri(&uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, &info);
          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("ppotri", info));
        }
      else
        {
          int lwork=-1, liwork=-1;
          work.resize(1);
          iwork.resize(1);

          // workspace query
          pgetri(&n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
                 ipiv.data(), work.data(), &lwork, iwork.data(), &liwork, &info);
          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgetri", info));

          lwork = work[0];
          liwork = iwork[0];
          work.resize(lwork);
          iwork.resize(liwork);

          pgetri(&n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
                 ipiv.data(), work.data(), &lwork, iwork.data(), &liwork, &info);
          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgetri", info));
        }
    }
  state = LAPACKSupport::State::inverse_matrix;
}
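// Usage sketch: for a symmetric positive definite matrix, invert() factorizes
// via Cholesky internally; for a general matrix it falls back to an LU
// decomposition.
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   A.invert();   // A now holds its inverse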
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(const std::pair<unsigned int,unsigned int> &index_limits,
                                                           const bool compute_eigenvectors)
{
  // check validity of index limits
  Assert (index_limits.first < (unsigned int)n_rows,
          ExcIndexRange(index_limits.first, 0, n_rows));
  Assert (index_limits.second < (unsigned int)n_rows,
          ExcIndexRange(index_limits.second, 0, n_rows));

  std::pair<unsigned int,unsigned int> idx = std::make_pair(std::min(index_limits.first, index_limits.second),
                                                            std::max(index_limits.first, index_limits.second));

  // compute all eigenvalues/eigenvectors
  if (idx.first==0 && idx.second==(unsigned int)n_rows-1)
    return eigenpairs_symmetric(compute_eigenvectors);
  else
    return eigenpairs_symmetric(compute_eigenvectors, idx);
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(const std::pair<NumberType,NumberType> &value_limits,
                                                           const bool compute_eigenvectors)
{
  Assert (!std::isnan(value_limits.first),
          ExcMessage("value_limits.first is NaN"));
  Assert (!std::isnan(value_limits.second),
          ExcMessage("value_limits.second is NaN"));

  std::pair<unsigned int,unsigned int> indices = std::make_pair(numbers::invalid_unsigned_int,
                                                                numbers::invalid_unsigned_int);

  return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(const bool compute_eigenvectors,
                                                  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
                                                  const std::pair<NumberType,NumberType> &eigenvalue_limits)
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));
  Assert (property == LAPACKSupport::symmetric,
          ExcMessage("Matrix has to be symmetric for this operation."));

  const bool use_values = (std::isnan(eigenvalue_limits.first) || std::isnan(eigenvalue_limits.second)) ? false : true;
  const bool use_indices = ((eigenvalue_idx.first==numbers::invalid_unsigned_int) || (eigenvalue_idx.second==numbers::invalid_unsigned_int)) ? false : true;

  Assert(!(use_values && use_indices),
         ExcMessage("Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // if computation of eigenvectors is not required, use a sufficiently small distributed matrix
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
    std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows, grid, row_block_size) :
    std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

  eigenvectors->property = property;
  // number of eigenvalues to be returned; upon successful exit ev contains the
  // m selected eigenvalues in ascending order
  int m = n_rows;
  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      // for jobz=='N' only eigenvalues are computed, for jobz=='V' also the eigenvectors
      char jobz = compute_eigenvectors ? 'V' : 'N';
      char range = 'A';
      // default is to compute all eigenvalues and optionally eigenvectors
      bool all_eigenpairs = true;
      NumberType vl = NumberType(), vu = NumberType();
      int il = 1, iu = 1;
      // number of eigenvectors to be returned; upon successful exit the first
      // m=nz columns contain the selected eigenvectors (only if jobz=='V')
      int nz = 0;
      NumberType abstol = NumberType();

      // orfac decides which eigenvectors should be reorthogonalized;
      // a value of zero essentially yields the default
      NumberType orfac = 0;
      // contains the indices of eigenvectors that failed to converge
      std::vector<int> ifail;
      // indices of eigenvectors corresponding to a cluster of eigenvalues that
      // could not be reorthogonalized due to insufficient workspace
      std::vector<int> iclustr;
      // gap between eigenvalues whose eigenvectors could not be reorthogonalized
      std::vector<NumberType> gap(n_local_rows * n_local_columns);

      // index range for eigenvalues is not specified
      if (!use_indices)
        {
          // interval for eigenvalues is not specified either and consequently
          // all eigenvalues/eigenpairs will be computed
          if (!use_values)
            {
              range = 'A';
              all_eigenpairs = true;
            }
          else
            {
              range = 'V';
              all_eigenpairs = false;
              vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
              vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
            }
        }
      else
        {
          range = 'I';
          all_eigenpairs = false;
          // as Fortran starts counting from 1, the indices are incremented by one
          il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
        }
      NumberType *A_loc = &this->values[0];
      // by setting lwork to -1 a workspace query for the optimal length of work is performed
      int lwork = -1;
      int liwork = -1;
      NumberType *eigenvectors_loc = (compute_eigenvectors ? &eigenvectors->values[0] : nullptr);
      work.resize(1);
      iwork.resize(1);

      if (all_eigenpairs)
        {
          psyev(&jobz, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor, &ev[0],
                eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
                &work[0], &lwork, &info);
        }
      else
        {
          char cmach = compute_eigenvectors ? 'U' : 'S';
          plamch( &(this->grid->blacs_context), &cmach, abstol);
          abstol *= 2;
          ifail.resize(n_rows);
          iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
          gap.resize(grid->n_process_rows * grid->n_process_columns);

          psyevx(&jobz, &range, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor,
                 &vl, &vu, &il, &iu, &abstol, &m, &nz, &ev[0], &orfac,
                 eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
                 &work[0], &lwork, &iwork[0], &liwork, &ifail[0], &iclustr[0], &gap[0], &info);
        }
      lwork = work[0];
      work.resize(lwork);

      if (all_eigenpairs)
        {
          psyev(&jobz, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor, &ev[0],
                eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
                &work[0], &lwork, &info);
        }
      else
        {
          liwork = iwork[0];
          iwork.resize(liwork);

          psyevx(&jobz, &range, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor,
                 &vl, &vu, &il, &iu, &abstol, &m, &nz, &ev[0], &orfac,
                 eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
                 &work[0], &lwork, &iwork[0], &liwork, &ifail[0], &iclustr[0], &gap[0], &info);
        }
      // if eigenvectors are queried, copy them to the original matrix; as the
      // temporary matrix has identical dimensions and block-cyclic distribution,
      // simply swap the local arrays
      if (compute_eigenvectors)
        this->values.swap(eigenvectors->values);

      // adapt the size of ev to fit m upon return
      while ((int)ev.size() > m)
        ev.pop_back();
    }
  // send the number of computed eigenvalues to inactive processes
  grid->send_to_inactive(&m, 1);

  // inactive processes have to resize the array of eigenvalues
  if (! grid->mpi_process_is_active)
    ev.resize(m);
  // send the eigenvalues to processes not being part of the process grid
  grid->send_to_inactive(ev.data(), ev.size());

  // if only eigenvalues are queried, the content of the matrix is destroyed; if the
  // eigenpairs are queried, matrix A on exit stores the eigenvectors in its columns
  if (compute_eigenvectors)
    {
      property = LAPACKSupport::Property::general;
      state = LAPACKSupport::eigenvalues;
    }
  else
    state = LAPACKSupport::unusable;

  return ev;
}
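// Usage sketch (assumed sizes): compute the ten smallest eigenpairs of a
// symmetric matrix; on exit the eigenvectors are stored column-wise in A.
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   const std::vector<double> ev =
//     A.eigenpairs_symmetric_by_index(std::make_pair(0u, 9u), true);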
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(const std::pair<unsigned int,unsigned int> &index_limits,
                                                                const bool compute_eigenvectors)
{
  // check validity of index limits
  AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
  AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));

  const std::pair<unsigned int,unsigned int> idx = std::make_pair(std::min(index_limits.first, index_limits.second),
                                                                  std::max(index_limits.first, index_limits.second));

  // compute all eigenvalues/eigenvectors
  if (idx.first==0 && idx.second==static_cast<unsigned int>(n_rows-1))
    return eigenpairs_symmetric_MRRR(compute_eigenvectors);
  else
    return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(const std::pair<NumberType,NumberType> &value_limits,
                                                                const bool compute_eigenvectors)
{
  AssertIsFinite(value_limits.first);
  AssertIsFinite(value_limits.second);

  const std::pair<unsigned int,unsigned int> indices = std::make_pair(numbers::invalid_unsigned_int,
                                                                      numbers::invalid_unsigned_int);

  return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(const bool compute_eigenvectors,
                                                       const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
                                                       const std::pair<NumberType,NumberType> &eigenvalue_limits)
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));
  Assert (property == LAPACKSupport::symmetric,
          ExcMessage("Matrix has to be symmetric for this operation."));

  const bool use_values = (std::isnan(eigenvalue_limits.first) || std::isnan(eigenvalue_limits.second)) ? false : true;
  const bool use_indices = ((eigenvalue_idx.first==numbers::invalid_unsigned_int) || (eigenvalue_idx.second==numbers::invalid_unsigned_int)) ? false : true;

  Assert(!(use_values && use_indices),
         ExcMessage("Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // if computation of eigenvectors is not required, use a sufficiently small distributed matrix
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
    compute_eigenvectors ?
    std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows, grid, row_block_size) :
    std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(grid->n_process_rows, grid->n_process_columns, grid, 1, 1);

  eigenvectors->property = property;
  // number of eigenvalues to be returned; upon successful exit ev contains the
  // m selected eigenvalues in ascending order
  int m = n_rows;
  std::vector<NumberType> ev(n_rows);

  // number of eigenvectors to be returned; upon successful exit the first
  // m=nz columns contain the selected eigenvectors (only if jobz=='V')
  int nz = 0;

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      // for jobz=='N' only eigenvalues are computed, for jobz=='V' also the eigenvectors
      char jobz = compute_eigenvectors ? 'V' : 'N';
      // default value is to compute all eigenvalues and optionally eigenvectors
      char range = 'A';
      NumberType vl = NumberType(), vu = NumberType();
      int il = 1, iu = 1;

      // index range for eigenvalues is not specified
      if (!use_indices)
        {
          if (use_values)
            {
              range = 'V';
              vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
              vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
            }
        }
      else
        {
          range = 'I';
          // as Fortran starts counting from 1, the indices are incremented by one
          il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
        }
      NumberType *A_loc = &this->values[0];

      // by setting lwork to -1 a workspace query for the optimal length of work is performed
      int lwork = -1;
      int liwork = -1;
      NumberType *eigenvectors_loc = (compute_eigenvectors ? &eigenvectors->values[0] : nullptr);
      work.resize(1);
      iwork.resize(1);

      psyevr(&jobz, &range, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &vl, &vu, &il, &iu, &m, &nz, ev.data(),
             eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
             work.data(), &lwork, iwork.data(), &liwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("psyevr", info));

      lwork = work[0];
      work.resize(lwork);
      liwork = iwork[0];
      iwork.resize(liwork);

      psyevr(&jobz, &range, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &vl, &vu, &il, &iu, &m, &nz, ev.data(),
             eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
             work.data(), &lwork, iwork.data(), &liwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("psyevr", info));

      if (compute_eigenvectors)
        AssertThrow(m==nz,
                    ExcMessage("psyevr failed to compute all eigenvectors for the selected eigenvalues"));

      // if eigenvectors are queried, copy them to the original matrix; as the
      // temporary matrix has identical dimensions and block-cyclic distribution,
      // simply swap the local arrays
      if (compute_eigenvectors)
        this->values.swap(eigenvectors->values);

      // adapt the size of ev to fit m upon return
      while ((int)ev.size() > m)
        ev.pop_back();
    }
  // send the number of computed eigenvalues to inactive processes
  grid->send_to_inactive(&m, 1);

  // inactive processes have to resize the array of eigenvalues
  if (! grid->mpi_process_is_active)
    ev.resize(m);
  // send the eigenvalues to processes not being part of the process grid
  grid->send_to_inactive(ev.data(), ev.size());

  // if only eigenvalues are queried, the content of the matrix is destroyed; if the
  // eigenpairs are queried, matrix A on exit stores the eigenvectors in its columns
  if (compute_eigenvectors)
    {
      property = LAPACKSupport::Property::general;
      state = LAPACKSupport::eigenvalues;
    }
  else
    state = LAPACKSupport::unusable;

  return ev;
}
template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
                                         ScaLAPACKMatrix<NumberType> *VT)
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));

  const bool left_singular_vectors = (U != nullptr) ? true : false;
  const bool right_singular_vectors = (VT != nullptr) ? true : false;

  if (left_singular_vectors)
    {
      Assert (n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
      Assert (U->n_rows == U->n_columns, ExcDimensionMismatch(U->n_rows, U->n_columns));
      Assert (row_block_size == U->row_block_size, ExcDimensionMismatch(row_block_size, U->row_block_size));
      Assert (column_block_size == U->column_block_size, ExcDimensionMismatch(column_block_size, U->column_block_size));
    }
  if (right_singular_vectors)
    {
      Assert (n_columns == VT->n_rows, ExcDimensionMismatch(n_columns, VT->n_rows));
      Assert (VT->n_rows == VT->n_columns, ExcDimensionMismatch(VT->n_rows, VT->n_columns));
      Assert (row_block_size == VT->row_block_size, ExcDimensionMismatch(row_block_size, VT->row_block_size));
      Assert (column_block_size == VT->column_block_size, ExcDimensionMismatch(column_block_size, VT->column_block_size));
    }
  std::vector<NumberType> sv(std::min(n_rows, n_columns));

  if (grid->mpi_process_is_active)
    {
      char jobu = left_singular_vectors ? 'V' : 'N';
      char jobvt = right_singular_vectors ? 'V' : 'N';
      NumberType *A_loc = &this->values[0];
      NumberType *U_loc = left_singular_vectors ? &(U->values[0]) : nullptr;
      NumberType *VT_loc = right_singular_vectors ? &(VT->values[0]) : nullptr;
      int info = 0;
      // by setting lwork to -1 a workspace query for the optimal length of work is performed
      int lwork = -1;
      work.resize(1);

      pgesvd(&jobu, &jobvt, &n_rows, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &*sv.begin(), U_loc, &U->submatrix_row, &U->submatrix_column, U->descriptor,
             VT_loc, &VT->submatrix_row, &VT->submatrix_column, VT->descriptor,
             &work[0], &lwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgesvd", info));

      lwork = work[0];
      work.resize(lwork);

      pgesvd(&jobu, &jobvt, &n_rows, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &*sv.begin(), U_loc, &U->submatrix_row, &U->submatrix_column, U->descriptor,
             VT_loc, &VT->submatrix_row, &VT->submatrix_column, VT->descriptor,
             &work[0], &lwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgesvd", info));
    }

  // distribute the singular values to all processes
  grid->send_to_inactive(sv.data(), sv.size());

  property = LAPACKSupport::Property::general;
  state = LAPACKSupport::State::unusable;

  return sv;
}
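// Usage sketch (assumed sizes): a full SVD A = U * Sigma * VT requires U and
// VT to be square matrices sharing the block size and process grid of A.
//
//   ScaLAPACKMatrix<double> U(A.m(), A.m(), grid, 32);
//   ScaLAPACKMatrix<double> VT(A.n(), A.n(), grid, 32);
//   const std::vector<double> sigma = A.compute_SVD(&U, &VT);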
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
                                           const bool transpose)
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));
  Assert (B.state == LAPACKSupport::matrix,
          ExcMessage("Matrix B has to be in Matrix state before calling this function."));

  if (transpose)
    Assert (n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
  else
    Assert (n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));

  Assert(row_block_size==column_block_size,
         ExcMessage("Use identical block sizes for rows and columns of matrix A"));
  Assert(B.row_block_size==B.column_block_size,
         ExcMessage("Use identical block sizes for rows and columns of matrix B"));
  Assert(row_block_size==B.row_block_size,
         ExcMessage("Use identical block-cyclic distribution for matrices A and B"));

  if (grid->mpi_process_is_active)
    {
      char trans = transpose ? 'T' : 'N';
      NumberType *A_loc = & this->values[0];
      NumberType *B_loc = & B.values[0];
      int info = 0;
      // by setting lwork to -1 a workspace query for the optimal length of work is performed
      int lwork = -1;
      work.resize(1);

      pgels(&trans, &n_rows, &n_columns, &B.n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
            B_loc, &B.submatrix_row, &B.submatrix_column, B.descriptor, &work[0], &lwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgels", info));

      lwork = work[0];
      work.resize(lwork);

      pgels(&trans, &n_rows, &n_columns, &B.n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
            B_loc, &B.submatrix_row, &B.submatrix_column, B.descriptor, &work[0], &lwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgels", info));
    }
  state = LAPACKSupport::State::unusable;
}
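// Usage sketch (assumed sizes): solve an overdetermined system A x = b in the
// least-squares sense. B holds the right-hand side(s) on entry and the
// solution in its leading rows on exit; A and B must be created with identical
// block sizes.
//
//   ScaLAPACKMatrix<double> B(A.m(), n_rhs, grid, 32);  // assumes n_rhs >= 32
//   // ... fill B with right-hand sides ...
//   A.least_squares(B);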
template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size==column_block_size,
         ExcMessage("Use identical block sizes for rows and columns of matrix A"));
  Assert(ratio>0. && ratio<1.,
         ExcMessage("input parameter ratio has to be larger than zero and smaller than 1"));

  ScaLAPACKMatrix<NumberType> U(n_rows, n_rows, grid, row_block_size, row_block_size, LAPACKSupport::Property::general);
  ScaLAPACKMatrix<NumberType> VT(n_columns, n_columns, grid, row_block_size, row_block_size, LAPACKSupport::Property::general);
  std::vector<NumberType> sv = this->compute_SVD(&U, &VT);

  // Get the number of singular values satisfying sv[i] > sv[0] * ratio.
  // The 0-th element trivially satisfies sv[0] > sv[0] * ratio, and the
  // singular values in sv are ordered by descending value, so we break out of
  // the loop at the first singular value below the threshold.
  unsigned int n_sv = 1;
  std::vector<NumberType> inv_sigma;
  inv_sigma.push_back(1/sv[0]);

  for (unsigned int i=1; i<sv.size(); ++i)
    if (sv[i] > sv[0] * ratio)
      {
        ++n_sv;
        inv_sigma.push_back(1/sv[i]);
      }
    else
      break;

  // For the matrix multiplication we use only the columns of U and rows of VT
  // that are associated with singular values larger than the limit. That saves
  // computational time for matrices with rank significantly smaller than min(n_rows,n_columns).
  ScaLAPACKMatrix<NumberType> U_R(n_rows, n_sv, grid, row_block_size, row_block_size, LAPACKSupport::Property::general);
  ScaLAPACKMatrix<NumberType> VT_R(n_sv, n_columns, grid, row_block_size, row_block_size, LAPACKSupport::Property::general);
  U.copy_to(U_R, std::make_pair(0,0), std::make_pair(0,0), std::make_pair(n_rows, n_sv));
  VT.copy_to(VT_R, std::make_pair(0,0), std::make_pair(0,0), std::make_pair(n_sv, n_columns));

  VT_R.scale_rows(inv_sigma);
  this->reinit(n_columns, n_rows, this->grid, row_block_size, column_block_size, LAPACKSupport::Property::general);
  VT_R.mult(1, U_R, 0, *this, true, true);
  state = LAPACKSupport::State::inverse_matrix;
  return n_sv;
}
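// Usage sketch: compute the Moore-Penrose pseudoinverse, treating singular
// values below ratio * sigma_max as zero; the return value is the effective rank.
//
//   const unsigned int rank = A.pseudoinverse(1e-8);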
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(const NumberType a_norm) const
{
  Assert (state == LAPACKSupport::cholesky,
          ExcMessage("Matrix has to be in Cholesky state before calling this function."));
  NumberType rcond = 0.;

  if (grid->mpi_process_is_active)
    {
      int liwork = n_local_rows;
      iwork.resize(liwork);

      int info = 0;
      const NumberType *A_loc = &this->values[0];

      // by setting lwork to -1 a workspace query for the optimal length of work is performed
      int lwork = -1;
      work.resize(1);
      ppocon(&uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &a_norm, &rcond, &work[0], &lwork, &iwork[0], &liwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdpocon", info));
      lwork = std::ceil(work[0]);
      work.resize(lwork);

      // now the actual run:
      ppocon(&uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &a_norm, &rcond, &work[0], &lwork, &iwork[0], &liwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdpocon", info));
    }
  grid->send_to_inactive(&rcond);
  return rcond;
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::l1_norm() const
{
  const char type('O');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::linfty_norm() const
{
  const char type('I');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::frobenius_norm() const
{
  const char type('F');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}
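// Usage sketch: the norms dispatch to plange/plansy depending on whether the
// matrix is flagged symmetric.
//
//   const double l1   = A.l1_norm();
//   const double linf = A.linfty_norm();
//   const double fro  = A.frobenius_norm();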
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
{
  Assert (state == LAPACKSupport::matrix ||
          state == LAPACKSupport::inverse_matrix,
          ExcMessage("norms can be called in matrix state only."));
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      const int iarow = indxg2p_(&submatrix_row, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
      const int iacol = indxg2p_(&submatrix_column, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));
      const int mp0 = numroc_(&n_rows, &row_block_size, &(grid->this_process_row), &iarow, &(grid->n_process_rows));
      const int nq0 = numroc_(&n_columns, &column_block_size, &(grid->this_process_column), &iacol, &(grid->n_process_columns));

      // type=='M': compute largest absolute value
      // type=='F' || type=='E': compute Frobenius norm
      // type=='O' || type=='1': compute one norm; type=='I': infinity norm
      int lwork = 0; // for type=='M', 'F' and 'E'
      if (type=='O' || type=='1')
        lwork = nq0;
      else if (type=='I')
        lwork = mp0;

      work.resize(lwork);
      const NumberType *A_loc = this->values.begin();
      res = plange(&type, &n_rows, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, work.data());
    }
  grid->send_to_inactive(&res);
  return res;
}
template <typename NumberType>
NumberType
ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
{
  Assert (state == LAPACKSupport::matrix ||
          state == LAPACKSupport::inverse_matrix,
          ExcMessage("norms can be called in matrix state only."));
  Assert (property == LAPACKSupport::symmetric,
          ExcMessage("Matrix has to be symmetric for this operation."));
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      // workspace size per the documentation of plansy:
      const int lcm = ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
      const int v2 = lcm/(grid->n_process_rows);

      const int IAROW = indxg2p_(&submatrix_row, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
      const int IACOL = indxg2p_(&submatrix_column, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));
      const int Np0 = numroc_(&n_columns, &row_block_size, &(grid->this_process_row), &IAROW, &(grid->n_process_rows));
      const int Nq0 = numroc_(&n_columns, &column_block_size, &(grid->this_process_column), &IACOL, &(grid->n_process_columns));

      const int v1 = iceil_(&Np0, &row_block_size);
      const int ldw = (n_local_rows==n_local_columns) ?
                      0 :
                      row_block_size*iceil_(&v1, &v2);

      const int lwork = (type == 'M' || type == 'F' || type == 'E') ?
                        0 :
                        2*Nq0+Np0+ldw;
      work.resize(lwork);
      const NumberType *A_loc = this->values.begin();
      res = plansy(&type, &uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, work.data());
    }
  grid->send_to_inactive(&res);
  return res;
}
#ifdef DEAL_II_WITH_HDF5
namespace internal
{
  namespace
  {
    void create_HDF5_state_enum_id(hid_t &state_enum_id)
    {
      // create an HDF5 enum type for LAPACKSupport::State
      LAPACKSupport::State val;
      state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));
      val = LAPACKSupport::State::cholesky;
      herr_t status = H5Tenum_insert (state_enum_id, "cholesky", (int *)&val);
      val = LAPACKSupport::State::eigenvalues;
      status = H5Tenum_insert (state_enum_id, "eigenvalues", (int *)&val);
      val = LAPACKSupport::State::inverse_matrix;
      status = H5Tenum_insert (state_enum_id, "inverse_matrix", (int *)&val);
      val = LAPACKSupport::State::inverse_svd;
      status = H5Tenum_insert (state_enum_id, "inverse_svd", (int *)&val);
      val = LAPACKSupport::State::lu;
      status = H5Tenum_insert (state_enum_id, "lu", (int *)&val);
      val = LAPACKSupport::State::matrix;
      status = H5Tenum_insert (state_enum_id, "matrix", (int *)&val);
      val = LAPACKSupport::State::svd;
      status = H5Tenum_insert (state_enum_id, "svd", (int *)&val);
      val = LAPACKSupport::State::unusable;
      status = H5Tenum_insert (state_enum_id, "unusable", (int *)&val);
      (void)status;
    }

    void create_HDF5_property_enum_id(hid_t &property_enum_id)
    {
      // create an HDF5 enum type for LAPACKSupport::Property
      property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));
      LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
      herr_t status = H5Tenum_insert (property_enum_id, "diagonal", (int *)&prop);
      prop = LAPACKSupport::Property::general;
      status = H5Tenum_insert (property_enum_id, "general", (int *)&prop);
      prop = LAPACKSupport::Property::hessenberg;
      status = H5Tenum_insert (property_enum_id, "hessenberg", (int *)&prop);
      prop = LAPACKSupport::Property::lower_triangular;
      status = H5Tenum_insert (property_enum_id, "lower_triangular", (int *)&prop);
      prop = LAPACKSupport::Property::symmetric;
      status = H5Tenum_insert (property_enum_id, "symmetric", (int *)&prop);
      prop = LAPACKSupport::Property::upper_triangular;
      status = H5Tenum_insert (property_enum_id, "upper_triangular", (int *)&prop);
      (void)status;
    }
  }
}
#endif
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save(const char *filename,
                                  const std::pair<unsigned int,unsigned int> &chunk_size) const
{
#ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)chunk_size;
  AssertThrow(false, ExcMessage("HDF5 support is disabled."));
#else

  std::pair<unsigned int,unsigned int> chunks_size_ = chunk_size;

  if (chunks_size_.first==numbers::invalid_unsigned_int ||
      chunks_size_.second==numbers::invalid_unsigned_int)
    {
      // default: store the matrix in chunks of columns
      chunks_size_.first = n_rows;
      chunks_size_.second = 1;
    }
  Assert((chunks_size_.first <= (unsigned int)n_rows) && (chunks_size_.first>0),
         ExcIndexRange(chunks_size_.first, 1, n_rows+1));
  Assert((chunks_size_.second <= (unsigned int)n_columns) && (chunks_size_.second>0),
         ExcIndexRange(chunks_size_.second, 1, n_columns+1));

# ifdef H5_HAVE_PARALLEL
  // implementation for configurations equipped with a parallel file system
  save_parallel(filename, chunks_size_);
# else
  // implementation for configurations with no parallel file system
  save_serial(filename, chunks_size_);
# endif
#endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_serial(const char *filename,
                                         const std::pair<unsigned int,unsigned int> &chunk_size) const
{
# ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)chunk_size;
  Assert(false, ExcInternalError());
# else

  /*
   * The content of the distributed matrix is copied to a matrix using a 1x1
   * process grid. Therefore, one process has all the data and can write it to a file.
   */
  const auto column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator, 1, 1);

  const int MB = n_rows, NB = n_columns;
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
  copy_to(tmp);

  // the 1x1 grid has only one process, which writes the content of the matrix to the HDF5 file
  if (tmp.grid->mpi_process_is_active)
    {
      herr_t status;

      // create a new file using default properties
      hid_t file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

      // modify dataset creation properties, i.e. enable chunking
      hsize_t chunk_dims[2];
      // revert the order of rows and columns as ScaLAPACK uses column-major ordering
      chunk_dims[0] = chunk_size.second;
      chunk_dims[1] = chunk_size.first;
      hid_t data_property = H5Pcreate (H5P_DATASET_CREATE);
      status = H5Pset_chunk (data_property, 2, chunk_dims);

      // create the data space for the dataset
      hsize_t dims[2];
      dims[0] = n_columns;
      dims[1] = n_rows;
      hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);

      // create the dataset within the file using the chunk creation properties
      hid_t type_id = hdf5_type_id(&tmp.values[0]);
      hid_t dataset_id = H5Dcreate2(file_id, "/matrix",
                                    type_id, dataspace_id,
                                    H5P_DEFAULT, data_property, H5P_DEFAULT);

      // write the dataset
      status = H5Dwrite(dataset_id, type_id,
                        H5S_ALL, H5S_ALL, H5P_DEFAULT,
                        &tmp.values[0]);

      // create HDF5 enum types for LAPACKSupport::State and LAPACKSupport::Property
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      // create the data space and dataset for the state enum, then write it
      hsize_t dims_state[1];
      dims_state[0] = 1;
      hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
      hid_t state_enum_dataset = H5Dcreate2(file_id, "/state", state_enum_id, state_enum_dataspace,
                                            H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
      status = H5Dwrite(state_enum_dataset, state_enum_id,
                        H5S_ALL, H5S_ALL, H5P_DEFAULT,
                        &state);

      // create the data space and dataset for the property enum, then write it
      hsize_t dims_property[1];
      dims_property[0] = 1;
      hid_t property_enum_dataspace = H5Screate_simple(1, dims_property, nullptr);
      hid_t property_enum_dataset = H5Dcreate2(file_id, "/property", property_enum_id, property_enum_dataspace,
                                               H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
      status = H5Dwrite(property_enum_dataset, property_enum_id,
                        H5S_ALL, H5S_ALL, H5P_DEFAULT,
                        &property);

      // end access to the datasets and release resources used by them
      status = H5Dclose(dataset_id);
      status = H5Dclose(state_enum_dataset);
      status = H5Dclose(property_enum_dataset);

      // terminate access to the data spaces
      status = H5Sclose(dataspace_id);
      status = H5Sclose(state_enum_dataspace);
      status = H5Sclose(property_enum_dataspace);

      // release the enum data types
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);

      // release the creation property
      status = H5Pclose (data_property);

      // close the file
      status = H5Fclose(file_id);
      (void)status;
    }
# endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::save_parallel(const char *filename,
                                           const std::pair<unsigned int,unsigned int> &chunk_size) const
{
# ifndef DEAL_II_WITH_HDF5
  (void)filename;
  (void)chunk_size;
  Assert(false, ExcInternalError());
# else

  const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;
  /*
   * The content of the distributed matrix is copied to a matrix using a
   * 1 x n_processes process grid. Therefore, the processes hold contiguous
   * chunks of the matrix, which they can write to the file.
   */
  const auto column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator, 1, n_mpi_processes);

  const int MB = n_rows, NB = std::ceil(n_columns/n_mpi_processes);
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
  copy_to(tmp);

  // get a pointer to the data held by this process
  NumberType *data = (tmp.values.size()>0) ? &tmp.values[0] : nullptr;

  herr_t status;
  // dataset dimensions
  hsize_t dims[2];

  // set up a file access property list with parallel I/O access
  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);

  // create a new file collectively and release the property list identifier
  hid_t file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
  status = H5Pclose(plist_id);

  // as ScaLAPACK (and hence this class) uses column-major ordering, but HDF5
  // uses row-major ordering, the entries related to rows and columns are reversed
  dims[0] = tmp.n_columns;
  dims[1] = tmp.n_rows;
  hid_t filespace = H5Screate_simple(2, dims, nullptr);

  // create the chunked dataset with default properties and close the filespace
  hsize_t chunk_dims[2];
  // revert the order of rows and columns as ScaLAPACK uses column-major ordering
  chunk_dims[0] = chunk_size.second;
  chunk_dims[1] = chunk_size.first;
  plist_id = H5Pcreate(H5P_DATASET_CREATE);
  H5Pset_chunk(plist_id, 2, chunk_dims);
  hid_t type_id = hdf5_type_id(data);
  hid_t dset_id = H5Dcreate2(file_id, "/matrix", type_id,
                             filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);

  status = H5Sclose(filespace);
  status = H5Pclose(plist_id);

  // gather the number of local rows and columns from all processes
  std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
  int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
                           proc_n_local_rows.data(), 1, MPI_INT,
                           tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);
  ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
                       proc_n_local_columns.data(), 1, MPI_INT,
                       tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);

  const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

  // hyperslab selection parameters: each process defines its dataset in memory
  // and writes it to the hyperslab in the file
  hsize_t count[2];
  count[0] = tmp.n_local_columns;
  count[1] = tmp.n_rows;
  hid_t memspace = H5Screate_simple(2, count, nullptr);

  hsize_t offset[2] = {0};
  for (unsigned int i=0; i<my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // select the hyperslab in the file
  filespace = H5Dget_space(dset_id);
  status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);

  // create a property list for independent dataset write
  plist_id = H5Pcreate(H5P_DATASET_XFER);
  status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);

  // processes without data will not participate in writing to the file
  if (tmp.values.size()>0)
    status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);

  // close/release sources
  status = H5Dclose(dset_id);
  status = H5Sclose(filespace);
  status = H5Sclose(memspace);
  status = H5Pclose(plist_id);
  status = H5Fclose(file_id);

  // before writing the state and property to the file, wait for all processes
  // to finish writing the matrix content
  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);

  // only the root process will write the state and property to the file
  if (tmp.grid->this_mpi_process==0)
    {
      // open the file using default properties
      hid_t file_id_reopen = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);

      // create HDF5 enum types for LAPACKSupport::State and LAPACKSupport::Property
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      // create the data space and dataset for the state enum, then write it
      hsize_t dims_state[1];
      dims_state[0] = 1;
      hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
      hid_t state_enum_dataset = H5Dcreate2(file_id_reopen, "/state", state_enum_id, state_enum_dataspace,
                                            H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
      status = H5Dwrite(state_enum_dataset, state_enum_id,
                        H5S_ALL, H5S_ALL, H5P_DEFAULT,
                        &state);

      // create the data space and dataset for the property enum, then write it
      hsize_t dims_property[1];
      dims_property[0] = 1;
      hid_t property_enum_dataspace = H5Screate_simple(1, dims_property, nullptr);
      hid_t property_enum_dataset = H5Dcreate2(file_id_reopen, "/property", property_enum_id, property_enum_dataspace,
                                               H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
      status = H5Dwrite(property_enum_dataset, property_enum_id,
                        H5S_ALL, H5S_ALL, H5P_DEFAULT,
                        &property);

      status = H5Dclose(state_enum_dataset);
      status = H5Dclose(property_enum_dataset);
      status = H5Sclose(state_enum_dataspace);
      status = H5Sclose(property_enum_dataspace);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);
      status = H5Fclose(file_id_reopen);
    }

  (void)status;
# endif
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load(const char *filename)
{
#ifndef DEAL_II_WITH_HDF5
  (void)filename;
  AssertThrow(false, ExcMessage("HDF5 support is disabled."));
#else
# ifdef H5_HAVE_PARALLEL
  // implementation for configurations equipped with a parallel file system
  load_parallel(filename);
# else
  // implementation for configurations with no parallel file system
  load_serial(filename);
# endif
#endif
}
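// Usage sketch (hypothetical file name): serialize a matrix to HDF5 and read
// it back into a matrix with the same global sizes; the process grids and
// block sizes of writer and reader may differ.
//
//   A.save("matrix.h5");
//   ScaLAPACKMatrix<double> B(A.m(), A.n(), grid, 32);
//   B.load("matrix.h5");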
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_serial(const char *filename)
{
# ifndef DEAL_II_WITH_HDF5
  (void)filename;
  Assert(false, ExcInternalError());
# else

  /*
   * The content of the file is read into a matrix using a 1x1 process grid.
   * Therefore, one process reads all the data and distributes it afterwards.
   */
  const auto one_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator, 1, 1);

  const int MB = n_rows, NB = n_columns;
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);

  int state_int = -1;
  int property_int = -1;

  // the 1x1 grid has only one process, which reads the content of the matrix from the HDF5 file
  if (tmp.grid->mpi_process_is_active)
    {
      herr_t status;

      // open the file in read-only mode
      hid_t file_id = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);

      // open the dataset in the file
      hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

      // the datatypes of source and destination must have the same class
      hid_t datatype = H5Dget_type(dataset_id);
      H5T_class_t t_class_in = H5Tget_class(datatype);
      H5T_class_t t_class = H5Tget_class(hdf5_type_id(&tmp.values[0]));
      AssertThrow(t_class_in == t_class,
                  ExcMessage("The data type of the matrix to be read does not match the archive"));

      // get the dimensions of the matrix stored in the file
      hid_t dataspace_id = H5Dget_space(dataset_id);
      // get the number of dimensions
      const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
      AssertThrow(ndims==2, ExcIO());
      // get every dimension
      hsize_t dims[2];
      H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
      AssertThrow((int)dims[0]==n_columns,
                  ExcMessage("The number of columns of the matrix does not match the content of the archive"));
      AssertThrow((int)dims[1]==n_rows,
                  ExcMessage("The number of rows of the matrix does not match the content of the archive"));

      // read the data
      status = H5Dread(dataset_id, hdf5_type_id(&tmp.values[0]), H5S_ALL, H5S_ALL,
                       H5P_DEFAULT, &tmp.values[0]);

      // create HDF5 enum types for LAPACKSupport::State and LAPACKSupport::Property
      hid_t state_enum_id, property_enum_id;
      internal::create_HDF5_state_enum_id(state_enum_id);
      internal::create_HDF5_property_enum_id(property_enum_id);

      // open the datasets for the state and property enums in the file
      hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
      hid_t datatype_state = H5Dget_type(dataset_state_id);
      H5T_class_t t_class_state = H5Tget_class(datatype_state);
      AssertThrow(t_class_state == H5T_ENUM, ExcIO());

      hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
      hid_t datatype_property = H5Dget_type(dataset_property_id);
      H5T_class_t t_class_property = H5Tget_class(datatype_property);
      AssertThrow(t_class_property == H5T_ENUM, ExcIO());

      // get dataspace handles
      hid_t dataspace_state = H5Dget_space(dataset_state_id);
      hid_t dataspace_property = H5Dget_space(dataset_property_id);
      // get the number of dimensions
      const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
      AssertThrow(ndims_state==1, ExcIO());
      const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
      AssertThrow(ndims_property==1, ExcIO());
      // get every dimension
      hsize_t dims_state[1];
      H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
      AssertThrow((int)dims_state[0]==1, ExcIO());
      hsize_t dims_property[1];
      H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
      AssertThrow((int)dims_property[0]==1, ExcIO());

      // read the state
      status = H5Dread(dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL,
                       H5P_DEFAULT, &tmp.state);

      // To send the state from the root process to the other processes, the
      // state enum is cast to an integer, which is broadcast and subsequently
      // cast back to the enum type.
      state_int = static_cast<int>(tmp.state);

      status = H5Dread(dataset_property_id, property_enum_id, H5S_ALL, H5S_ALL,
                       H5P_DEFAULT, &tmp.property);

      property_int = static_cast<int>(tmp.property);

      // terminate access to the data spaces
      status = H5Sclose(dataspace_id);
      status = H5Sclose(dataspace_state);
      status = H5Sclose(dataspace_property);

      // release data type handles
      status = H5Tclose(datatype);
      status = H5Tclose(state_enum_id);
      status = H5Tclose(property_enum_id);

      // end access to the datasets and release resources used by them
      status = H5Dclose(dataset_state_id);
      status = H5Dclose(dataset_id);
      status = H5Dclose(dataset_property_id);

      // close the file
      status = H5Fclose(file_id);
      (void)status;
    }
  // so far only the root process has the correct state/property integers --> broadcast them
  tmp.grid->send_to_inactive(&state_int, 1);
  tmp.grid->send_to_inactive(&property_int, 1);

  tmp.state = static_cast<LAPACKSupport::State>(state_int);
  tmp.property = static_cast<LAPACKSupport::Property>(property_int);

  tmp.copy_to(*this);

# endif // DEAL_II_WITH_HDF5
}
template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::load_parallel(const char *filename)
{
# ifndef DEAL_II_WITH_HDF5
  (void)filename;
  Assert(false, ExcInternalError());
# else
#  ifndef H5_HAVE_PARALLEL
  (void)filename;
  Assert(false, ExcInternalError());
#  else

  const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
  MPI_Info info = MPI_INFO_NULL;
  /*
   * The file is read into a matrix using a 1 x n_processes process grid.
   * Therefore, the processes hold contiguous chunks of the matrix, which they
   * can read from the file in parallel.
   */
  const auto column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator, 1, n_mpi_processes);

  const int MB = n_rows, NB = std::ceil(n_columns/n_mpi_processes);
  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);

  // get a pointer to the data held by this process
  NumberType *data = (tmp.values.size()>0) ? &tmp.values[0] : nullptr;

  herr_t status;

  // set up a file access property list with parallel I/O access
  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);

  // open the file collectively in read-only mode and release the property list identifier
  hid_t file_id = H5Fopen(filename, H5F_ACC_RDONLY, plist_id);
  status = H5Pclose(plist_id);

  // open the dataset in the file collectively
  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);

  // check the datatype of the dataset in the file; if the classes of the
  // dataset type and the matrix type do not match, abort
  hid_t datatype = hdf5_type_id(data);
  hid_t datatype_inp = H5Dget_type(dataset_id);
  H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
  H5T_class_t t_class = H5Tget_class(datatype);
  AssertThrow(t_class_inp == t_class,
              ExcMessage("The data type of the matrix to be read does not match the archive"));

  // get the dimensions of the matrix stored in the file
  hid_t dataspace_id = H5Dget_space(dataset_id);
  const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
  AssertThrow(ndims==2, ExcIO());
  hsize_t dims[2];
  status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
  AssertThrow((int)dims[0]==n_columns,
              ExcMessage("The number of columns of the matrix does not match the content of the archive"));
  AssertThrow((int)dims[1]==n_rows,
              ExcMessage("The number of rows of the matrix does not match the content of the archive"));

  // gather the number of local rows and columns from all processes
  std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
  int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
                           proc_n_local_rows.data(), 1, MPI_INT,
                           tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);
  ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
                       proc_n_local_columns.data(), 1, MPI_INT,
                       tmp.grid->mpi_communicator);
  AssertThrowMPI(ierr);

  const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));

  // hyperslab selection parameters: each process defines its dataset in memory
  // and reads it from the hyperslab in the file
  hsize_t count[2];
  count[0] = tmp.n_local_columns;
  count[1] = tmp.n_local_rows;

  hsize_t offset[2] = {0};
  for (unsigned int i=0; i<my_rank; ++i)
    offset[0] += proc_n_local_columns[i];

  // select the hyperslab in the file
  status = H5Sselect_hyperslab(dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);

  // create a memory dataspace independently
  hid_t memspace = H5Screate_simple(2, count, nullptr);

  // read the data independently
  status = H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);

  // create HDF5 enum types for LAPACKSupport::State and LAPACKSupport::Property
  hid_t state_enum_id, property_enum_id;
  internal::create_HDF5_state_enum_id(state_enum_id);
  internal::create_HDF5_property_enum_id(property_enum_id);

  // open the datasets for the state and property enums in the file
  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
  hid_t datatype_state = H5Dget_type(dataset_state_id);
  H5T_class_t t_class_state = H5Tget_class(datatype_state);
  AssertThrow(t_class_state == H5T_ENUM, ExcIO());

  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
  hid_t datatype_property = H5Dget_type(dataset_property_id);
  H5T_class_t t_class_property = H5Tget_class(datatype_property);
  AssertThrow(t_class_property == H5T_ENUM, ExcIO());

  // get dataspace handles
  hid_t dataspace_state = H5Dget_space(dataset_state_id);
  hid_t dataspace_property = H5Dget_space(dataset_property_id);
  // get the number of dimensions
  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
  AssertThrow(ndims_state==1, ExcIO());
  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
  AssertThrow(ndims_property==1, ExcIO());
  // get every dimension
  hsize_t dims_state[1];
  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
  AssertThrow((int)dims_state[0]==1, ExcIO());
  hsize_t dims_property[1];
  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
  AssertThrow((int)dims_property[0]==1, ExcIO());

  // read the state enum
  status = H5Dread(dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL,
                   H5P_DEFAULT, &tmp.state);

  // read the property enum
  status = H5Dread(dataset_property_id, property_enum_id, H5S_ALL, H5S_ALL,
                   H5P_DEFAULT, &tmp.property);

  // close/release sources
  status = H5Sclose(memspace);
  status = H5Dclose(dataset_id);
  status = H5Dclose(dataset_state_id);
  status = H5Dclose(dataset_property_id);
  status = H5Sclose(dataspace_id);
  status = H5Sclose(dataspace_state);
  status = H5Sclose(dataspace_property);
  status = H5Tclose(state_enum_id);
  status = H5Tclose(property_enum_id);
  status = H5Fclose(file_id);
  (void)status;

  // copy the distributed matrix into *this
  tmp.copy_to(*this);
#  endif // H5_HAVE_PARALLEL
# endif // DEAL_II_WITH_HDF5
}



namespace internal
{
  namespace
  {
    template <typename NumberType>
    void scale_columns(ScaLAPACKMatrix<NumberType> &matrix,
                       const ArrayView<const NumberType> &factors)
    {
      Assert(matrix.n()==factors.size(), ExcDimensionMismatch(matrix.n(), factors.size()));

      for (unsigned int i=0; i<matrix.local_n(); ++i)
        {
          const NumberType s = factors[matrix.global_column(i)];

          for (unsigned int j=0; j<matrix.local_m(); ++j)
            matrix.local_el(j,i) *= s;
        }
    }

    template <typename NumberType>
    void scale_rows(ScaLAPACKMatrix<NumberType> &matrix,
                    const ArrayView<const NumberType> &factors)
    {
      Assert(matrix.m()==factors.size(), ExcDimensionMismatch(matrix.m(), factors.size()));

      for (unsigned int i=0; i<matrix.local_m(); ++i)
        {
          const NumberType s = factors[matrix.global_row(i)];

          for (unsigned int j=0; j<matrix.local_n(); ++j)
            matrix.local_el(i,j) *= s;
        }
    }
  }
}
template <typename NumberType>
template <class InputVector>
void ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_columns(*this, make_array_view(factors));
}



template <typename NumberType>
template <class InputVector>
void ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
{
  if (this->grid->mpi_process_is_active)
    internal::scale_rows(*this, make_array_view(factors));
}
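// Usage sketch (assumed sizes): rescale all columns (or rows) by per-column
// (or per-row) factors; the vector length must match the global column (row)
// count.
//
//   std::vector<double> factors(A.n(), 0.5);
//   A.scale_columns(factors);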
// instantiations
#include "scalapack.inst"


DEAL_II_NAMESPACE_CLOSE
#endif // DEAL_II_WITH_SCALAPACK