Reference documentation for deal.II version 9.0.0
scalapack.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2017 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


#include <deal.II/lac/scalapack.h>

#ifdef DEAL_II_WITH_SCALAPACK

#include <deal.II/base/std_cxx14/memory.h>

#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/array_view.h>
#include <deal.II/lac/scalapack.templates.h>

#ifdef DEAL_II_WITH_HDF5
#include <hdf5.h>
#endif

DEAL_II_NAMESPACE_OPEN

#ifdef DEAL_II_WITH_HDF5

template <typename number>
inline hid_t hdf5_type_id (const number *)
{
  Assert (false, ExcNotImplemented());
  // this overload is never called; the return value does not matter
  return -1;
}

inline hid_t hdf5_type_id (const double *)
{
  return H5T_NATIVE_DOUBLE;
}

inline hid_t hdf5_type_id (const float *)
{
  return H5T_NATIVE_FLOAT;
}

inline hid_t hdf5_type_id (const int *)
{
  return H5T_NATIVE_INT;
}

inline hid_t hdf5_type_id (const unsigned int *)
{
  return H5T_NATIVE_UINT;
}

inline hid_t hdf5_type_id (const char *)
{
  return H5T_NATIVE_CHAR;
}
#endif // DEAL_II_WITH_HDF5

template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type n_rows_,
                                             const size_type n_columns_,
                                             const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
                                             const size_type row_block_size_,
                                             const size_type column_block_size_,
                                             const LAPACKSupport::Property property_)
  :
  uplo('L'), // for non-symmetric matrices this is not needed
  first_process_row(0),
  first_process_column(0),
  submatrix_row(1),
  submatrix_column(1)
{
  reinit(n_rows_, n_columns_, process_grid, row_block_size_, column_block_size_, property_);
}
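
// Usage sketch (illustration only, not part of the library source): a
// distributed matrix needs a ProcessGrid first. The matrix size (1000) and
// block size (32) below are assumptions chosen for the example.
//
//   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//     MPI_COMM_WORLD, 1000, 1000, 32, 32);
//   ScaLAPACKMatrix<double> A(1000, 1000, grid, 32, 32);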


template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
                                             const std::shared_ptr<const Utilities::MPI::ProcessGrid> process_grid,
                                             const size_type block_size,
                                             const LAPACKSupport::Property property)
  :
  ScaLAPACKMatrix<NumberType>(size, size, process_grid, block_size, block_size, property)
{}


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(const size_type n_rows_,
                                    const size_type n_columns_,
                                    const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
                                    const size_type row_block_size_,
                                    const size_type column_block_size_,
                                    const LAPACKSupport::Property property_)
{
  Assert (row_block_size_ > 0,
          ExcMessage("Row block size has to be positive."));
  Assert (column_block_size_ > 0,
          ExcMessage("Column block size has to be positive."));
  Assert (row_block_size_ <= n_rows_,
          ExcMessage("Row block size can not be greater than the number of rows of the matrix"));
  Assert (column_block_size_ <= n_columns_,
          ExcMessage("Column block size can not be greater than the number of columns of the matrix"));

  state = LAPACKSupport::State::matrix;
  property = property_;
  grid = process_grid;
  n_rows = n_rows_;
  n_columns = n_columns_;
  row_block_size = row_block_size_;
  column_block_size = column_block_size_;

  if (grid->mpi_process_is_active)
    {
      // Get local sizes:
      n_local_rows = numroc_(&n_rows, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
      n_local_columns = numroc_(&n_columns, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));

      // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different between processes
      int lda = std::max(1,n_local_rows);

      int info=0;
      descinit_(descriptor, &n_rows, &n_columns,
                &row_block_size, &column_block_size,
                &first_process_row, &first_process_column,
                &(grid->blacs_context), &lda, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("descinit_", info));

      this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
    }
  else
    {
      // set process-local variables to something telling:
      n_local_rows = -1;
      n_local_columns = -1;
      for (unsigned int i = 0; i < 9; ++i)
        descriptor[i] = -1;
    }
}
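
// A small worked example of the block-cyclic sizing above (illustration
// only): for n_rows = 10, row_block_size = 3 and a grid with 2 process rows,
// the row blocks (3,3,3,1) are dealt out round-robin, so numroc_ reports
// 6 local rows on process row 0 (blocks 0 and 2) and 4 on process row 1
// (blocks 1 and 3).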


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::reinit(const size_type size,
                                    const std::shared_ptr<const Utilities::MPI::ProcessGrid> process_grid,
                                    const size_type block_size,
                                    const LAPACKSupport::Property property)
{
  reinit(size, size, process_grid, block_size, block_size, property);
}


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::set_property(const LAPACKSupport::Property property_)
{
  property = property_;
}



template <typename NumberType>
LAPACKSupport::Property
ScaLAPACKMatrix<NumberType>::get_property() const
{
  return property;
}



template <typename NumberType>
LAPACKSupport::State
ScaLAPACKMatrix<NumberType>::get_state() const
{
  return state;
}


template <typename NumberType>
ScaLAPACKMatrix<NumberType> &
ScaLAPACKMatrix<NumberType>::operator = (const FullMatrix<NumberType> &matrix)
{
  // FIXME: another way to copy is to use the pdgeadd_ PBLAS routine.
  // This routine computes the sum of two matrices B := a*A + b*B.
  // Matrices can have different distributions; in particular, matrix A can
  // be owned by only one process, so we can set a=1 and b=0 to copy
  // non-distributed matrix A into distributed matrix B.
  Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));

  if (grid->mpi_process_is_active)
    {
      for (int i=0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              local_el(i,j) = matrix(glob_i, glob_j);
            }
        }
    }
  state = LAPACKSupport::matrix;
  return *this;
}


template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
{
  Assert (n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
          ExcIndexRange(loc_row,0,n_local_rows));
  const int i = loc_row+1;
  return indxl2g_ (&i, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows)) - 1;
}



template <typename NumberType>
unsigned int
ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
{
  Assert (n_local_columns >= 0 && loc_column < static_cast<unsigned int>(n_local_columns),
          ExcIndexRange(loc_column,0,n_local_columns));
  const int j = loc_column+1;
  return indxl2g_ (&j, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns)) - 1;
}
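
// Continuing the sizing example above (illustration only): with
// row_block_size = 3 and 2 process rows, process row 0 owns the global
// (1-based) rows 1,2,3 and 7,8,9. Local row 3 (0-based) is local 1-based
// index 4, which indxl2g_ maps to global 1-based row 7, so global_row(3)
// returns 6.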


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to (FullMatrix<NumberType> &matrix) const
{
  // FIXME: use PDGEMR2D for copying?
  // PDGEMR2D copies a submatrix of A onto a submatrix of B.
  // A and B can have different distributions;
  // see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
  Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
  Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));

  if (grid->mpi_process_is_active)
    {
      matrix = 0.;
      for (int i=0; i < n_local_rows; ++i)
        {
          const int glob_i = global_row(i);
          for (int j = 0; j < n_local_columns; ++j)
            {
              const int glob_j = global_column(j);
              matrix(glob_i, glob_j) = local_el(i,j);
            }
        }
    }
  Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);

  // we could move the following lines under the main loop above,
  // but they would be dependent on glob_i and glob_j, which
  // won't make it much prettier
  if (property == LAPACKSupport::lower_triangular)
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = i+1; j < matrix.m(); ++j)
        matrix(i,j) = (state == LAPACKSupport::inverse_matrix ? matrix(j,i) : 0.);
  else if (property == LAPACKSupport::upper_triangular)
    for (unsigned int i = 0; i < matrix.n(); ++i)
      for (unsigned int j = 0; j < i; ++j)
        matrix(i,j) = (state == LAPACKSupport::inverse_matrix ? matrix(j,i) : 0.);
}


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &B,
                                     const std::pair<unsigned int,unsigned int> &offset_A,
                                     const std::pair<unsigned int,unsigned int> &offset_B,
                                     const std::pair<unsigned int,unsigned int> &submatrix_size) const
{
  // submatrix is empty
  if (submatrix_size.first == 0 || submatrix_size.second == 0)
    return;

  // range checking for matrix A
  Assert (offset_A.first<(unsigned int)(n_rows-submatrix_size.first+1),
          ExcIndexRange(offset_A.first,0,n_rows-submatrix_size.first+1));
  Assert (offset_A.second<(unsigned int)(n_columns-submatrix_size.second+1),
          ExcIndexRange(offset_A.second,0,n_columns-submatrix_size.second+1));

  // range checking for matrix B
  Assert (offset_B.first<(unsigned int)(B.n_rows-submatrix_size.first+1),
          ExcIndexRange(offset_B.first,0,B.n_rows-submatrix_size.first+1));
  Assert (offset_B.second<(unsigned int)(B.n_columns-submatrix_size.second+1),
          ExcIndexRange(offset_B.second,0,B.n_columns-submatrix_size.second+1));

  // Currently, copying of matrices is only supported if A and B share the same MPI communicator.
  int ierr, comparison;
  ierr = MPI_Comm_compare(grid->mpi_communicator,B.grid->mpi_communicator,&comparison);
  AssertThrowMPI(ierr);
  Assert (comparison == MPI_IDENT,ExcMessage("Matrix A and B must have a common MPI Communicator"));

  /*
   * The routine pgemr2d requires a BLACS context resembling at least the union of process grids
   * described by the BLACS contexts held by the ProcessGrids of matrix A and B.
   * As A and B share the same MPI communicator, there is no need to create a union MPI
   * communicator to initialise the BLACS context.
   */
  int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
  const char *order = "Col";
  int union_n_process_rows = Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
  int union_n_process_columns = 1;
  Cblacs_gridinit(&union_blacs_context, order, union_n_process_rows, union_n_process_columns);

  int n_grid_rows_A,n_grid_columns_A,my_row_A,my_column_A;
  Cblacs_gridinfo(this->grid->blacs_context,&n_grid_rows_A,&n_grid_columns_A,&my_row_A,&my_column_A);

  // check whether this process is in the BLACS context of matrix A
  const bool in_context_A = (my_row_A>=0 && my_row_A<n_grid_rows_A) &&
                            (my_column_A>=0 && my_column_A<n_grid_columns_A);

  int n_grid_rows_B,n_grid_columns_B,my_row_B,my_column_B;
  Cblacs_gridinfo(B.grid->blacs_context,&n_grid_rows_B,&n_grid_columns_B,&my_row_B,&my_column_B);

  // check whether this process is in the BLACS context of matrix B
  const bool in_context_B = (my_row_B>=0 && my_row_B<n_grid_rows_B) &&
                            (my_column_B>=0 && my_column_B<n_grid_columns_B);

  const int n_rows_submatrix = submatrix_size.first;
  const int n_columns_submatrix = submatrix_size.second;

  // due to Fortran indexing one has to be added
  int ia = offset_A.first+1, ja = offset_A.second+1;
  int ib = offset_B.first+1, jb = offset_B.second+1;

  std::array<int,9> desc_A, desc_B;

  const NumberType *loc_vals_A = nullptr;
  NumberType *loc_vals_B = nullptr;

  // Note: the function pgemr2d has to be called for all processes in the union BLACS context.
  // If the calling process is not part of the BLACS context of A, desc_A[1] has to be -1
  // and all other parameters do not have to be set.
  // If the calling process is not part of the BLACS context of B, desc_B[1] has to be -1
  // and all other parameters do not have to be set.
  if (in_context_A)
    {
      if (this->values.size() != 0)
        loc_vals_A = &this->values[0];

      for (unsigned int i=0; i<desc_A.size(); ++i)
        desc_A[i] = this->descriptor[i];
    }
  else
    desc_A[1] = -1;

  if (in_context_B)
    {
      if (B.values.size() != 0)
        loc_vals_B = &B.values[0];

      for (unsigned int i=0; i<desc_B.size(); ++i)
        desc_B[i] = B.descriptor[i];
    }
  else
    desc_B[1] = -1;

  pgemr2d(&n_rows_submatrix, &n_columns_submatrix,
          loc_vals_A, &ia, &ja, desc_A.data(),
          loc_vals_B, &ib, &jb, desc_B.data(),
          &union_blacs_context);

  B.state = LAPACKSupport::matrix;

  // releasing the union BLACS context
  Cblacs_gridexit(union_blacs_context);
}
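
// Usage sketch (illustration only): copy the leading 100x100 block of A into
// B starting at B's position (10,10); offsets are 0-based, and A and B are
// assumed to live on the same MPI communicator.
//
//   A.copy_to(B, std::make_pair(0,0), std::make_pair(10,10),
//             std::make_pair(100,100));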


template <typename NumberType>
void
ScaLAPACKMatrix<NumberType>::copy_to (ScaLAPACKMatrix<NumberType> &dest) const
{
  Assert (n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
  Assert (n_columns == dest.n_columns, ExcDimensionMismatch(n_columns, dest.n_columns));

  if (this->grid->mpi_process_is_active)
    AssertThrow (this->descriptor[0]==1,ExcMessage("Copying of ScaLAPACK matrices only implemented for dense matrices"));
  if (dest.grid->mpi_process_is_active)
    AssertThrow (dest.descriptor[0]==1,ExcMessage("Copying of ScaLAPACK matrices only implemented for dense matrices"));

  /*
   * in case of different process grids or block-cyclic distributions
   * inter-process communication is necessary;
   * if the distributed matrices have the same process grid and block sizes, local copying is enough
   */
  if ( (this->grid != dest.grid) || (row_block_size != dest.row_block_size) || (column_block_size != dest.column_block_size) )
    {
      /*
       * get the MPI communicator, which is the union of the source and destination MPI communicator
       */
      int ierr = 0;
      MPI_Group group_source, group_dest, group_union;
      ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
      AssertThrowMPI(ierr);
      ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_union(group_source, group_dest, &group_union);
      AssertThrowMPI(ierr);
      MPI_Comm mpi_communicator_union;

      // to create a communicator representing the union of the source
      // and destination MPI communicator we need a communicator that
      // is guaranteed to contain all desired processes -- i.e.,
      // MPI_COMM_WORLD. on the other hand, as documented in the MPI
      // standard, MPI_Comm_create_group is not collective on all
      // processes in the first argument, but instead is collective on
      // only those processes listed in the group. in other words,
      // there is really no harm in passing MPI_COMM_WORLD as the
      // first argument, even if the program we are currently running
      // and that is calling this function only works on a subset of
      // processes. the same holds for the wrapper/fallback we are using here.
      ierr = Utilities::MPI::create_group(MPI_COMM_WORLD, group_union, 5, &mpi_communicator_union);
      AssertThrowMPI(ierr);

      /*
       * The routine pgemr2d requires a BLACS context resembling at least the union of process grids
       * described by the BLACS contexts of matrix A and B.
       */
      int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
      const char *order = "Col";
      int union_n_process_rows = Utilities::MPI::n_mpi_processes(mpi_communicator_union);
      int union_n_process_columns = 1;
      Cblacs_gridinit(&union_blacs_context, order, union_n_process_rows, union_n_process_columns);

      const NumberType *loc_vals_source = nullptr;
      NumberType *loc_vals_dest = nullptr;

      if (this->grid->mpi_process_is_active && (this->values.size()>0))
        {
          AssertThrow(this->values.size()>0,ExcMessage("source: process is active but local matrix empty"));
          loc_vals_source = &this->values[0];
        }
      if (dest.grid->mpi_process_is_active && (dest.values.size()>0))
        {
          AssertThrow(dest.values.size()>0,ExcMessage("destination: process is active but local matrix empty"));
          loc_vals_dest = &dest.values[0];
        }
      pgemr2d(&n_rows, &n_columns, loc_vals_source, &submatrix_row, &submatrix_column, descriptor,
              loc_vals_dest, &dest.submatrix_row, &dest.submatrix_column, dest.descriptor,
              &union_blacs_context);

      Cblacs_gridexit(union_blacs_context);

      if (mpi_communicator_union != MPI_COMM_NULL)
        {
          ierr = MPI_Comm_free(&mpi_communicator_union);
          AssertThrowMPI(ierr);
        }
      ierr = MPI_Group_free(&group_source);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&group_dest);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&group_union);
      AssertThrowMPI(ierr);
    }
  else
    // process is active in the process grid
    if (this->grid->mpi_process_is_active)
      dest.values = this->values;

  dest.state = state;
  dest.property = property;
}


template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::copy_transposed(const ScaLAPACKMatrix<NumberType> &B)
{
  add(B,0,1,true);
}



template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
                                      const NumberType alpha,
                                      const NumberType beta,
                                      const bool transpose_B)
{
  if (transpose_B)
    {
      Assert (n_rows == B.n_columns, ExcDimensionMismatch(n_rows,B.n_columns));
      Assert (n_columns == B.n_rows, ExcDimensionMismatch(n_columns,B.n_rows));
      Assert(column_block_size==B.row_block_size,ExcDimensionMismatch(column_block_size,B.row_block_size));
      Assert(row_block_size==B.column_block_size,ExcDimensionMismatch(row_block_size,B.column_block_size));
    }
  else
    {
      Assert (n_rows == B.n_rows, ExcDimensionMismatch(n_rows,B.n_rows));
      Assert (n_columns == B.n_columns, ExcDimensionMismatch(n_columns,B.n_columns));
      Assert(column_block_size==B.column_block_size,ExcDimensionMismatch(column_block_size,B.column_block_size));
      Assert(row_block_size==B.row_block_size,ExcDimensionMismatch(row_block_size,B.row_block_size));
    }
  Assert(this->grid==B.grid,ExcMessage("The matrices A and B need to have the same process grid"));

  if (this->grid->mpi_process_is_active)
    {
      char trans_b = transpose_B ? 'T' : 'N';
      NumberType *A_loc = (this->values.size()>0) ? &this->values[0] : nullptr;
      const NumberType *B_loc = (B.values.size()>0) ? &B.values[0] : nullptr;

      pgeadd(&trans_b,&n_rows,&n_columns,
             &beta,B_loc,&B.submatrix_row,&B.submatrix_column,B.descriptor,
             &alpha,A_loc,&submatrix_row,&submatrix_column,descriptor);
    }
  state = LAPACKSupport::matrix;
}



template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::add(const NumberType a,
                                      const ScaLAPACKMatrix<NumberType> &B)
{
  add(B,1,a,false);
}



template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
                                       const ScaLAPACKMatrix<NumberType> &B)
{
  add(B,1,a,true);
}
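
// The wrappers above compute A = A + a*B and A = A + a*B^T via the PBLAS
// routine p_geadd, i.e. sub(A) := alpha*op(sub(B)) + beta*sub(A).
// Usage sketch (illustration only):
//
//   A.add(0.5, B);   // A += 0.5 * B
//   A.Tadd(1.0, B);  // A += B^T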


template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
                                       const ScaLAPACKMatrix<NumberType> &B,
                                       const NumberType c,
                                       ScaLAPACKMatrix<NumberType> &C,
                                       const bool transpose_A,
                                       const bool transpose_B) const
{
  Assert(this->grid==B.grid,ExcMessage("The matrices A and B need to have the same process grid"));
  Assert(C.grid==B.grid,ExcMessage("The matrices B and C need to have the same process grid"));

  // see for further info:
  // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgemm.htm
  if (!transpose_A && !transpose_B)
    {
      Assert(this->n_columns==B.n_rows,ExcDimensionMismatch(this->n_columns,B.n_rows));
      Assert(this->n_rows==C.n_rows,ExcDimensionMismatch(this->n_rows,C.n_rows));
      Assert(B.n_columns==C.n_columns,ExcDimensionMismatch(B.n_columns,C.n_columns));
      Assert(this->row_block_size==C.row_block_size,ExcDimensionMismatch(this->row_block_size,C.row_block_size));
      Assert(this->column_block_size==B.row_block_size,ExcDimensionMismatch(this->column_block_size,B.row_block_size));
      Assert(B.column_block_size==C.column_block_size,ExcDimensionMismatch(B.column_block_size,C.column_block_size));
    }
  else if (transpose_A && !transpose_B)
    {
      Assert(this->n_rows==B.n_rows,ExcDimensionMismatch(this->n_rows,B.n_rows));
      Assert(this->n_columns==C.n_rows,ExcDimensionMismatch(this->n_columns,C.n_rows));
      Assert(B.n_columns==C.n_columns,ExcDimensionMismatch(B.n_columns,C.n_columns));
      Assert(this->column_block_size==C.row_block_size,ExcDimensionMismatch(this->column_block_size,C.row_block_size));
      Assert(this->row_block_size==B.row_block_size,ExcDimensionMismatch(this->row_block_size,B.row_block_size));
      Assert(B.column_block_size==C.column_block_size,ExcDimensionMismatch(B.column_block_size,C.column_block_size));
    }
  else if (!transpose_A && transpose_B)
    {
      Assert(this->n_columns==B.n_columns,ExcDimensionMismatch(this->n_columns,B.n_columns));
      Assert(this->n_rows==C.n_rows,ExcDimensionMismatch(this->n_rows,C.n_rows));
      Assert(B.n_rows==C.n_columns,ExcDimensionMismatch(B.n_rows,C.n_columns));
      Assert(this->row_block_size==C.row_block_size,ExcDimensionMismatch(this->row_block_size,C.row_block_size));
      Assert(this->column_block_size==B.column_block_size,ExcDimensionMismatch(this->column_block_size,B.column_block_size));
      Assert(B.row_block_size==C.column_block_size,ExcDimensionMismatch(B.row_block_size,C.column_block_size));
    }
  else // if (transpose_A && transpose_B)
    {
      Assert(this->n_rows==B.n_columns,ExcDimensionMismatch(this->n_rows,B.n_columns));
      Assert(this->n_columns==C.n_rows,ExcDimensionMismatch(this->n_columns,C.n_rows));
      Assert(B.n_rows==C.n_columns,ExcDimensionMismatch(B.n_rows,C.n_columns));
      Assert(this->column_block_size==C.row_block_size,ExcDimensionMismatch(this->column_block_size,C.row_block_size));
      Assert(this->row_block_size==B.column_block_size,ExcDimensionMismatch(this->row_block_size,B.column_block_size));
      Assert(B.row_block_size==C.column_block_size,ExcDimensionMismatch(B.row_block_size,C.column_block_size));
    }

  if (this->grid->mpi_process_is_active)
    {
      char trans_a = transpose_A ? 'T' : 'N';
      char trans_b = transpose_B ? 'T' : 'N';

      const NumberType *A_loc = (this->values.size()>0) ? (&(this->values[0])) : nullptr;
      const NumberType *B_loc = (B.values.size()>0) ? (&(B.values[0])) : nullptr;
      NumberType *C_loc = (C.values.size()>0) ? (&(C.values[0])) : nullptr;
      int m = C.n_rows;
      int n = C.n_columns;
      int k = transpose_A ? this->n_rows : this->n_columns;

      pgemm(&trans_a,&trans_b,&m,&n,&k,
            &b,A_loc,&(this->submatrix_row),&(this->submatrix_column),this->descriptor,
            B_loc,&B.submatrix_row,&B.submatrix_column,B.descriptor,
            &c,C_loc,&C.submatrix_row,&C.submatrix_column,C.descriptor);
    }
  C.state = LAPACKSupport::matrix;
}
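
// mult() computes C = b * op(A) * op(B) + c * C, with op fixed by the two
// transpose flags. Usage sketch (illustration only):
//
//   A.mult(1., B, 0., C, false, false);  // C = A * B
//   A.mult(1., B, 1., C, true,  false);  // C = A^T * B + C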


template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> &C,
                                        const ScaLAPACKMatrix<NumberType> &B,
                                        const bool adding) const
{
  if (adding)
    mult(1.,B,1.,C,false,false);
  else
    mult(1.,B,0,C,false,false);
}



template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> &C,
                                         const ScaLAPACKMatrix<NumberType> &B,
                                         const bool adding) const
{
  if (adding)
    mult(1.,B,1.,C,true,false);
  else
    mult(1.,B,0,C,true,false);
}



template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> &C,
                                         const ScaLAPACKMatrix<NumberType> &B,
                                         const bool adding) const
{
  if (adding)
    mult(1.,B,1.,C,false,true);
  else
    mult(1.,B,0,C,false,true);
}



template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> &C,
                                          const ScaLAPACKMatrix<NumberType> &B,
                                          const bool adding) const
{
  if (adding)
    mult(1.,B,1.,C,true,true);
  else
    mult(1.,B,0,C,true,true);
}


template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
{
  Assert (n_columns==n_rows && property == LAPACKSupport::Property::symmetric,
          ExcMessage("Cholesky factorization can be applied to symmetric matrices only."));
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      NumberType *A_loc = &this->values[0];
      //pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
      ppotrf(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("ppotrf", info));
    }
  state = LAPACKSupport::cholesky;
  property = (uplo=='L' ? LAPACKSupport::lower_triangular : LAPACKSupport::upper_triangular);
}


template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      NumberType *A_loc = &this->values[0];

      const int iarow = indxg2p_(&submatrix_row, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
      const int mp = numroc_(&n_rows, &row_block_size, &(grid->this_process_row), &iarow, &(grid->n_process_rows));
      ipiv.resize(mp+row_block_size);

      pgetrf(&n_rows,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,ipiv.data(),&info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgetrf", info));
    }
  state = LAPACKSupport::State::lu;
  property = LAPACKSupport::Property::general;
}


template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::invert()
{
  // Check whether the matrix is symmetric and save the flag.
  // If a Cholesky factorization has been applied previously,
  // the original matrix was symmetric.
  const bool is_symmetric = (property == LAPACKSupport::symmetric || state == LAPACKSupport::State::cholesky);

  // If the matrix is neither in Cholesky nor LU state,
  // compute the required factorization based on the property of the matrix.
  if ( !(state==LAPACKSupport::State::lu || state==LAPACKSupport::State::cholesky) )
    {
      if (is_symmetric)
        compute_cholesky_factorization();
      else
        compute_lu_factorization();
    }
  if (grid->mpi_process_is_active)
    {
      int info = 0;
      NumberType *A_loc = &this->values[0];

      if (is_symmetric)
        {
          ppotri(&uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, &info);
          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("ppotri", info));
        }
      else
        {
          // workspace query: calling pgetri with lwork==-1 returns the optimal
          // sizes of work and iwork in their first elements
          int lwork=-1, liwork=-1;
          work.resize(1);
          iwork.resize(1);

          pgetri(&n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
                 ipiv.data(), work.data(), &lwork, iwork.data(), &liwork, &info);

          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgetri", info));
          lwork=work[0];
          liwork=iwork[0];
          work.resize(lwork);
          iwork.resize(liwork);

          pgetri(&n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
                 ipiv.data(), work.data(), &lwork, iwork.data(), &liwork, &info);

          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgetri", info));
        }
    }
  state = LAPACKSupport::State::inverse_matrix;
}
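
// Usage sketch (illustration only): invert a symmetric positive definite
// matrix in place; invert() picks the Cholesky-based path automatically
// because the property is symmetric.
//
//   ScaLAPACKMatrix<double> A(n, grid, 32, LAPACKSupport::Property::symmetric);
//   // ... fill A ...
//   A.invert();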


template <typename NumberType>
std::vector<NumberType> ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(const std::pair<unsigned int,unsigned int> &index_limits,
                                                                                   const bool compute_eigenvectors)
{
  // check validity of index limits
  Assert (index_limits.first < (unsigned int)n_rows,ExcIndexRange(index_limits.first,0,n_rows));
  Assert (index_limits.second < (unsigned int)n_rows,ExcIndexRange(index_limits.second,0,n_rows));

  std::pair<unsigned int,unsigned int> idx = std::make_pair(std::min(index_limits.first,index_limits.second),
                                                            std::max(index_limits.first,index_limits.second));

  // compute all eigenvalues/eigenvectors
  if (idx.first==0 && idx.second==(unsigned int)n_rows-1)
    return eigenpairs_symmetric(compute_eigenvectors);
  else
    return eigenpairs_symmetric(compute_eigenvectors,idx);
}


template <typename NumberType>
std::vector<NumberType> ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(const std::pair<NumberType,NumberType> &value_limits,
                                                                                   const bool compute_eigenvectors)
{
  Assert (!std::isnan(value_limits.first),ExcMessage("value_limits.first is NaN"));
  Assert (!std::isnan(value_limits.second),ExcMessage("value_limits.second is NaN"));

  std::pair<unsigned int,unsigned int> indices = std::make_pair(numbers::invalid_unsigned_int,numbers::invalid_unsigned_int);

  return eigenpairs_symmetric(compute_eigenvectors,indices,value_limits);
}


template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(const bool compute_eigenvectors,
                                                  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
                                                  const std::pair<NumberType,NumberType> &eigenvalue_limits)
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));
  Assert (property == LAPACKSupport::symmetric,
          ExcMessage("Matrix has to be symmetric for this operation."));

  Threads::Mutex::ScopedLock lock (mutex);

  const bool use_values = (std::isnan(eigenvalue_limits.first) || std::isnan(eigenvalue_limits.second)) ? false : true;
  const bool use_indices = ((eigenvalue_idx.first==numbers::invalid_unsigned_int) || (eigenvalue_idx.second==numbers::invalid_unsigned_int)) ? false : true;

  Assert(!(use_values && use_indices),ExcMessage("Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // if computation of eigenvectors is not required use a sufficiently small distributed matrix
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors = compute_eigenvectors ?
                                                              std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,grid,row_block_size) :
                                                              std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(grid->n_process_rows,grid->n_process_columns,grid,1,1);

  eigenvectors->property = property;
  // number of eigenvalues to be returned from psyevx; upon successful exit ev contains the m selected eigenvalues in ascending order
  // set to all eigenvalues in case we will be using psyev.
  int m = n_rows;
  std::vector<NumberType> ev(n_rows);

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      /*
       * for jobz=='N' only eigenvalues are computed, for jobz=='V' also the eigenvectors of the matrix are computed
       */
      char jobz = compute_eigenvectors ? 'V' : 'N';
      char range='A';
      // default value is to compute all eigenvalues and optionally eigenvectors
      bool all_eigenpairs=true;
      NumberType vl=NumberType(),vu=NumberType();
      int il=1,iu=1;
      // number of eigenvectors to be returned;
      // upon successful exit the first m=nz columns contain the selected eigenvectors (only if jobz=='V')
      int nz=0;
      NumberType abstol = NumberType();

      // orfac decides which eigenvectors should be reorthogonalized
      // see http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html for explanation
      // to keep it simple, no reorthogonalization will be done by setting orfac to 0
      NumberType orfac = 0;
      // contains the indices of eigenvectors that failed to converge
      std::vector<int> ifail;
      // This array contains indices of eigenvectors corresponding to
      // a cluster of eigenvalues that could not be reorthogonalized
      // due to insufficient workspace
      // see http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html for explanation
      std::vector<int> iclustr;
      // This array contains the gap between eigenvalues whose
      // eigenvectors could not be reorthogonalized.
      // see http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html for explanation
      std::vector<NumberType> gap(n_local_rows * n_local_columns);

      // index range for eigenvalues is not specified
      if (!use_indices)
        {
          // interval for eigenvalues is not specified and consequently all eigenvalues/eigenpairs will be computed
          if (!use_values)
            {
              range = 'A';
              all_eigenpairs = true;
            }
          else
            {
              range = 'V';
              all_eigenpairs = false;
              vl = std::min(eigenvalue_limits.first,eigenvalue_limits.second);
              vu = std::max(eigenvalue_limits.first,eigenvalue_limits.second);
            }
        }
      else
        {
          range = 'I';
          all_eigenpairs = false;
          // as Fortran starts counting/indexing from 1 unlike C/C++, where it starts from 0
          il = std::min(eigenvalue_idx.first,eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first,eigenvalue_idx.second) + 1;
        }
      NumberType *A_loc = &this->values[0];
      /*
       * by setting lwork to -1 a workspace query for the optimal length of work is performed
       */
      int lwork=-1;
      int liwork=-1;
      NumberType *eigenvectors_loc = (compute_eigenvectors ? &eigenvectors->values[0] : nullptr);
      work.resize(1);
      iwork.resize (1);

      if (all_eigenpairs)
        {
          psyev(&jobz, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor, &ev[0],
                eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
                &work[0], &lwork, &info);
          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("psyev", info));
        }
      else
        {
          char cmach = compute_eigenvectors ? 'U' : 'S';
          plamch( &(this->grid->blacs_context), &cmach, abstol);
          abstol *= 2;
          ifail.resize(n_rows);
          iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
          gap.resize(grid->n_process_rows * grid->n_process_columns);

          psyevx(&jobz, &range, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor,
                 &vl, &vu, &il, &iu, &abstol, &m, &nz, &ev[0], &orfac,
                 eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
                 &work[0], &lwork, &iwork[0], &liwork, &ifail[0], &iclustr[0], &gap[0], &info);
          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("psyevx", info));
        }
      lwork=work[0];
      work.resize (lwork);

      if (all_eigenpairs)
        {
          psyev(&jobz, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor, &ev[0],
                eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
                &work[0], &lwork, &info);

          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("psyev", info));
        }
      else
        {
          liwork = iwork[0];
          AssertThrow(liwork>0,ExcInternalError());
          iwork.resize(liwork);

          psyevx(&jobz, &range, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor,
                 &vl, &vu, &il, &iu, &abstol, &m, &nz, &ev[0], &orfac,
                 eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
                 &work[0], &lwork, &iwork[0], &liwork, &ifail[0], &iclustr[0], &gap[0], &info);

          AssertThrow (info==0, LAPACKSupport::ExcErrorCode("psyevx", info));
        }
      // if eigenvectors are queried copy eigenvectors to original matrix
      // as the temporary matrix eigenvectors has identical dimensions and
      // block-cyclic distribution we simply swap the local array
      if (compute_eigenvectors)
        this->values.swap(eigenvectors->values);

      // adapt the size of ev to fit m upon return
      while ((int)ev.size() > m)
        ev.pop_back();
    }
  /*
   * send number of computed eigenvalues to inactive processes
   */
  grid->send_to_inactive(&m, 1);

  /*
   * inactive processes have to resize array of eigenvalues
   */
  if (! grid->mpi_process_is_active)
    ev.resize (m);
  /*
   * send the eigenvalues to processors not being part of the process grid
   */
  grid->send_to_inactive(ev.data(), ev.size());

  /*
   * if only eigenvalues are queried, the content of the matrix is destroyed;
   * if the eigenpairs are queried, matrix A on exit stores the eigenvectors in its columns
   */
  if (compute_eigenvectors)
    {
      property = LAPACKSupport::Property::general;
      state = LAPACKSupport::eigenvalues;
    }
  else
    state = LAPACKSupport::unusable;

  return ev;
}
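
// Usage sketch (illustration only): compute the five smallest eigenvalues of
// a symmetric matrix without eigenvectors; the index limits are 0-based and
// inclusive.
//
//   const std::vector<double> ev =
//     A.eigenpairs_symmetric_by_index(std::make_pair(0,4), false);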


template <typename NumberType>
std::vector<NumberType> ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(const std::pair<unsigned int,unsigned int> &index_limits,
                                                                                        const bool compute_eigenvectors)
{
  // Check validity of index limits.
  AssertIndexRange(index_limits.first,static_cast<unsigned int>(n_rows));
  AssertIndexRange(index_limits.second,static_cast<unsigned int>(n_rows));

  const std::pair<unsigned int,unsigned int> idx = std::make_pair(std::min(index_limits.first,index_limits.second),
                                                                  std::max(index_limits.first,index_limits.second));

  // Compute all eigenvalues/eigenvectors.
  if (idx.first==0 && idx.second==static_cast<unsigned int>(n_rows-1))
    return eigenpairs_symmetric_MRRR(compute_eigenvectors);
  else
    return eigenpairs_symmetric_MRRR(compute_eigenvectors,idx);
}



template <typename NumberType>
std::vector<NumberType> ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(const std::pair<NumberType,NumberType> &value_limits,
                                                                                        const bool compute_eigenvectors)
{
  AssertIsFinite(value_limits.first);
  AssertIsFinite(value_limits.second);

  const std::pair<unsigned int,unsigned int> indices = std::make_pair(numbers::invalid_unsigned_int,numbers::invalid_unsigned_int);

  return eigenpairs_symmetric_MRRR(compute_eigenvectors,indices,value_limits);
}


template <typename NumberType>
std::vector<NumberType>
ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(const bool compute_eigenvectors,
                                                       const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
                                                       const std::pair<NumberType,NumberType> &eigenvalue_limits)
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));
  Assert (property == LAPACKSupport::symmetric,
          ExcMessage("Matrix has to be symmetric for this operation."));

  Threads::Mutex::ScopedLock lock(mutex);

  const bool use_values = (std::isnan(eigenvalue_limits.first) || std::isnan(eigenvalue_limits.second)) ? false : true;
  const bool use_indices = ((eigenvalue_idx.first==numbers::invalid_unsigned_int) || (eigenvalue_idx.second==numbers::invalid_unsigned_int)) ? false : true;

  Assert(!(use_values && use_indices),
         ExcMessage("Prescribing both the index and value range for the eigenvalues is ambiguous"));

  // If computation of eigenvectors is not required, use a sufficiently small distributed matrix.
  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors = compute_eigenvectors ?
                                                              std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,grid,row_block_size) :
                                                              std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(grid->n_process_rows,grid->n_process_columns,grid,1,1);

  eigenvectors->property = property;
  // Number of eigenvalues to be returned from psyevr; upon successful exit ev contains the m selected eigenvalues in ascending order.
  int m = n_rows;
  std::vector<NumberType> ev(n_rows);

  // Number of eigenvectors to be returned;
  // upon successful exit the first m=nz columns contain the selected eigenvectors (only if jobz=='V').
  int nz=0;

  if (grid->mpi_process_is_active)
    {
      int info = 0;
      /*
       * For jobz=='N' only eigenvalues are computed, for jobz=='V' also the eigenvectors of the matrix are computed.
       */
      char jobz = compute_eigenvectors ? 'V' : 'N';
      // Default value is to compute all eigenvalues and optionally eigenvectors.
      char range='A';
      NumberType vl=NumberType(),vu=NumberType();
      int il=1,iu=1;

      // Index range for eigenvalues is not specified.
      if (!use_indices)
        {
          // Interval for eigenvalues is not specified and consequently all eigenvalues/eigenpairs will be computed.
          if (!use_values)
            {
              range = 'A';
            }
          else
            {
              range = 'V';
              vl = std::min(eigenvalue_limits.first,eigenvalue_limits.second);
              vu = std::max(eigenvalue_limits.first,eigenvalue_limits.second);
            }
        }
      else
        {
          range = 'I';
          // As Fortran starts counting/indexing from 1 unlike C/C++, where it starts from 0.
          il = std::min(eigenvalue_idx.first,eigenvalue_idx.second) + 1;
          iu = std::max(eigenvalue_idx.first,eigenvalue_idx.second) + 1;
        }
      NumberType *A_loc = &this->values[0];

      /*
       * By setting lwork to -1 a workspace query for the optimal length of work is performed.
       */
      int lwork=-1;
      int liwork=-1;
      NumberType *eigenvectors_loc = (compute_eigenvectors ? &eigenvectors->values[0] : nullptr);
      work.resize(1);
      iwork.resize (1);

      psyevr(&jobz, &range, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &vl, &vu, &il, &iu, &m, &nz, ev.data(),
             eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
             work.data(), &lwork, iwork.data(), &liwork, &info);

      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("psyevr", info));

      lwork=work[0];
      work.resize(lwork);
      liwork = iwork[0];
      iwork.resize(liwork);

      psyevr(&jobz, &range, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &vl, &vu, &il, &iu, &m, &nz, ev.data(),
             eigenvectors_loc, &eigenvectors->submatrix_row, &eigenvectors->submatrix_column, eigenvectors->descriptor,
             work.data(), &lwork, iwork.data(), &liwork, &info);

      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("psyevr", info));

      if (compute_eigenvectors)
        AssertThrow(m==nz,ExcMessage("psyevr failed to compute all eigenvectors for the selected eigenvalues"));

      // If eigenvectors are queried, copy eigenvectors to original matrix.
      // As the temporary matrix eigenvectors has identical dimensions and
      // block-cyclic distribution we simply swap the local array.
      if (compute_eigenvectors)
        this->values.swap(eigenvectors->values);

      // Adapt the size of ev to fit m upon return.
      while ((int)ev.size() > m)
        ev.pop_back();
    }
  /*
   * Send number of computed eigenvalues to inactive processes.
   */
  grid->send_to_inactive(&m, 1);

  /*
   * Inactive processes have to resize array of eigenvalues.
   */
  if (! grid->mpi_process_is_active)
    ev.resize(m);
  /*
   * Send the eigenvalues to processors not being part of the process grid.
   */
  grid->send_to_inactive(ev.data(), ev.size());

  /*
   * If only eigenvalues are queried, the content of the matrix is destroyed.
   * If the eigenpairs are queried, matrix A on exit stores the eigenvectors in its columns.
   */
  if (compute_eigenvectors)
    {
      property = LAPACKSupport::Property::general;
      state = LAPACKSupport::eigenvalues;
    }
  else
    state = LAPACKSupport::unusable;

  return ev;
}
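
// The _MRRR variants above use p_syevr (multiple relatively robust
// representations) instead of p_syev/p_syevx; the calling convention mirrors
// the non-MRRR functions shown earlier in this file.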


template <typename NumberType>
std::vector<NumberType> ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
                                                                 ScaLAPACKMatrix<NumberType> *VT)
{
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size==column_block_size,ExcDimensionMismatch(row_block_size,column_block_size));

  const bool left_singular_vectors = (U != nullptr) ? true : false;
  const bool right_singular_vectors = (VT != nullptr) ? true : false;

  if (left_singular_vectors)
    {
      Assert(n_rows==U->n_rows,ExcDimensionMismatch(n_rows,U->n_rows));
      Assert(U->n_rows==U->n_columns,ExcDimensionMismatch(U->n_rows,U->n_columns));
      Assert(row_block_size==U->row_block_size,ExcDimensionMismatch(row_block_size,U->row_block_size));
      Assert(column_block_size==U->column_block_size,ExcDimensionMismatch(column_block_size,U->column_block_size));
      Assert(grid->blacs_context==U->grid->blacs_context,ExcDimensionMismatch(grid->blacs_context,U->grid->blacs_context));
    }
  if (right_singular_vectors)
    {
      Assert(n_columns==VT->n_rows,ExcDimensionMismatch(n_columns,VT->n_rows));
      Assert(VT->n_rows==VT->n_columns,ExcDimensionMismatch(VT->n_rows,VT->n_columns));
      Assert(row_block_size==VT->row_block_size,ExcDimensionMismatch(row_block_size,VT->row_block_size));
      Assert(column_block_size==VT->column_block_size,ExcDimensionMismatch(column_block_size,VT->column_block_size));
      Assert(grid->blacs_context==VT->grid->blacs_context,ExcDimensionMismatch(grid->blacs_context,VT->grid->blacs_context));
    }
  Threads::Mutex::ScopedLock lock (mutex);

  std::vector<NumberType> sv(std::min(n_rows,n_columns));

  if (grid->mpi_process_is_active)
    {
      char jobu = left_singular_vectors ? 'V' : 'N';
      char jobvt = right_singular_vectors ? 'V' : 'N';
      NumberType *A_loc = &this->values[0];
      NumberType *U_loc = left_singular_vectors ? &(U->values[0]) : nullptr;
      NumberType *VT_loc = right_singular_vectors ? &(VT->values[0]) : nullptr;
      int info = 0;
      /*
       * by setting lwork to -1 a workspace query for the optimal length of work is performed
       */
      int lwork=-1;
      work.resize(1);

      pgesvd(&jobu,&jobvt,&n_rows,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,
             & *sv.begin(),U_loc,&U->submatrix_row,&U->submatrix_column,U->descriptor,
             VT_loc,&VT->submatrix_row,&VT->submatrix_column,VT->descriptor,
             &work[0],&lwork,&info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgesvd", info));

      lwork=work[0];
      work.resize(lwork);

      pgesvd(&jobu,&jobvt,&n_rows,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,
             & *sv.begin(),U_loc,&U->submatrix_row,&U->submatrix_column,U->descriptor,
             VT_loc,&VT->submatrix_row,&VT->submatrix_column,VT->descriptor,
             &work[0],&lwork,&info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgesvd", info));
    }

  /*
   * send the singular values to processors not being part of the process grid
   */
  grid->send_to_inactive(sv.data(), sv.size());

  property = LAPACKSupport::Property::general;
  state = LAPACKSupport::State::unusable;

  return sv;
}
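
// Usage sketch (illustration only): full SVD with both singular vector
// matrices; U must be n_rows x n_rows and VT n_columns x n_columns, on the
// same grid and with identical square block sizes.
//
//   ScaLAPACKMatrix<double> U(m, m, grid, nb, nb);
//   ScaLAPACKMatrix<double> VT(n, n, grid, nb, nb);
//   const std::vector<double> sv = A.compute_SVD(&U, &VT);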


template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
                                                const bool transpose)
{
  Assert(grid==B.grid,ExcMessage("The matrices A and B need to have the same process grid"));
  Assert (state == LAPACKSupport::matrix,
          ExcMessage("Matrix has to be in Matrix state before calling this function."));
  Assert (B.state == LAPACKSupport::matrix,
          ExcMessage("Matrix B has to be in Matrix state before calling this function."));

  if (transpose)
    {
      Assert(n_columns==B.n_rows,ExcDimensionMismatch(n_columns,B.n_rows));
    }
  else
    {
      Assert(n_rows==B.n_rows,ExcDimensionMismatch(n_rows,B.n_rows));
    }

  // see https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgels.htm
  Assert(row_block_size==column_block_size,ExcMessage("Use identical block sizes for rows and columns of matrix A"));
  Assert(B.row_block_size==B.column_block_size,ExcMessage("Use identical block sizes for rows and columns of matrix B"));
  Assert(row_block_size==B.row_block_size,ExcMessage("Use identical block-cyclic distribution for matrices A and B"));

  Threads::Mutex::ScopedLock lock (mutex);

  if (grid->mpi_process_is_active)
    {
      char trans = transpose ? 'T' : 'N';
      NumberType *A_loc = &this->values[0];
      NumberType *B_loc = &B.values[0];
      int info = 0;
      /*
       * by setting lwork to -1 a workspace query for the optimal length of work is performed
       */
      int lwork=-1;
      work.resize(1);

      pgels(&trans,&n_rows,&n_columns,&B.n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,
            B_loc,&B.submatrix_row,&B.submatrix_column,B.descriptor,&work[0],&lwork,&info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgels", info));

      lwork=work[0];
      work.resize(lwork);

      pgels(&trans,&n_rows,&n_columns,&B.n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,
            B_loc,&B.submatrix_row,&B.submatrix_column,B.descriptor,&work[0],&lwork,&info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pgels", info));
    }
  state = LAPACKSupport::State::unusable;
}


template <typename NumberType>
unsigned int ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
{
  Assert(state == LAPACKSupport::matrix,
         ExcMessage("Matrix has to be in Matrix state before calling this function."));
  Assert(row_block_size==column_block_size,
         ExcMessage("Use identical block sizes for rows and columns of matrix A"));
  Assert(ratio>0. && ratio<1.,
         ExcMessage("input parameter ratio has to be larger than zero and smaller than 1"));

  ScaLAPACKMatrix<NumberType> U(n_rows,n_rows,grid,row_block_size,row_block_size,LAPACKSupport::Property::general);
  ScaLAPACKMatrix<NumberType> VT(n_columns,n_columns,grid,row_block_size,row_block_size,LAPACKSupport::Property::general);
  std::vector<NumberType> sv = this->compute_SVD(&U,&VT);
  AssertThrow(sv[0]>std::numeric_limits<NumberType>::min(),ExcMessage("Matrix has rank 0"));

  // Get the number of singular values fulfilling the following: sv[i] > sv[0] * ratio.
  // Obviously, the 0-th element already satisfies sv[0] > sv[0] * ratio.
  // The singular values in sv are ordered by descending value, so we break out of the loop
  // if a singular value is smaller than sv[0] * ratio.
  unsigned int n_sv=1;
  std::vector<NumberType> inv_sigma;
  inv_sigma.push_back(1/sv[0]);

  for (unsigned int i=1; i<sv.size(); ++i)
    if (sv[i] > sv[0] * ratio)
      {
        ++n_sv;
        inv_sigma.push_back(1/sv[i]);
      }
    else
      break;

  // For the matrix multiplication we use only the columns of U and rows of VT which are associated
  // with singular values larger than the limit.
  // That saves computational time for matrices with rank significantly smaller than min(n_rows,n_columns).
  ScaLAPACKMatrix<NumberType> U_R(n_rows,n_sv,grid,row_block_size,row_block_size,LAPACKSupport::Property::general);
  ScaLAPACKMatrix<NumberType> VT_R(n_sv,n_columns,grid,row_block_size,row_block_size,LAPACKSupport::Property::general);
  U.copy_to(U_R,std::make_pair(0,0),std::make_pair(0,0),std::make_pair(n_rows,n_sv));
  VT.copy_to(VT_R,std::make_pair(0,0),std::make_pair(0,0),std::make_pair(n_sv,n_columns));

  VT_R.scale_rows(inv_sigma);
  this->reinit(n_columns,n_rows,this->grid,row_block_size,column_block_size,LAPACKSupport::Property::general);
  VT_R.mult(1,U_R,0,*this,true,true);
  state = LAPACKSupport::State::inverse_matrix;
  return n_sv;
}
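
// Worked example (illustration only): with singular values {10, 4, 0.5, 1e-9}
// and ratio = 0.1, the cut-off is sv[0]*ratio = 1, so only the first two
// columns of U and rows of VT enter the product and the returned effective
// rank n_sv is 2.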


template <typename NumberType>
NumberType ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(const NumberType a_norm) const
{
  Assert (state == LAPACKSupport::cholesky,
          ExcMessage("Matrix has to be in Cholesky state before calling this function."));
  Threads::Mutex::ScopedLock lock (mutex);
  NumberType rcond = 0.;

  if (grid->mpi_process_is_active)
    {
      int liwork = n_local_rows;
      iwork.resize(liwork);

      int info = 0;
      const NumberType *A_loc = &this->values[0];

      // by setting lwork to -1 a workspace query for the optimal length of work is performed
      int lwork = -1;
      work.resize(1);
      ppocon(&uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &a_norm, &rcond, &work[0], &lwork, &iwork[0], &liwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("ppocon", info));
      lwork = std::ceil(work[0]);
      work.resize(lwork);

      // now the actual run:
      ppocon(&uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,
             &a_norm, &rcond, &work[0], &lwork, &iwork[0], &liwork, &info);
      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("ppocon", info));
    }
  grid->send_to_inactive(&rcond);
  return rcond;
}
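
// Usage sketch (illustration only): estimate the condition number of a
// symmetric positive definite matrix. The l1 norm has to be computed before
// the Cholesky factorization overwrites the matrix content.
//
//   const double a_norm = A.l1_norm();
//   A.compute_cholesky_factorization();
//   const double rcond = A.reciprocal_condition_number(a_norm);
//   const double cond  = 1./rcond;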


template <typename NumberType>
NumberType ScaLAPACKMatrix<NumberType>::l1_norm() const
{
  const char type('O');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}



template <typename NumberType>
NumberType ScaLAPACKMatrix<NumberType>::linfty_norm() const
{
  const char type('I');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}



template <typename NumberType>
NumberType ScaLAPACKMatrix<NumberType>::frobenius_norm() const
{
  const char type('F');

  if (property == LAPACKSupport::symmetric)
    return norm_symmetric(type);
  else
    return norm_general(type);
}


template <typename NumberType>
NumberType ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
{
  Assert (state == LAPACKSupport::matrix ||
          state == LAPACKSupport::inverse_matrix,
          ExcMessage("norms can be called in matrix state only."));
  Threads::Mutex::ScopedLock lock (mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      const int iarow = indxg2p_(&submatrix_row, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
      const int iacol = indxg2p_(&submatrix_column, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));
      const int mp0 = numroc_(&n_rows, &row_block_size, &(grid->this_process_row), &iarow, &(grid->n_process_rows));
      const int nq0 = numroc_(&n_columns, &column_block_size, &(grid->this_process_column), &iacol, &(grid->n_process_columns));

      // type=='M': compute largest absolute value
      // type=='F' || type=='E': compute Frobenius norm
      // type=='O' || type=='1': compute one norm
      // type=='I': compute infinity norm
      int lwork=0; // for type == 'M' || type == 'F' || type == 'E'
      if (type=='O' || type=='1')
        lwork = nq0;
      else if (type=='I')
        lwork = mp0;

      work.resize(lwork);
      const NumberType *A_loc = this->values.begin();
      res = plange(&type, &n_rows, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, work.data());
    }
  grid->send_to_inactive(&res);
  return res;
}


template <typename NumberType>
NumberType ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
{
  Assert (state == LAPACKSupport::matrix ||
          state == LAPACKSupport::inverse_matrix,
          ExcMessage("norms can be called in matrix state only."));
  Assert (property == LAPACKSupport::symmetric,
          ExcMessage("Matrix has to be symmetric for this operation."));
  Threads::Mutex::ScopedLock lock (mutex);
  NumberType res = 0.;

  if (grid->mpi_process_is_active)
    {
      //int IROFFA = MOD( IA-1, MB_A )
      //int ICOFFA = MOD( JA-1, NB_A )
      const int lcm = ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
      const int v2 = lcm/(grid->n_process_rows);

      const int IAROW = indxg2p_(&submatrix_row, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
      const int IACOL = indxg2p_(&submatrix_column, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));
      const int Np0 = numroc_(&n_columns/*+IROFFA*/, &row_block_size, &(grid->this_process_row), &IAROW, &(grid->n_process_rows));
      const int Nq0 = numroc_(&n_columns/*+ICOFFA*/, &column_block_size, &(grid->this_process_column), &IACOL, &(grid->n_process_columns));

      const int v1 = iceil_(&Np0, &row_block_size);
      const int ldw = (n_local_rows==n_local_columns) ?
                      0 :
                      row_block_size*iceil_(&v1,&v2);

      const int lwork = (type == 'M' || type == 'F' || type == 'E' ) ?
                        0 :
                        2*Nq0+Np0+ldw;
      work.resize(lwork);
      const NumberType *A_loc = this->values.begin();
      res = plansy(&type, &uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, work.data());
    }
  grid->send_to_inactive(&res);
  return res;
}
1503 
1504 
1505 
1506 #ifdef DEAL_II_WITH_HDF5
1507 namespace internal
1508 {
1509  namespace
1510  {
1511  void create_HDF5_state_enum_id(hid_t &state_enum_id)
1512  {
1513  // create HDF5 enum type for LAPACKSupport::State
1515  state_enum_id = H5Tcreate (H5T_ENUM, sizeof(LAPACKSupport::State));
1516  val = LAPACKSupport::State::cholesky;
1517  herr_t status = H5Tenum_insert (state_enum_id, "cholesky", (int *)&val);
1518  AssertThrow(status >= 0, ExcInternalError());
1520  status = H5Tenum_insert (state_enum_id, "eigenvalues", (int *)&val);
1521  AssertThrow(status >= 0, ExcInternalError());
1522  val = LAPACKSupport::State::inverse_matrix;
1523  status = H5Tenum_insert (state_enum_id, "inverse_matrix", (int *)&val);
1524  AssertThrow(status >= 0, ExcInternalError());
1525  val = LAPACKSupport::State::inverse_svd;
1526  status = H5Tenum_insert (state_enum_id, "inverse_svd", (int *)&val);
1527  AssertThrow(status >= 0, ExcInternalError());
1528  val = LAPACKSupport::State::lu;
1529  status = H5Tenum_insert (state_enum_id, "lu", (int *)&val);
1530  AssertThrow(status >= 0, ExcInternalError());
1531  val = LAPACKSupport::State::matrix;
1532  status = H5Tenum_insert (state_enum_id, "matrix", (int *)&val);
1533  AssertThrow(status >= 0, ExcInternalError());
1534  val = LAPACKSupport::State::svd;
1535  status = H5Tenum_insert (state_enum_id, "svd", (int *)&val);
1536  AssertThrow(status >= 0, ExcInternalError());
1537  val = LAPACKSupport::State::unusable;
1538  status = H5Tenum_insert (state_enum_id, "unusable", (int *)&val);
1539  AssertThrow(status >= 0, ExcInternalError());
1540  }
1541 
1542  void create_HDF5_property_enum_id(hid_t &property_enum_id)
1543  {
1544  // create HDF5 enum type for LAPACKSupport::Property
1545  property_enum_id = H5Tcreate (H5T_ENUM, sizeof(LAPACKSupport::Property));
1546  LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
1547  herr_t status = H5Tenum_insert (property_enum_id, "diagonal", (int *)&prop);
1548  AssertThrow(status >= 0, ExcInternalError());
1549  prop = LAPACKSupport::Property::general;
1550  status = H5Tenum_insert (property_enum_id, "general", (int *)&prop);
1551  AssertThrow(status >= 0, ExcInternalError());
1552  prop = LAPACKSupport::Property::hessenberg;
1553  status = H5Tenum_insert (property_enum_id, "hessenberg", (int *)&prop);
1554  AssertThrow(status >= 0, ExcInternalError());
1555  prop = LAPACKSupport::Property::lower_triangular;
1556  status = H5Tenum_insert (property_enum_id, "lower_triangular", (int *)&prop);
1557  AssertThrow(status >= 0, ExcInternalError());
1558  prop = LAPACKSupport::Property::symmetric;
1559  status = H5Tenum_insert (property_enum_id, "symmetric", (int *)&prop);
1560  AssertThrow(status >= 0, ExcInternalError());
1561  prop = LAPACKSupport::Property::upper_triangular;
1562  status = H5Tenum_insert (property_enum_id, "upper_triangular", (int *)&prop);
1563  AssertThrow(status >= 0, ExcInternalError());
1564  }
1565  }
1566 }
1567 #endif
1568 
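// Because the two helpers above register named enum values, the state and
// property stored by save() below remain human-readable; an archive can be
// inspected from the command line, e.g. with
//
//   h5dump -d /state    matrix.h5   // prints the symbolic name, e.g. "matrix"
//   h5dump -d /property matrix.h5   // e.g. "symmetric"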
1569 
1570 
1571 template <typename NumberType>
1572 void ScaLAPACKMatrix<NumberType>::save(const char *filename,
1573  const std::pair<unsigned int,unsigned int> &chunk_size) const
1574 {
1575 #ifndef DEAL_II_WITH_HDF5
1576  (void)filename;
1577  (void)chunk_size;
1578  AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
1579 #else
1580 
1581  std::pair<unsigned int,unsigned int> chunks_size_ = chunk_size;
1582 
1583  if (chunks_size_.first==numbers::invalid_unsigned_int || chunks_size_.second==numbers::invalid_unsigned_int)
1584  {
1585  // default: store the matrix in chunks of columns
1586  chunks_size_.first = n_rows;
1587  chunks_size_.second = 1;
1588  }
1589  Assert((chunks_size_.first <= (unsigned int)n_rows) && (chunks_size_.first>0),ExcIndexRange(chunks_size_.first,1,n_rows+1));
1590  Assert((chunks_size_.second <= (unsigned int)n_columns) && (chunks_size_.second>0),ExcIndexRange(chunks_size_.second,1,n_columns+1));
1591 
1592 # ifdef H5_HAVE_PARALLEL
1593  //implementation for configurations equipped with a parallel file system
1594  save_parallel(filename,chunks_size_);
1595 
1596 # else
1597  //implementation for configurations with no parallel file system
1598  save_serial(filename,chunks_size_);
1599 
1600 # endif
1601 #endif
1602 }
1603 
1604 
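// A minimal save() sketch; with the default chunk_size the matrix is written
// in chunks of whole columns (n_rows x 1):
//
//   ScaLAPACKMatrix<double> A(1000, 1000, grid, 32, 32);
//   // ... fill A ...
//   A.save("A.h5");                           // default chunking
//   A.save("A.h5", std::make_pair(64u, 64u)); // 64x64 chunks instead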
1605 
1606 template <typename NumberType>
1607 void ScaLAPACKMatrix<NumberType>::save_serial(const char *filename,
1608  const std::pair<unsigned int,unsigned int> &chunk_size) const
1609 {
1610 # ifndef DEAL_II_WITH_HDF5
1611  (void)filename;
1612  (void)chunk_size;
1613  Assert(false,ExcInternalError());
1614 # else
1615 
1616  /*
1617  * The content of the distributed matrix is copied to a matrix using a 1x1 process grid.
1618  * Therefore, one process has all the data and can write it to a file.
1619  *
1620  * Create a 1x1 column grid which will be used to initialize
1621  * an effectively serial ScaLAPACK matrix to gather the contents from the current object
1622  */
1623  const auto column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,1);
1624 
1625  const int MB=n_rows, NB=n_columns;
1626  ScaLAPACKMatrix<NumberType> tmp(n_rows,n_columns,column_grid,MB,NB);
1627  copy_to(tmp);
1628 
1629  // the 1x1 grid has only one process and this one writes
1630  // the content of the matrix to the HDF5 file
1631  if (tmp.grid->mpi_process_is_active)
1632  {
1633  herr_t status;
1634 
1635  // create a new file using default properties
1636  hid_t file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
1637 
1638  // modify dataset creation properties, i.e. enable chunking
1639  hsize_t chunk_dims[2];
1640  //reverse order of rows and columns as ScaLAPACK uses column-major ordering
1641  chunk_dims[0] = chunk_size.second;
1642  chunk_dims[1] = chunk_size.first;
1643  hid_t data_property = H5Pcreate (H5P_DATASET_CREATE);
1644  status = H5Pset_chunk (data_property, 2, chunk_dims);
1645  AssertThrow(status >= 0, ExcIO());
1646 
1647  // create the data space for the dataset
1648  hsize_t dims[2];
1649  //change order of rows and columns as ScaLAPACKMatrix uses column major ordering
1650  dims[0] = n_columns;
1651  dims[1] = n_rows;
1652  hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
1653 
1654  // create the dataset within the file using chunk creation properties
1655  hid_t type_id = hdf5_type_id(&tmp.values[0]);
1656  hid_t dataset_id = H5Dcreate2(file_id, "/matrix",
1657  type_id, dataspace_id,
1658  H5P_DEFAULT, data_property, H5P_DEFAULT);
1659 
1660  // write the dataset
1661  status = H5Dwrite(dataset_id, type_id,
1662  H5S_ALL, H5S_ALL, H5P_DEFAULT,
1663  &tmp.values[0]);
1664  AssertThrow(status >= 0, ExcIO());
1665 
1666  // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
1667  hid_t state_enum_id, property_enum_id;
1668  internal::create_HDF5_state_enum_id(state_enum_id);
1669  internal::create_HDF5_property_enum_id(property_enum_id);
1670 
1671  // create the data space for the state enum
1672  hsize_t dims_state[1];
1673  dims_state[0]=1;
1674  hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
1675  // create the dataset for the state enum
1676  hid_t state_enum_dataset = H5Dcreate2(file_id, "/state", state_enum_id, state_enum_dataspace,
1677  H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1678  // write the dataset for the state enum
1679  status = H5Dwrite(state_enum_dataset, state_enum_id,
1680  H5S_ALL, H5S_ALL, H5P_DEFAULT,
1681  &state);
1682  AssertThrow(status >= 0, ExcIO());
1683 
1684  // create the data space for the property enum
1685  hsize_t dims_property[1];
1686  dims_property[0]=1;
1687  hid_t property_enum_dataspace = H5Screate_simple(1, dims_property, nullptr);
1688  // create the dataset for the property enum
1689  hid_t property_enum_dataset = H5Dcreate2(file_id, "/property", property_enum_id, property_enum_dataspace,
1690  H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1691  // write the dataset for the property enum
1692  status = H5Dwrite(property_enum_dataset, property_enum_id,
1693  H5S_ALL, H5S_ALL, H5P_DEFAULT,
1694  &property);
1695  AssertThrow(status >= 0, ExcIO());
1696 
1697  // end access to the datasets and release resources used by them
1698  status = H5Dclose(dataset_id);
1699  AssertThrow(status >= 0, ExcIO());
1700  status = H5Dclose(state_enum_dataset);
1701  AssertThrow(status >= 0, ExcIO());
1702  status = H5Dclose(property_enum_dataset);
1703  AssertThrow(status >= 0, ExcIO());
1704 
1705  // terminate access to the data spaces
1706  status = H5Sclose(dataspace_id);
1707  AssertThrow(status >= 0, ExcIO());
1708  status = H5Sclose(state_enum_dataspace);
1709  AssertThrow(status >= 0, ExcIO());
1710  status = H5Sclose(property_enum_dataspace);
1711  AssertThrow(status >= 0, ExcIO());
1712 
1713  // release enum data types
1714  status = H5Tclose(state_enum_id);
1715  AssertThrow(status >= 0, ExcIO());
1716  status = H5Tclose(property_enum_id);
1717  AssertThrow(status >= 0, ExcIO());
1718 
1719  // release the creation property
1720  status = H5Pclose (data_property);
1721  AssertThrow(status >= 0, ExcIO());
1722 
1723  // close the file.
1724  status = H5Fclose(file_id);
1725  AssertThrow(status >= 0, ExcIO());
1726  }
1727 # endif
1728 }
1729 
1730 
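// As save_serial() above writes the n_rows x n_columns column-major matrix as
// an n_columns x n_rows row-major HDF5 dataset, matrix entry (i,j) becomes
// dataset element (j,i). For example, the 2x3 matrix
//   [a b c]
//   [d e f]
// is stored in memory as a,d,b,e,c,f and hence appears in the file as the
// 3x2 dataset {{a,d},{b,e},{c,f}}.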
1731 
1732 template <typename NumberType>
1733 void ScaLAPACKMatrix<NumberType>::save_parallel(const char *filename,
1734  const std::pair<unsigned int,unsigned int> &chunk_size) const
1735 {
1736 # ifndef DEAL_II_WITH_HDF5
1737  (void)filename;
1738  (void)chunk_size;
1739  Assert(false,ExcInternalError());
1740 # else
1741 
1742  const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
1743  MPI_Info info = MPI_INFO_NULL;
1744  /*
1745  * The content of the distributed matrix is copied to a matrix using a 1xn_processes process grid.
1746  * Therefore, the processes hold contiguous chunks of the matrix, which they can write to the file
1747  *
1748  * Create a 1xn_processes column grid
1749  */
1750  const auto column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,n_mpi_processes);
1751 
1752  const int MB=n_rows, NB=std::ceil(static_cast<double>(n_columns)/n_mpi_processes); // avoid integer division, which would truncate before ceil
1753  ScaLAPACKMatrix<NumberType> tmp(n_rows,n_columns,column_grid,MB,NB);
1754  copy_to(tmp);
1755 
1756  // get pointer to data held by the process
1757  NumberType *data = (tmp.values.size()>0) ? &tmp.values[0] : nullptr;
1758 
1759  herr_t status;
1760  // dataset dimensions
1761  hsize_t dims[2];
1762 
1763  // set up file access property list with parallel I/O access
1764  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
1765  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
1766  AssertThrow(status >= 0, ExcIO());
1767 
1768  // create a new file collectively and release property list identifier
1769  hid_t file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
1770  status = H5Pclose(plist_id);
1771  AssertThrow(status >= 0, ExcIO());
1772 
1773  // As ScaLAPACK, and therefore the class ScaLAPACKMatrix, uses column-major ordering
1774  // but HDF5 uses row-major ordering, we have to reverse entries related
1775  // to columns and rows in the following.
1776  // create the dataspace for the dataset
1777  dims[0] = tmp.n_columns;
1778  dims[1] = tmp.n_rows;
1779 
1780  hid_t filespace = H5Screate_simple(2, dims, nullptr);
1781 
1782  // create the chunked dataset with default properties and close filespace
1783  hsize_t chunk_dims[2];
1784  //reverse order of rows and columns as ScaLAPACK uses column-major ordering
1785  chunk_dims[0] = chunk_size.second;
1786  chunk_dims[1] = chunk_size.first;
1787  plist_id = H5Pcreate(H5P_DATASET_CREATE);
1788  H5Pset_chunk(plist_id, 2, chunk_dims);
1789  hid_t type_id = hdf5_type_id(data);
1790  hid_t dset_id = H5Dcreate2(file_id, "/matrix", type_id,
1791  filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
1792 
1793  status = H5Sclose(filespace);
1794  AssertThrow(status >= 0, ExcIO());
1795 
1796  status = H5Pclose(plist_id);
1797  AssertThrow(status >= 0, ExcIO());
1798 
1799  // gather the number of local rows and columns from all processes
1800  std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
1801  int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
1802  proc_n_local_rows.data(), 1, MPI_INT,
1803  tmp.grid->mpi_communicator);
1804  AssertThrowMPI(ierr);
1805  ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
1806  proc_n_local_columns.data(), 1, MPI_INT,
1807  tmp.grid->mpi_communicator);
1808  AssertThrowMPI(ierr);
1809 
1810  const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
1811 
1812  // hyperslab selection parameters
1813  // each process defines a dataset in memory and writes it to the hyperslab in the file
1814  hsize_t count[2];
1815  count[0] = tmp.n_local_columns;
1816  count[1] = tmp.n_rows;
1817  hid_t memspace = H5Screate_simple(2, count, nullptr);
1818 
1819  hsize_t offset[2] = {0};
1820  for (unsigned int i=0; i<my_rank; ++i)
1821  offset[0] += proc_n_local_columns[i];
1822 
1823  // select hyperslab in the file.
1824  filespace = H5Dget_space(dset_id);
1825  status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
1826  AssertThrow(status >= 0, ExcIO());
1827 
1828  // create property list for independent dataset write
1829  plist_id = H5Pcreate(H5P_DATASET_XFER);
1830  status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
1831  AssertThrow(status >= 0, ExcIO());
1832 
1833  // process with no data will not participate in writing to the file
1834  if (tmp.values.size()>0)
1835  {
1836  status = H5Dwrite(dset_id, type_id, memspace, filespace,
1837  plist_id, data);
1838  AssertThrow(status >= 0, ExcIO());
1839  }
1840  // close/release sources
1841  status = H5Dclose(dset_id);
1842  AssertThrow(status >= 0, ExcIO());
1843  status = H5Sclose(filespace);
1844  AssertThrow(status >= 0, ExcIO());
1845  status = H5Sclose(memspace);
1846  AssertThrow(status >= 0, ExcIO());
1847  status = H5Pclose(plist_id);
1848  AssertThrow(status >= 0, ExcIO());
1849  status = H5Fclose(file_id);
1850  AssertThrow(status >= 0, ExcIO());
1851 
1852  // before writing the state and property to file wait for
1853  // all processes to finish writing the matrix content to the file
1854  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
1855  AssertThrowMPI(ierr);
1856 
1857  // only root process will write state and property to the file
1858  if (tmp.grid->this_mpi_process==0)
1859  {
1860  // open file using default properties
1861  hid_t file_id_reopen = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
1862 
1863  // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
1864  hid_t state_enum_id, property_enum_id;
1865  internal::create_HDF5_state_enum_id(state_enum_id);
1866  internal::create_HDF5_property_enum_id(property_enum_id);
1867 
1868  // create the data space for the state enum
1869  hsize_t dims_state[1];
1870  dims_state[0]=1;
1871  hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
1872  // create the dataset for the state enum
1873  hid_t state_enum_dataset = H5Dcreate2(file_id_reopen, "/state", state_enum_id, state_enum_dataspace,
1874  H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1875  // write the dataset for the state enum
1876  status = H5Dwrite(state_enum_dataset, state_enum_id,
1877  H5S_ALL, H5S_ALL, H5P_DEFAULT,
1878  &state);
1879  AssertThrow(status >= 0, ExcIO());
1880 
1881  // create the data space for the property enum
1882  hsize_t dims_property[1];
1883  dims_property[0]=1;
1884  hid_t property_enum_dataspace = H5Screate_simple(1, dims_property, nullptr);
1885  // create the dataset for the property enum
1886  hid_t property_enum_dataset = H5Dcreate2(file_id_reopen, "/property", property_enum_id, property_enum_dataspace,
1887  H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1888  // write the dataset for the property enum
1889  status = H5Dwrite(property_enum_dataset, property_enum_id,
1890  H5S_ALL, H5S_ALL, H5P_DEFAULT,
1891  &property);
1892  AssertThrow(status >= 0, ExcIO());
1893 
1894  status = H5Dclose(state_enum_dataset);
1895  AssertThrow(status >= 0, ExcIO());
1896  status = H5Dclose(property_enum_dataset);
1897  AssertThrow(status >= 0, ExcIO());
1898  status = H5Sclose(state_enum_dataspace);
1899  AssertThrow(status >= 0, ExcIO());
1900  status = H5Sclose(property_enum_dataspace);
1901  AssertThrow(status >= 0, ExcIO());
1902  status = H5Tclose(state_enum_id);
1903  AssertThrow(status >= 0, ExcIO());
1904  status = H5Tclose(property_enum_id);
1905  AssertThrow(status >= 0, ExcIO());
1906  status = H5Fclose(file_id_reopen);
1907  AssertThrow(status >= 0, ExcIO());
1908  }
1909 
1910 # endif
1911 }
1912 
1913 
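// The hyperslab offsets in save_parallel() make each process write its own
// contiguous block of columns. A worked example with n_columns = 10 and
// n_mpi_processes = 3: NB = ceil(10/3) = 4, so the three processes hold
// 4, 4 and 2 local columns and write at column offsets 0, 4 and 8.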
1914 
1915 template <typename NumberType>
1916 void ScaLAPACKMatrix<NumberType>::load(const char *filename)
1917 {
1918 #ifndef DEAL_II_WITH_HDF5
1919  (void)filename;
1920  AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
1921 #else
1922 # ifdef H5_HAVE_PARALLEL
1923  //implementation for configurations equipped with a parallel file system
1924  load_parallel(filename);
1925 
1926 # else
1927  //implementation for configurations with no parallel file system
1928  load_serial(filename);
1929 # endif
1930 #endif
1931 }
1932 
1933 
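// A save()/load() round-trip sketch; the matrix passed to load() must already
// have the dimensions of the archived matrix (they are checked against the
// file), while the block sizes may differ:
//
//   A.save("A.h5");
//   ScaLAPACKMatrix<double> B(1000, 1000, grid, 64, 64);
//   B.load("A.h5"); // B now holds A's content, state and property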
1934 
1935 template <typename NumberType>
1936 void ScaLAPACKMatrix<NumberType>::load_serial(const char *filename)
1937 {
1938 # ifndef DEAL_II_WITH_HDF5
1939  (void)filename;
1940  Assert(false,ExcInternalError());
1941 # else
1942 
1943  /*
1944  * The content of the file is read into a matrix distributed on a 1x1 process grid,
1945  * so that one process holds all the data and can read it from the file.
1946  */
1947  //create a 1x1 process grid
1948  const auto one_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,1);
1949 
1950  const int MB=n_rows, NB=n_columns;
1951  ScaLAPACKMatrix<NumberType> tmp(n_rows,n_columns,one_grid,MB,NB);
1952 
1953  int state_int = -1;
1954  int property_int = -1;
1955 
1956  // the 1x1 grid has only one process and this one reads
1957  // the content of the matrix from the HDF5 file
1958  if (tmp.grid->mpi_process_is_active)
1959  {
1960  herr_t status;
1961 
1962  // open the file in read-only mode
1963  hid_t file_id = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
1964 
1965  // open the dataset in the file
1966  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
1967 
1968  // check the datatype of the data in the file
1969  // datatype of source and destination must have the same class
1970  // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and Selection
1971  hid_t datatype = H5Dget_type(dataset_id);
1972  H5T_class_t t_class_in = H5Tget_class(datatype);
1973  H5T_class_t t_class = H5Tget_class(hdf5_type_id(&tmp.values[0]));
1974  AssertThrow(t_class_in == t_class,
1975  ExcMessage("The data type of the matrix to be read does not match the archive"));
1976 
1977  // get dataspace handle
1978  hid_t dataspace_id = H5Dget_space(dataset_id);
1979  // get number of dimensions
1980  const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
1981  AssertThrow(ndims==2, ExcIO());
1982  // get every dimension
1983  hsize_t dims[2];
1984  H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
1985  AssertThrow((int)dims[0]==n_columns,
1986  ExcMessage("The number of columns of the matrix does not match the content of the archive"));
1987  AssertThrow((int)dims[1]==n_rows,
1988  ExcMessage("The number of rows of the matrix does not match the content of the archive"));
1989 
1990  // read data
1991  status = H5Dread(dataset_id, hdf5_type_id(&tmp.values[0]), H5S_ALL, H5S_ALL,
1992  H5P_DEFAULT, &tmp.values[0]);
1993  AssertThrow(status >= 0, ExcIO());
1994 
1995  // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
1996  hid_t state_enum_id, property_enum_id;
1997  internal::create_HDF5_state_enum_id(state_enum_id);
1998  internal::create_HDF5_property_enum_id(property_enum_id);
1999 
2000  // open the datasets for the state and property enum in the file
2001  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
2002  hid_t datatype_state = H5Dget_type(dataset_state_id);
2003  H5T_class_t t_class_state = H5Tget_class(datatype_state);
2004  AssertThrow(t_class_state == H5T_ENUM, ExcIO());
2005 
2006  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
2007  hid_t datatype_property = H5Dget_type(dataset_property_id);
2008  H5T_class_t t_class_property = H5Tget_class(datatype_property);
2009  AssertThrow(t_class_property == H5T_ENUM, ExcIO());
2010 
2011  // get dataspace handles
2012  hid_t dataspace_state = H5Dget_space(dataset_state_id);
2013  hid_t dataspace_property = H5Dget_space(dataset_property_id);
2014  // get number of dimensions
2015  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
2016  AssertThrow(ndims_state==1, ExcIO());
2017  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
2018  AssertThrow(ndims_property==1, ExcIO());
2019  // get every dimension
2020  hsize_t dims_state[1];
2021  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
2022  AssertThrow((int)dims_state[0]==1,ExcIO());
2023  hsize_t dims_property[1];
2024  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
2025  AssertThrow((int)dims_property[0]==1,ExcIO());
2026 
2027  // read data
2028  status = H5Dread(dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL,
2029  H5P_DEFAULT, &tmp.state);
2030  AssertThrow(status >= 0, ExcIO());
2031  // To send the state from the root process to the other processes
2032  // the state enum is cast to an integer, which is broadcast and
2033  // subsequently cast back to the enum type
2034  state_int = static_cast<int>(tmp.state);
2035 
2036  status = H5Dread(dataset_property_id, property_enum_id, H5S_ALL, H5S_ALL,
2037  H5P_DEFAULT, &tmp.property);
2038  AssertThrow(status >= 0, ExcIO());
2039  // To send the property from the root process to the other processes
2040  // the property enum is cast to an integer, which is broadcast and
2041  // subsequently cast back to the enum type
2042  property_int = static_cast<int>(tmp.property);
2043 
2044  // terminate access to the data spaces
2045  status = H5Sclose(dataspace_id);
2046  AssertThrow(status >= 0, ExcIO());
2047  status = H5Sclose(dataspace_state);
2048  AssertThrow(status >= 0, ExcIO());
2049  status = H5Sclose(dataspace_property);
2050  AssertThrow(status >= 0, ExcIO());
2051 
2052  // release data type handles
2053  status = H5Tclose(datatype);
2054  AssertThrow(status >= 0, ExcIO());
2055  status = H5Tclose(state_enum_id);
2056  AssertThrow(status >= 0, ExcIO());
2057  status = H5Tclose(property_enum_id);
2058  AssertThrow(status >= 0, ExcIO());
2059 
2060  // end access to the data sets and release resources used by them
2061  status = H5Dclose(dataset_state_id);
2062  AssertThrow(status >= 0, ExcIO());
2063  status = H5Dclose(dataset_id);
2064  AssertThrow(status >= 0, ExcIO());
2065  status = H5Dclose(dataset_property_id);
2066  AssertThrow(status >= 0, ExcIO());
2067 
2068  // close the file.
2069  status = H5Fclose(file_id);
2070  AssertThrow(status >= 0, ExcIO());
2071  }
2072  // so far only the root process has the correct state integer --> broadcasting
2073  tmp.grid->send_to_inactive(&state_int,1);
2074  // so far only the root process has the correct property integer --> broadcasting
2075  tmp.grid->send_to_inactive(&property_int,1);
2076 
2077  tmp.state = static_cast<LAPACKSupport::State>(state_int);
2078  tmp.property = static_cast<LAPACKSupport::Property>(property_int);
2079 
2080  tmp.copy_to(*this);
2081 
2082 # endif // DEAL_II_WITH_HDF5
2083 }
2084 
2085 
2086 
2087 template <typename NumberType>
2088 void ScaLAPACKMatrix<NumberType>::load_parallel(const char *filename)
2089 {
2090 # ifndef DEAL_II_WITH_HDF5
2091  (void)filename;
2092  Assert(false,ExcInternalError());
2093 # else
2094 # ifndef H5_HAVE_PARALLEL
2095  Assert(false,ExcInternalError());
2096 # else
2097 
2098  const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
2099  MPI_Info info = MPI_INFO_NULL;
2100  /*
2101  * The content of the file is read into a matrix using a 1xn_processes process grid.
2102  * Therefore, the processes hold contiguous chunks of the matrix, which they can read from the file
2103  */
2104  //create a 1xP column grid with P being the number of MPI processes
2105  const auto column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,n_mpi_processes);
2106 
2107  const int MB=n_rows, NB=std::ceil(static_cast<double>(n_columns)/n_mpi_processes); // avoid integer division, which would truncate before ceil
2108  ScaLAPACKMatrix<NumberType> tmp(n_rows,n_columns,column_grid,MB,NB);
2109 
2110  // get pointer to data held by the process
2111  NumberType *data = (tmp.values.size()>0) ? &tmp.values[0] : nullptr;
2112 
2113  herr_t status;
2114 
2115  // set up file access property list with parallel I/O access
2116  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
2117  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2118  AssertThrow(status >= 0, ExcIO());
2119 
2120  // open file collectively in read-only mode and release property list identifier
2121  hid_t file_id = H5Fopen(filename, H5F_ACC_RDONLY, plist_id);
2122  status = H5Pclose(plist_id);
2123  AssertThrow(status >= 0, ExcIO());
2124 
2125  // open the dataset in the file collectively
2126  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
2127 
2128  // check the datatype of the dataset in the file
2129  // if the type classes of the dataset and the matrix do not match, abort
2130  // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and Selection
2131  hid_t datatype = hdf5_type_id(data);
2132  hid_t datatype_inp = H5Dget_type(dataset_id);
2133  H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
2134  H5T_class_t t_class = H5Tget_class(datatype);
2135  AssertThrow(t_class_inp == t_class,
2136  ExcMessage("The data type of the matrix to be read does not match the archive"));
2137 
2138  // get the dimensions of the matrix stored in the file
2139  // get dataspace handle
2140  hid_t dataspace_id = H5Dget_space(dataset_id);
2141  // get number of dimensions
2142  const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
2143  AssertThrow(ndims==2, ExcIO());
2144  // get every dimension
2145  hsize_t dims[2];
2146  status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
2147  AssertThrow(status >= 0, ExcIO());
2148  AssertThrow((int)dims[0]==n_columns,
2149  ExcMessage("The number of columns of the matrix does not match the content of the archive"));
2150  AssertThrow((int)dims[1]==n_rows,
2151  ExcMessage("The number of rows of the matrix does not match the content of the archive"));
2152 
2153  // gather the number of local rows and columns from all processes
2154  std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
2155  int ierr = MPI_Allgather(&tmp.n_local_rows, 1, MPI_INT,
2156  proc_n_local_rows.data(), 1, MPI_INT,
2157  tmp.grid->mpi_communicator);
2158  AssertThrowMPI(ierr);
2159  ierr = MPI_Allgather(&tmp.n_local_columns, 1, MPI_INT,
2160  proc_n_local_columns.data(), 1, MPI_INT,
2161  tmp.grid->mpi_communicator);
2162  AssertThrowMPI(ierr);
2163 
2164  const unsigned int my_rank(Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
2165 
2166  // hyperslab selection parameters
2167  // each process defines a dataset in memory and reads from the hyperslab in the file
2168  hsize_t count[2];
2169  count[0] = tmp.n_local_columns;
2170  count[1] = tmp.n_local_rows;
2171 
2172  hsize_t offset[2] = {0};
2173  for (unsigned int i=0; i<my_rank; ++i)
2174  offset[0] += proc_n_local_columns[i];
2175 
2176  // select hyperslab in the file
2177  status = H5Sselect_hyperslab(dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2178  AssertThrow(status >= 0, ExcIO());
2179 
2180  // create a memory dataspace independently
2181  hid_t memspace = H5Screate_simple(2, count, nullptr);
2182 
2183  // read data independently
2184  status = H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
2185  AssertThrow(status >= 0, ExcIO());
2186 
2187  // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
2188  hid_t state_enum_id, property_enum_id;
2189  internal::create_HDF5_state_enum_id(state_enum_id);
2190  internal::create_HDF5_property_enum_id(property_enum_id);
2191 
2192  // open the datasets for the state and property enum in the file
2193  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
2194  hid_t datatype_state = H5Dget_type(dataset_state_id);
2195  H5T_class_t t_class_state = H5Tget_class(datatype_state);
2196  AssertThrow(t_class_state == H5T_ENUM, ExcIO());
2197 
2198  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
2199  hid_t datatype_property = H5Dget_type(dataset_property_id);
2200  H5T_class_t t_class_property = H5Tget_class(datatype_property);
2201  AssertThrow(t_class_property == H5T_ENUM, ExcIO());
2202 
2203  // get dataspace handles
2204  hid_t dataspace_state = H5Dget_space(dataset_state_id);
2205  hid_t dataspace_property = H5Dget_space(dataset_property_id);
2206  // get number of dimensions
2207  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
2208  AssertThrow(ndims_state==1, ExcIO());
2209  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
2210  AssertThrow(ndims_property==1, ExcIO());
2211  // get every dimension
2212  hsize_t dims_state[1];
2213  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
2214  AssertThrow((int)dims_state[0]==1,ExcIO());
2215  hsize_t dims_property[1];
2216  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
2217  AssertThrow((int)dims_property[0]==1,ExcIO());
2218 
2219  // read data
2220  status = H5Dread(dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL,
2221  H5P_DEFAULT, &tmp.state);
2222  AssertThrow(status >= 0, ExcIO());
2223 
2224  status = H5Dread(dataset_property_id, property_enum_id, H5S_ALL, H5S_ALL,
2225  H5P_DEFAULT, &tmp.property);
2226  AssertThrow(status >= 0, ExcIO());
2227 
2228  // close/release sources
2229  status = H5Sclose(memspace);
2230  AssertThrow(status >= 0, ExcIO());
2231  status = H5Dclose(dataset_id);
2232  AssertThrow(status >= 0, ExcIO());
2233  status = H5Dclose(dataset_state_id);
2234  AssertThrow(status >= 0, ExcIO());
2235  status = H5Dclose(dataset_property_id);
2236  AssertThrow(status >= 0, ExcIO());
2237  status = H5Sclose(dataspace_id);
2238  AssertThrow(status >= 0, ExcIO());
2239  status = H5Sclose(dataspace_state);
2240  AssertThrow(status >= 0, ExcIO());
2241  status = H5Sclose(dataspace_property);
2242  AssertThrow(status >= 0, ExcIO());
2243  // no H5Tclose(datatype) here: datatype is a predefined HDF5 native type,
2244  // which must not be closed
2245  status = H5Tclose(state_enum_id);
2246  AssertThrow(status >= 0, ExcIO());
2247  status = H5Tclose(property_enum_id);
2248  AssertThrow(status >= 0, ExcIO());
2249  status = H5Fclose(file_id);
2250  AssertThrow(status >= 0, ExcIO());
2251 
2252  // copying the distributed matrices
2253  tmp.copy_to(*this);
2254 
2255 # endif // H5_HAVE_PARALLEL
2256 # endif // DEAL_II_WITH_HDF5
2257 }
2258 
2259 
2260 
2261 namespace internal
2262 {
2263  namespace
2264  {
2265 
2266  template <typename NumberType>
2267  void scale_columns(ScaLAPACKMatrix<NumberType> &matrix,
2268  const ArrayView<const NumberType> &factors)
2269  {
2270  Assert(matrix.n()==factors.size(),ExcDimensionMismatch(matrix.n(),factors.size()));
2271 
2272  for (unsigned int i=0; i<matrix.local_n(); ++i)
2273  {
2274  const NumberType s = factors[matrix.global_column(i)];
2275 
2276  for (unsigned int j=0; j<matrix.local_m(); ++j)
2277  matrix.local_el(j,i) *= s;
2278  }
2279  }
2280 
2281  template <typename NumberType>
2282  void scale_rows(ScaLAPACKMatrix<NumberType> &matrix,
2283  const ArrayView<const NumberType> &factors)
2284  {
2285  Assert(matrix.m()==factors.size(),ExcDimensionMismatch(matrix.m(),factors.size()));
2286 
2287  for (unsigned int i=0; i<matrix.local_m(); ++i)
2288  {
2289  const NumberType s = factors[matrix.global_row(i)];
2290 
2291  for (unsigned int j=0; j<matrix.local_n(); ++j)
2292  matrix.local_el(i,j) *= s;
2293  }
2294  }
2295 
2296  }
2297 }
2298 
2299 
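// In matrix terms the two helpers implement diagonal scalings:
// scale_columns() computes A <- A * diag(factors) and scale_rows() computes
// A <- diag(factors) * A, with each process only touching its locally
// stored entries.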
2300 
2301 template <typename NumberType>
2302 template <class InputVector>
2303 void ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
2304 {
2305  if (this->grid->mpi_process_is_active)
2306  internal::scale_columns(*this, make_array_view(factors));
2307 }
2308 
2309 
2310 
2311 template <typename NumberType>
2312 template <class InputVector>
2313 void ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
2314 {
2315  if (this->grid->mpi_process_is_active)
2316  internal::scale_rows(*this, make_array_view(factors));
2317 }
2318 
2319 
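// A usage sketch, assuming the factors are stored in a container accepted by
// make_array_view(), such as std::vector<double>:
//
//   std::vector<double> s(A.n(), 2.);
//   A.scale_columns(s); // doubles every column of A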
2320 
2321 // instantiations
2322 #include "scalapack.inst"
2323 
2324 
2325 DEAL_II_NAMESPACE_CLOSE
2326 
2327 #endif // DEAL_II_WITH_SCALAPACK