Reference documentation for deal.II version 9.1.1
scalapack.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2017 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/lac/scalapack.h>
18 
19 #ifdef DEAL_II_WITH_SCALAPACK
20 
21 # include <deal.II/base/array_view.h>
22 # include <deal.II/base/mpi.h>
23 # include <deal.II/base/mpi.templates.h>
24 # include <deal.II/base/std_cxx14/memory.h>
25 
26 # include <deal.II/lac/scalapack.templates.h>
27 
28 # ifdef DEAL_II_WITH_HDF5
29 # include <hdf5.h>
30 # endif
31 
32 DEAL_II_NAMESPACE_OPEN
33 
34 # ifdef DEAL_II_WITH_HDF5
35 
36 template <typename number>
37 inline hid_t
38 hdf5_type_id(const number *)
39 {
40  Assert(false, dealii::ExcNotImplemented());
41  // don't know what to put here; it does not matter
42  return -1;
43 }
44 
45 inline hid_t
46 hdf5_type_id(const double *)
47 {
48  return H5T_NATIVE_DOUBLE;
49 }
50 
51 inline hid_t
52 hdf5_type_id(const float *)
53 {
54  return H5T_NATIVE_FLOAT;
55 }
56 
57 inline hid_t
58 hdf5_type_id(const int *)
59 {
60  return H5T_NATIVE_INT;
61 }
62 
63 inline hid_t
64 hdf5_type_id(const unsigned int *)
65 {
66  return H5T_NATIVE_UINT;
67 }
68 
69 inline hid_t
70 hdf5_type_id(const char *)
71 {
72  return H5T_NATIVE_CHAR;
73 }
74 # endif // DEAL_II_WITH_HDF5
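// A minimal sketch (not part of scalapack.cc) of how the hdf5_type_id() overloads
// above are used: the dummy pointer argument selects the matching HDF5 native type
// through overload resolution, e.g. when writing a buffer of NumberType to a
// dataset (the H5Dwrite call is shown only as an illustration):
//
//   NumberType *buffer = /* local matrix data */ nullptr;
//   const hid_t type   = hdf5_type_id(buffer); // H5T_NATIVE_DOUBLE for double
//   // H5Dwrite(dataset, type, memspace, filespace, H5P_DEFAULT, buffer);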
75 
76 
77 
78 template <typename NumberType>
79 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
80  const size_type n_rows_,
81  const size_type n_columns_,
82  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
83  const size_type row_block_size_,
84  const size_type column_block_size_,
85  const LAPACKSupport::Property property_)
86  : uplo('L')
87  , // for non-symmetric matrices this is not needed
88  first_process_row(0)
89  , first_process_column(0)
90  , submatrix_row(1)
91  , submatrix_column(1)
92 {
93  reinit(n_rows_,
94  n_columns_,
95  process_grid,
96  row_block_size_,
97  column_block_size_,
98  property_);
99 }
100 
101 
102 
103 template <typename NumberType>
104 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
105  const size_type size,
106  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
107  const size_type block_size,
108  const LAPACKSupport::Property property)
109  : ScaLAPACKMatrix<NumberType>(size,
110  size,
111  process_grid,
112  block_size,
113  block_size,
114  property)
115 {}
116 
117 
118 
119 template <typename NumberType>
120 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
121  const std::string & filename,
122  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
123  const size_type row_block_size,
124  const size_type column_block_size)
125  : uplo('L')
126  , // for non-symmetric matrices this is not needed
127  first_process_row(0)
128  , first_process_column(0)
129  , submatrix_row(1)
130  , submatrix_column(1)
131 {
132 # ifndef DEAL_II_WITH_HDF5
133  (void)filename;
134  (void)process_grid;
135  (void)row_block_size;
136  (void)column_block_size;
137  Assert(
138  false,
139  ExcMessage(
140  "This function is only available when deal.II is configured with HDF5"));
141 # else
142 
143  const unsigned int this_mpi_process(
144  Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));
145 
146  // Before reading the content from disk the root process determines the
147  // dimensions of the matrix. Subsequently, memory is allocated by a call to
148  // reinit() and the matrix is loaded by a call to load().
149  if (this_mpi_process == 0)
150  {
151  herr_t status = 0;
152 
153  // open file in read-only mode
154  hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
155  AssertThrow(file >= 0, ExcIO());
156 
157  // get data set in file
158  hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);
159  AssertThrow(dataset >= 0, ExcIO());
160 
161  // determine file space
162  hid_t filespace = H5Dget_space(dataset);
163 
164  // get number of dimensions in data set
165  int rank = H5Sget_simple_extent_ndims(filespace);
166  AssertThrow(rank == 2, ExcIO());
167  hsize_t dims[2];
168  status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
169  AssertThrow(status >= 0, ExcIO());
170 
171  // due to ScaLAPACK's column-major memory layout the dimensions are
172  // swapped
173  n_rows = dims[1];
174  n_columns = dims[0];
175 
176  // close/release resources
177  status = H5Sclose(filespace);
178  AssertThrow(status >= 0, ExcIO());
179  status = H5Dclose(dataset);
180  AssertThrow(status >= 0, ExcIO());
181  status = H5Fclose(file);
182  AssertThrow(status >= 0, ExcIO());
183  }
184  int ierr = MPI_Bcast(&n_rows,
185  1,
186  Utilities::MPI::internal::mpi_type_id(&n_rows),
187  0 /*from root*/,
188  process_grid->mpi_communicator);
189  AssertThrowMPI(ierr);
190 
191  ierr = MPI_Bcast(&n_columns,
192  1,
193  Utilities::MPI::internal::mpi_type_id(&n_columns),
194  0 /*from root*/,
195  process_grid->mpi_communicator);
196  AssertThrowMPI(ierr);
197 
198  // the property will be overwritten by the subsequent call to load()
199  reinit(n_rows,
200  n_columns,
201  process_grid,
202  row_block_size,
203  column_block_size,
204  LAPACKSupport::Property::general);
205 
206  load(filename.c_str());
207 
208 # endif // DEAL_II_WITH_HDF5
209 }
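// A minimal usage sketch (not part of scalapack.cc): constructing a matrix directly
// from an HDF5 file written by ScaLAPACKMatrix::save(); the file name, matrix size
// and block sizes below are hypothetical placeholders.
//
//   #include <deal.II/base/process_grid.h>
//   #include <deal.II/lac/scalapack.h>
//
//   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//     MPI_COMM_WORLD, 1000, 1000, 32, 32);
//   // the matrix dimensions are read from the file; the grid and the block
//   // sizes still have to be supplied by the caller
//   ScaLAPACKMatrix<double> A("matrix.h5", grid, 32, 32);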
210 
211 
212 
213 template <typename NumberType>
214 void
215 ScaLAPACKMatrix<NumberType>::reinit(
216  const size_type n_rows_,
217  const size_type n_columns_,
218  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
219  const size_type row_block_size_,
220  const size_type column_block_size_,
221  const LAPACKSupport::Property property_)
222 {
223  Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
224  Assert(column_block_size_ > 0,
225  ExcMessage("Column block size has to be positive."));
226  Assert(
227  row_block_size_ <= n_rows_,
228  ExcMessage(
229  "Row block size can not be greater than the number of rows of the matrix"));
230  Assert(
231  column_block_size_ <= n_columns_,
232  ExcMessage(
233  "Column block size can not be greater than the number of columns of the matrix"));
234 
235  state = LAPACKSupport::State::matrix;
236  property = property_;
237  grid = process_grid;
238  n_rows = n_rows_;
239  n_columns = n_columns_;
240  row_block_size = row_block_size_;
241  column_block_size = column_block_size_;
242 
243  if (grid->mpi_process_is_active)
244  {
245  // Get local sizes:
246  n_local_rows = numroc_(&n_rows,
247  &row_block_size,
248  &(grid->this_process_row),
249  &first_process_row,
250  &(grid->n_process_rows));
251  n_local_columns = numroc_(&n_columns,
252  &column_block_size,
253  &(grid->this_process_column),
254  &first_process_column,
255  &(grid->n_process_columns));
256 
257  // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different
258  // between processes
259  int lda = std::max(1, n_local_rows);
260 
261  int info = 0;
262  descinit_(descriptor,
263  &n_rows,
264  &n_columns,
265  &row_block_size,
266  &column_block_size,
267  &first_process_row,
268  &first_process_column,
269  &(grid->blacs_context),
270  &lda,
271  &info);
272  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
273 
274  this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
275  }
276  else
277  {
278  // set process-local variables to something telling:
279  n_local_rows = -1;
280  n_local_columns = -1;
281  std::fill(std::begin(descriptor), std::end(descriptor), -1);
282  }
283 }
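// A minimal usage sketch (not part of scalapack.cc): setting up a process grid and a
// block-cyclically distributed matrix; the size n and block size nb are hypothetical.
//
//   const unsigned int n = 1000, nb = 32;
//   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//     MPI_COMM_WORLD, n, n, nb, nb);
//   ScaLAPACKMatrix<double> A(n, n, grid, nb, nb,
//                             LAPACKSupport::Property::symmetric);
//   // equivalently, on an already constructed object:
//   // A.reinit(n, n, grid, nb, nb, LAPACKSupport::Property::symmetric);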
284 
285 
286 
287 template <typename NumberType>
288 void
289 ScaLAPACKMatrix<NumberType>::reinit(
290  const size_type size,
291  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
292  const size_type block_size,
293  const LAPACKSupport::Property property)
294 {
295  reinit(size, size, process_grid, block_size, block_size, property);
296 }
297 
298 
299 
300 template <typename NumberType>
301 void
302 ScaLAPACKMatrix<NumberType>::set_property(
303  const LAPACKSupport::Property property_)
304 {
305  property = property_;
306 }
307 
308 
309 
310 template <typename NumberType>
311 LAPACKSupport::Property
312 ScaLAPACKMatrix<NumberType>::get_property() const
313 {
314  return property;
315 }
316 
317 
318 
319 template <typename NumberType>
320 LAPACKSupport::State
321 ScaLAPACKMatrix<NumberType>::get_state() const
322 {
323  return state;
324 }
325 
326 
327 
328 template <typename NumberType>
329 ScaLAPACKMatrix<NumberType> &
330 ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
331 {
332  // FIXME: another way to copy is to use pdgeadd_ PBLAS routine.
333  // This routine computes the sum of two matrices B := a*A + b*B.
334  // Matrices can have different distributions; in particular, matrix A can
335  // be owned by only one process, so we can set a=1 and b=0 to copy
336  // non-distributed matrix A into distributed matrix B.
337  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
338  Assert(n_columns == int(matrix.n()),
339  ExcDimensionMismatch(n_columns, matrix.n()));
340 
341  if (grid->mpi_process_is_active)
342  {
343  for (int i = 0; i < n_local_rows; ++i)
344  {
345  const int glob_i = global_row(i);
346  for (int j = 0; j < n_local_columns; ++j)
347  {
348  const int glob_j = global_column(j);
349  local_el(i, j) = matrix(glob_i, glob_j);
350  }
351  }
352  }
353  state = LAPACKSupport::matrix;
354  return *this;
355 }
356 
357 
358 
359 template <typename NumberType>
360 void
361 ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
362  const unsigned int rank)
363 {
364  if (n_rows * n_columns == 0)
365  return;
366 
367  const unsigned int this_mpi_process(
368  Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
369 
370 # ifdef DEBUG
371  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
372  ExcMessage("All processes have to call routine with identical rank"));
373  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
374  ExcMessage("All processes have to call routine with identical rank"));
375 # endif
376 
377  // root process has to be active in the grid of A
378  if (this_mpi_process == rank)
379  {
380  Assert(grid->mpi_process_is_active, ExcInternalError());
381  Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
382  Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
383  }
384  // Create 1x1 grid for matrix B.
385  // The underlying grid for matrix B only contains the process #rank.
386  // This grid will be used to copy the serial matrix B to the distributed
387  // matrix using the ScaLAPACK routine pgemr2d.
388  MPI_Group group_A;
389  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
390  const int n = 1;
391  const std::vector<int> ranks(n, rank);
392  MPI_Group group_B;
393  MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
394  MPI_Comm communicator_B;
395  Utilities::MPI::create_group(this->grid->mpi_communicator,
396  group_B,
397  0,
398  &communicator_B);
399  int n_proc_rows_B = 1, n_proc_cols_B = 1;
400  int this_process_row_B = -1, this_process_column_B = -1;
401  int blacs_context_B = -1;
402  if (MPI_COMM_NULL != communicator_B)
403  {
404  // Initialize BLACS context from the provided communicator
405  blacs_context_B = Csys2blacs_handle(communicator_B);
406  const char *order = "Col";
407  Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
408  Cblacs_gridinfo(blacs_context_B,
409  &n_proc_rows_B,
410  &n_proc_cols_B,
411  &this_process_row_B,
412  &this_process_column_B);
413  Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
414  // the active process of grid B has to be process #rank of the
415  // communicator attached to A
416  Assert(this_mpi_process == rank, ExcInternalError());
417  }
418  const bool mpi_process_is_active_B =
419  (this_process_row_B >= 0 && this_process_column_B >= 0);
420 
421  // create descriptor for matrix B
422  std::vector<int> descriptor_B(9, -1);
423  const int first_process_row_B = 0, first_process_col_B = 0;
424 
425  if (mpi_process_is_active_B)
426  {
427  // Get local sizes:
428  int n_local_rows_B = numroc_(&n_rows,
429  &n_rows,
430  &this_process_row_B,
431  &first_process_row_B,
432  &n_proc_rows_B);
433  int n_local_cols_B = numroc_(&n_columns,
434  &n_columns,
435  &this_process_column_B,
436  &first_process_col_B,
437  &n_proc_cols_B);
438  Assert(n_local_rows_B == n_rows, ExcInternalError());
439  Assert(n_local_cols_B == n_columns, ExcInternalError());
440  (void)n_local_cols_B;
441 
442  int lda = std::max(1, n_local_rows_B);
443  int info = 0;
444  descinit_(descriptor_B.data(),
445  &n_rows,
446  &n_columns,
447  &n_rows,
448  &n_columns,
449  &first_process_row_B,
450  &first_process_col_B,
451  &blacs_context_B,
452  &lda,
453  &info);
454  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
455  }
456  if (this->grid->mpi_process_is_active)
457  {
458  const int ii = 1;
459  NumberType *loc_vals_A =
460  this->values.size() > 0 ? this->values.data() : nullptr;
461  const NumberType *loc_vals_B =
462  mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
463 
464  // pgemr2d has to be called only for processes active on grid attached to
465  // matrix A
466  pgemr2d(&n_rows,
467  &n_columns,
468  loc_vals_B,
469  &ii,
470  &ii,
471  descriptor_B.data(),
472  loc_vals_A,
473  &ii,
474  &ii,
475  this->descriptor,
476  &(this->grid->blacs_context));
477  }
478  if (mpi_process_is_active_B)
479  Cblacs_gridexit(blacs_context_B);
480 
481  MPI_Group_free(&group_A);
482  MPI_Group_free(&group_B);
483  if (MPI_COMM_NULL != communicator_B)
484  MPI_Comm_free(&communicator_B);
485 
486  state = LAPACKSupport::matrix;
487 }
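// A minimal usage sketch (not part of scalapack.cc): scattering a serial
// LAPACKFullMatrix held by rank 0 into the distributed layout and gathering it back;
// n, nb and grid are assumed to be defined as in the sketches above.
//
//   LAPACKFullMatrix<double> serial(n, n);   // filled on rank 0 only
//   ScaLAPACKMatrix<double>  A(n, n, grid, nb, nb);
//   A.copy_from(serial, /*rank=*/0);         // distribute from rank 0
//   A.copy_to(serial, /*rank=*/0);           // collect back onto rank 0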
488 
489 
490 
491 template <typename NumberType>
492 unsigned int
493 ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
494 {
495  Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
496  ExcIndexRange(loc_row, 0, n_local_rows));
497  const int i = loc_row + 1;
498  return indxl2g_(&i,
499  &row_block_size,
500  &(grid->this_process_row),
501  &first_process_row,
502  &(grid->n_process_rows)) -
503  1;
504 }
505 
506 
507 
508 template <typename NumberType>
509 unsigned int
510 ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
511 {
512  Assert(n_local_columns >= 0 &&
513  loc_column < static_cast<unsigned int>(n_local_columns),
514  ExcIndexRange(loc_column, 0, n_local_columns));
515  const int j = loc_column + 1;
516  return indxl2g_(&j,
517  &column_block_size,
518  &(grid->this_process_column),
519  &first_process_column,
520  &(grid->n_process_columns)) -
521  1;
522 }
523 
524 
525 
526 template <typename NumberType>
527 void
528 ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
529  const unsigned int rank) const
530 {
531  if (n_rows * n_columns == 0)
532  return;
533 
534  const unsigned int this_mpi_process(
535  Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
536 
537 # ifdef DEBUG
538  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
539  ExcMessage("All processes have to call routine with identical rank"));
540  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
541  ExcMessage("All processes have to call routine with identical rank"));
542 # endif
543 
544  if (this_mpi_process == rank)
545  {
546  // the process which gets the serial copy has to be in the process grid
547  Assert(this->grid->is_process_active(), ExcInternalError());
548  Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
549  Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
550  }
551 
552  // Create 1x1 grid for matrix B.
553  // The underlying grid for matrix B only contains the process #rank.
554  // This grid will be used to copy the distributed matrix to the serial
555  // matrix B using the ScaLAPACK routine pgemr2d.
556  MPI_Group group_A;
557  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
558  const int n = 1;
559  const std::vector<int> ranks(n, rank);
560  MPI_Group group_B;
561  MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
562  MPI_Comm communicator_B;
563  Utilities::MPI::create_group(this->grid->mpi_communicator,
564  group_B,
565  0,
566  &communicator_B);
567  int n_proc_rows_B = 1, n_proc_cols_B = 1;
568  int this_process_row_B = -1, this_process_column_B = -1;
569  int blacs_context_B = -1;
570  if (MPI_COMM_NULL != communicator_B)
571  {
572  // Initialize BLACS context from the provided communicator
573  blacs_context_B = Csys2blacs_handle(communicator_B);
574  const char *order = "Col";
575  Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
576  Cblacs_gridinfo(blacs_context_B,
577  &n_proc_rows_B,
578  &n_proc_cols_B,
579  &this_process_row_B,
580  &this_process_column_B);
581  Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
582  // the active process of grid B has to be process #rank of the
583  // communicator attached to A
584  Assert(this_mpi_process == rank, ExcInternalError());
585  }
586  const bool mpi_process_is_active_B =
587  (this_process_row_B >= 0 && this_process_column_B >= 0);
588 
589  // create descriptor for matrix B
590  std::vector<int> descriptor_B(9, -1);
591  const int first_process_row_B = 0, first_process_col_B = 0;
592 
593  if (mpi_process_is_active_B)
594  {
595  // Get local sizes:
596  int n_local_rows_B = numroc_(&n_rows,
597  &n_rows,
598  &this_process_row_B,
599  &first_process_row_B,
600  &n_proc_rows_B);
601  int n_local_cols_B = numroc_(&n_columns,
602  &n_columns,
603  &this_process_column_B,
604  &first_process_col_B,
605  &n_proc_cols_B);
606  Assert(n_local_rows_B == n_rows, ExcInternalError());
607  Assert(n_local_cols_B == n_columns, ExcInternalError());
608  (void)n_local_cols_B;
609 
610  int lda = std::max(1, n_local_rows_B);
611  int info = 0;
612  // fill descriptor for matrix B
613  descinit_(descriptor_B.data(),
614  &n_rows,
615  &n_columns,
616  &n_rows,
617  &n_columns,
618  &first_process_row_B,
619  &first_process_col_B,
620  &blacs_context_B,
621  &lda,
622  &info);
623  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
624  }
625  // pgemr2d has to be called only for processes active on grid attached to
626  // matrix A
627  if (this->grid->mpi_process_is_active)
628  {
629  const int ii = 1;
630  const NumberType *loc_vals_A =
631  this->values.size() > 0 ? this->values.data() : nullptr;
632  NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
633 
634  pgemr2d(&n_rows,
635  &n_columns,
636  loc_vals_A,
637  &ii,
638  &ii,
639  this->descriptor,
640  loc_vals_B,
641  &ii,
642  &ii,
643  descriptor_B.data(),
644  &(this->grid->blacs_context));
645  }
646  if (mpi_process_is_active_B)
647  Cblacs_gridexit(blacs_context_B);
648 
649  MPI_Group_free(&group_A);
650  MPI_Group_free(&group_B);
651  if (MPI_COMM_NULL != communicator_B)
652  MPI_Comm_free(&communicator_B);
653 }
654 
655 
656 
657 template <typename NumberType>
658 void
659 ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
660 {
661  // FIXME: use PDGEMR2D for copying?
662  // PDGEMR2D copies a submatrix of A on a submatrix of B.
663  // A and B can have different distributions
664  // see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
665  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
666  Assert(n_columns == int(matrix.n()),
667  ExcDimensionMismatch(n_columns, matrix.n()));
668 
669  matrix = 0.;
670  if (grid->mpi_process_is_active)
671  {
672  for (int i = 0; i < n_local_rows; ++i)
673  {
674  const int glob_i = global_row(i);
675  for (int j = 0; j < n_local_columns; ++j)
676  {
677  const int glob_j = global_column(j);
678  matrix(glob_i, glob_j) = local_el(i, j);
679  }
680  }
681  }
682  Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
683 
684  // we could move the following lines under the main loop above,
685  // but they would be dependent on glob_i and glob_j, which
686  // won't make it much prettier
687  if (state == LAPACKSupport::cholesky)
688  {
689  if (property == LAPACKSupport::lower_triangular)
690  for (unsigned int i = 0; i < matrix.n(); ++i)
691  for (unsigned int j = i + 1; j < matrix.m(); ++j)
692  matrix(i, j) = 0.;
693  else if (property == LAPACKSupport::upper_triangular)
694  for (unsigned int i = 0; i < matrix.n(); ++i)
695  for (unsigned int j = 0; j < i; ++j)
696  matrix(i, j) = 0.;
697  }
698  else if (property == LAPACKSupport::symmetric &&
699  state == LAPACKSupport::inverse_matrix)
700  {
701  if (uplo == 'L')
702  for (unsigned int i = 0; i < matrix.n(); ++i)
703  for (unsigned int j = i + 1; j < matrix.m(); ++j)
704  matrix(i, j) = matrix(j, i);
705  else if (uplo == 'U')
706  for (unsigned int i = 0; i < matrix.n(); ++i)
707  for (unsigned int j = 0; j < i; ++j)
708  matrix(i, j) = matrix(j, i);
709  }
710 }
711 
712 
713 
714 template <typename NumberType>
715 void
716 ScaLAPACKMatrix<NumberType>::copy_to(
717  ScaLAPACKMatrix<NumberType> &B,
718  const std::pair<unsigned int, unsigned int> &offset_A,
719  const std::pair<unsigned int, unsigned int> &offset_B,
720  const std::pair<unsigned int, unsigned int> &submatrix_size) const
721 {
722  // submatrix is empty
723  if (submatrix_size.first == 0 || submatrix_size.second == 0)
724  return;
725 
726  // range checking for matrix A
727  AssertIndexRange(offset_A.first, n_rows - submatrix_size.first + 1);
728  AssertIndexRange(offset_A.second, n_columns - submatrix_size.second + 1);
729 
730  // range checking for matrix B
731  AssertIndexRange(offset_B.first, B.n_rows - submatrix_size.first + 1);
732  AssertIndexRange(offset_B.second, B.n_columns - submatrix_size.second + 1);
733 
734  // Currently, copying of matrices will only be supported if A and B share the
735  // same MPI communicator
736  int ierr, comparison;
737  ierr = MPI_Comm_compare(grid->mpi_communicator,
738  B.grid->mpi_communicator,
739  &comparison);
740  AssertThrowMPI(ierr);
741  Assert(comparison == MPI_IDENT,
742  ExcMessage("Matrix A and B must have a common MPI Communicator"));
743 
744  /*
745  * The routine pgemr2d requires a BLACS context resembling at least the union
746  * of process grids described by the BLACS contexts held by the ProcessGrids
747  * of matrix A and B. As A and B share the same MPI communicator, there is no
748  * need to create a union MPI communicator to initialise the BLACS context
749  */
750  int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
751  const char *order = "Col";
752  int union_n_process_rows =
753  Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
754  int union_n_process_columns = 1;
755  Cblacs_gridinit(&union_blacs_context,
756  order,
757  union_n_process_rows,
758  union_n_process_columns);
759 
760  int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
761  Cblacs_gridinfo(this->grid->blacs_context,
762  &n_grid_rows_A,
763  &n_grid_columns_A,
764  &my_row_A,
765  &my_column_A);
766 
767  // check whether process is in the BLACS context of matrix A
768  const bool in_context_A =
769  (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
770  (my_column_A >= 0 && my_column_A < n_grid_columns_A);
771 
772  int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
773  Cblacs_gridinfo(B.grid->blacs_context,
774  &n_grid_rows_B,
775  &n_grid_columns_B,
776  &my_row_B,
777  &my_column_B);
778 
779  // check whether process is in the BLACS context of matrix B
780  const bool in_context_B =
781  (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
782  (my_column_B >= 0 && my_column_B < n_grid_columns_B);
783 
784  const int n_rows_submatrix = submatrix_size.first;
785  const int n_columns_submatrix = submatrix_size.second;
786 
787  // due to Fortran indexing one has to be added
788  int ia = offset_A.first + 1, ja = offset_A.second + 1;
789  int ib = offset_B.first + 1, jb = offset_B.second + 1;
790 
791  std::array<int, 9> desc_A, desc_B;
792 
793  const NumberType *loc_vals_A = nullptr;
794  NumberType * loc_vals_B = nullptr;
795 
796  // Note: the function pgemr2d has to be called for all processes in the union
797  // BLACS context. If the calling process is not part of the BLACS context of A,
798  // desc_A[1] has to be -1 and all other parameters do not have to be set. If
799  // the calling process is not part of the BLACS context of B, desc_B[1] has to
800  // be -1 and all other parameters do not have to be set.
801  if (in_context_A)
802  {
803  if (this->values.size() != 0)
804  loc_vals_A = this->values.data();
805 
806  for (unsigned int i = 0; i < desc_A.size(); ++i)
807  desc_A[i] = this->descriptor[i];
808  }
809  else
810  desc_A[1] = -1;
811 
812  if (in_context_B)
813  {
814  if (B.values.size() != 0)
815  loc_vals_B = B.values.data();
816 
817  for (unsigned int i = 0; i < desc_B.size(); ++i)
818  desc_B[i] = B.descriptor[i];
819  }
820  else
821  desc_B[1] = -1;
822 
823  pgemr2d(&n_rows_submatrix,
824  &n_columns_submatrix,
825  loc_vals_A,
826  &ia,
827  &ja,
828  desc_A.data(),
829  loc_vals_B,
830  &ib,
831  &jb,
832  desc_B.data(),
833  &union_blacs_context);
834 
835  B.state = LAPACKSupport::matrix;
836 
837  // releasing the union BLACS context
838  Cblacs_gridexit(union_blacs_context);
839 }
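// A minimal usage sketch (not part of scalapack.cc): copying a 100x100 block of A
// starting at (row 10, column 20) into the upper-left corner of another distributed
// matrix B; both matrices must share the same MPI communicator.
//
//   A.copy_to(B,
//             /*offset_A=*/std::make_pair(10u, 20u),
//             /*offset_B=*/std::make_pair(0u, 0u),
//             /*submatrix_size=*/std::make_pair(100u, 100u));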
840 
841 
842 
843 template <typename NumberType>
844 void
845 ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
846 {
847  Assert(n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
848  Assert(n_columns == dest.n_columns,
849  ExcDimensionMismatch(n_columns, dest.n_columns));
850 
851  if (this->grid->mpi_process_is_active)
852  AssertThrow(
853  this->descriptor[0] == 1,
854  ExcMessage(
855  "Copying of ScaLAPACK matrices only implemented for dense matrices"));
856  if (dest.grid->mpi_process_is_active)
857  AssertThrow(
858  dest.descriptor[0] == 1,
859  ExcMessage(
860  "Copying of ScaLAPACK matrices only implemented for dense matrices"));
861 
862  /*
863  * In case of different process grids or block-cyclic distributions,
864  * inter-process communication is necessary.
865  * If the distributed matrices have the same process grid and block sizes, local
866  * copying is enough.
867  */
868  if ((this->grid != dest.grid) || (row_block_size != dest.row_block_size) ||
869  (column_block_size != dest.column_block_size))
870  {
871  /*
872  * get the MPI communicator, which is the union of the source and
873  * destination MPI communicator
874  */
875  int ierr = 0;
876  MPI_Group group_source, group_dest, group_union;
877  ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
878  AssertThrowMPI(ierr);
879  ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
880  AssertThrowMPI(ierr);
881  ierr = MPI_Group_union(group_source, group_dest, &group_union);
882  AssertThrowMPI(ierr);
883  MPI_Comm mpi_communicator_union;
884 
885  // to create a communicator representing the union of the source
886  // and destination MPI communicator we need a communicator that
887  // is guaranteed to contain all desired processes -- i.e.,
888  // MPI_COMM_WORLD. on the other hand, as documented in the MPI
889  // standard, MPI_Comm_create_group is not collective on all
890  // processes in the first argument, but instead is collective on
891  // only those processes listed in the group. in other words,
892  // there is really no harm in passing MPI_COMM_WORLD as the
893  // first argument, even if the program we are currently running
894  // and that is calling this function only works on a subset of
895  // processes. the same holds for the wrapper/fallback we are using here.
896  ierr = Utilities::MPI::create_group(MPI_COMM_WORLD,
897  group_union,
898  5,
899  &mpi_communicator_union);
900  AssertThrowMPI(ierr);
901 
902  /*
903  * The routine pgemr2d requires a BLACS context resembling at least the
904  * union of process grids described by the BLACS contexts of matrix A and
905  * B
906  */
907  int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
908  const char *order = "Col";
909  int union_n_process_rows =
910  Utilities::MPI::n_mpi_processes(mpi_communicator_union);
911  int union_n_process_columns = 1;
912  Cblacs_gridinit(&union_blacs_context,
913  order,
914  union_n_process_rows,
915  union_n_process_columns);
916 
917  const NumberType *loc_vals_source = nullptr;
918  NumberType * loc_vals_dest = nullptr;
919 
920  if (this->grid->mpi_process_is_active && (this->values.size() > 0))
921  {
922  AssertThrow(this->values.size() > 0,
923  dealii::ExcMessage(
924  "source: process is active but local matrix empty"));
925  loc_vals_source = this->values.data();
926  }
927  if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
928  {
929  AssertThrow(
930  dest.values.size() > 0,
931  dealii::ExcMessage(
932  "destination: process is active but local matrix empty"));
933  loc_vals_dest = dest.values.data();
934  }
935  pgemr2d(&n_rows,
936  &n_columns,
937  loc_vals_source,
938  &submatrix_row,
939  &submatrix_column,
940  descriptor,
941  loc_vals_dest,
942  &dest.submatrix_row,
943  &dest.submatrix_column,
944  dest.descriptor,
945  &union_blacs_context);
946 
947  Cblacs_gridexit(union_blacs_context);
948 
949  if (mpi_communicator_union != MPI_COMM_NULL)
950  {
951  ierr = MPI_Comm_free(&mpi_communicator_union);
952  AssertThrowMPI(ierr);
953  }
954  ierr = MPI_Group_free(&group_source);
955  AssertThrowMPI(ierr);
956  ierr = MPI_Group_free(&group_dest);
957  AssertThrowMPI(ierr);
958  ierr = MPI_Group_free(&group_union);
959  AssertThrowMPI(ierr);
960  }
961  else
962  // process is active in the process grid
963  if (this->grid->mpi_process_is_active)
964  dest.values = this->values;
965 
966  dest.state = state;
967  dest.property = property;
968 }
969 
970 
971 
972 template <typename NumberType>
973 void
974 ScaLAPACKMatrix<NumberType>::copy_transposed(
975  const ScaLAPACKMatrix<NumberType> &B)
976 {
977  add(B, 0, 1, true);
978 }
979 
980 
981 
982 template <typename NumberType>
983 void
984 ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
985  const NumberType alpha,
986  const NumberType beta,
987  const bool transpose_B)
988 {
989  if (transpose_B)
990  {
991  Assert(n_rows == B.n_columns, ExcDimensionMismatch(n_rows, B.n_columns));
992  Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
993  Assert(column_block_size == B.row_block_size,
994  ExcDimensionMismatch(column_block_size, B.row_block_size));
995  Assert(row_block_size == B.column_block_size,
996  ExcDimensionMismatch(row_block_size, B.column_block_size));
997  }
998  else
999  {
1000  Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
1001  Assert(n_columns == B.n_columns,
1002  ExcDimensionMismatch(n_columns, B.n_columns));
1003  Assert(column_block_size == B.column_block_size,
1004  ExcDimensionMismatch(column_block_size, B.column_block_size));
1005  Assert(row_block_size == B.row_block_size,
1006  ExcDimensionMismatch(row_block_size, B.row_block_size));
1007  }
1008  Assert(this->grid == B.grid,
1009  ExcMessage("The matrices A and B need to have the same process grid"));
1010 
1011  if (this->grid->mpi_process_is_active)
1012  {
1013  char trans_b = transpose_B ? 'T' : 'N';
1014  NumberType *A_loc =
1015  (this->values.size() > 0) ? this->values.data() : nullptr;
1016  const NumberType *B_loc =
1017  (B.values.size() > 0) ? B.values.data() : nullptr;
1018 
1019  pgeadd(&trans_b,
1020  &n_rows,
1021  &n_columns,
1022  &beta,
1023  B_loc,
1024  &B.submatrix_row,
1025  &B.submatrix_column,
1026  B.descriptor,
1027  &alpha,
1028  A_loc,
1029  &submatrix_row,
1030  &submatrix_column,
1031  descriptor);
1032  }
1033  state = LAPACKSupport::matrix;
1034 }
1035 
1036 
1037 
1038 template <typename NumberType>
1039 void
1040 ScaLAPACKMatrix<NumberType>::add(const NumberType a,
1041  const ScaLAPACKMatrix<NumberType> &B)
1042 {
1043  add(B, 1, a, false);
1044 }
1045 
1046 
1047 
1048 template <typename NumberType>
1049 void
1050 ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
1051  const ScaLAPACKMatrix<NumberType> &B)
1052 {
1053  add(B, 1, a, true);
1054 }
1055 
1056 
1057 
1058 template <typename NumberType>
1059 void
1060 ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
1061  const ScaLAPACKMatrix<NumberType> &B,
1062  const NumberType c,
1063  ScaLAPACKMatrix<NumberType> &C,
1064  const bool transpose_A,
1065  const bool transpose_B) const
1066 {
1067  Assert(this->grid == B.grid,
1068  ExcMessage("The matrices A and B need to have the same process grid"));
1069  Assert(C.grid == B.grid,
1070  ExcMessage("The matrices B and C need to have the same process grid"));
1071 
1072  // see for further info:
1073  // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgemm.htm
1074  if (!transpose_A && !transpose_B)
1075  {
1076  Assert(this->n_columns == B.n_rows,
1077  ExcDimensionMismatch(this->n_columns, B.n_rows));
1078  Assert(this->n_rows == C.n_rows,
1079  ExcDimensionMismatch(this->n_rows, C.n_rows));
1080  Assert(B.n_columns == C.n_columns,
1081  ExcDimensionMismatch(B.n_columns, C.n_columns));
1082  Assert(this->row_block_size == C.row_block_size,
1083  ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1084  Assert(this->column_block_size == B.row_block_size,
1085  ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1086  Assert(B.column_block_size == C.column_block_size,
1087  ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1088  }
1089  else if (transpose_A && !transpose_B)
1090  {
1091  Assert(this->n_rows == B.n_rows,
1092  ExcDimensionMismatch(this->n_rows, B.n_rows));
1093  Assert(this->n_columns == C.n_rows,
1094  ExcDimensionMismatch(this->n_columns, C.n_rows));
1095  Assert(B.n_columns == C.n_columns,
1096  ExcDimensionMismatch(B.n_columns, C.n_columns));
1097  Assert(this->column_block_size == C.row_block_size,
1098  ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1099  Assert(this->row_block_size == B.row_block_size,
1100  ExcDimensionMismatch(this->row_block_size, B.row_block_size));
1101  Assert(B.column_block_size == C.column_block_size,
1102  ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1103  }
1104  else if (!transpose_A && transpose_B)
1105  {
1106  Assert(this->n_columns == B.n_columns,
1107  ExcDimensionMismatch(this->n_columns, B.n_columns));
1108  Assert(this->n_rows == C.n_rows,
1109  ExcDimensionMismatch(this->n_rows, C.n_rows));
1110  Assert(B.n_rows == C.n_columns,
1111  ExcDimensionMismatch(B.n_rows, C.n_columns));
1112  Assert(this->row_block_size == C.row_block_size,
1113  ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1114  Assert(this->column_block_size == B.column_block_size,
1115  ExcDimensionMismatch(this->column_block_size,
1116  B.column_block_size));
1117  Assert(B.row_block_size == C.column_block_size,
1118  ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1119  }
1120  else // if (transpose_A && transpose_B)
1121  {
1122  Assert(this->n_rows == B.n_columns,
1123  ExcDimensionMismatch(this->n_rows, B.n_columns));
1124  Assert(this->n_columns == C.n_rows,
1125  ExcDimensionMismatch(this->n_columns, C.n_rows));
1126  Assert(B.n_rows == C.n_columns,
1127  ExcDimensionMismatch(B.n_rows, C.n_columns));
1128  Assert(this->column_block_size == C.row_block_size,
1129  ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1130  Assert(this->row_block_size == B.column_block_size,
1131  ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1132  Assert(B.row_block_size == C.column_block_size,
1133  ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1134  }
1135 
1136  if (this->grid->mpi_process_is_active)
1137  {
1138  char trans_a = transpose_A ? 'T' : 'N';
1139  char trans_b = transpose_B ? 'T' : 'N';
1140 
1141  const NumberType *A_loc =
1142  (this->values.size() > 0) ? this->values.data() : nullptr;
1143  const NumberType *B_loc =
1144  (B.values.size() > 0) ? B.values.data() : nullptr;
1145  NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
1146  int m = C.n_rows;
1147  int n = C.n_columns;
1148  int k = transpose_A ? this->n_rows : this->n_columns;
1149 
1150  pgemm(&trans_a,
1151  &trans_b,
1152  &m,
1153  &n,
1154  &k,
1155  &b,
1156  A_loc,
1157  &(this->submatrix_row),
1158  &(this->submatrix_column),
1159  this->descriptor,
1160  B_loc,
1161  &B.submatrix_row,
1162  &B.submatrix_column,
1163  B.descriptor,
1164  &c,
1165  C_loc,
1166  &C.submatrix_row,
1167  &C.submatrix_column,
1168  C.descriptor);
1169  }
1170  C.state = LAPACKSupport::matrix;
1171 }
1172 
1173 
1174 
1175 template <typename NumberType>
1176 void
1177 ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> &C,
1178  const ScaLAPACKMatrix<NumberType> &B,
1179  const bool adding) const
1180 {
1181  if (adding)
1182  mult(1., B, 1., C, false, false);
1183  else
1184  mult(1., B, 0, C, false, false);
1185 }
1186 
1187 
1188 
1189 template <typename NumberType>
1190 void
1191 ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> &C,
1192  const ScaLAPACKMatrix<NumberType> &B,
1193  const bool adding) const
1194 {
1195  if (adding)
1196  mult(1., B, 1., C, true, false);
1197  else
1198  mult(1., B, 0, C, true, false);
1199 }
1200 
1201 
1202 
1203 template <typename NumberType>
1204 void
1205 ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> &C,
1206  const ScaLAPACKMatrix<NumberType> &B,
1207  const bool adding) const
1208 {
1209  if (adding)
1210  mult(1., B, 1., C, false, true);
1211  else
1212  mult(1., B, 0, C, false, true);
1213 }
1214 
1215 
1216 
1217 template <typename NumberType>
1218 void
1219 ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> &C,
1220  const ScaLAPACKMatrix<NumberType> &B,
1221  const bool adding) const
1222 {
1223  if (adding)
1224  mult(1., B, 1., C, true, true);
1225  else
1226  mult(1., B, 0, C, true, true);
1227 }
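// A minimal usage sketch (not part of scalapack.cc) of the multiplication wrappers
// above; A, B and C are distributed matrices on the same process grid with matching
// dimensions and block sizes.
//
//   A.mmult(C, B);        // C = A * B
//   A.Tmmult(C, B);       // C = A^T * B
//   A.mult(2., B, 1., C); // general form: C = 2 * A * B + C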
1228 
1229 
1230 
1231 template <typename NumberType>
1232 void
1233 ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
1234 {
1235  Assert(
1236  n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
1237  ExcMessage(
1238  "Cholesky factorization can be applied to symmetric matrices only."));
1239  Assert(state == LAPACKSupport::matrix,
1240  ExcMessage(
1241  "Matrix has to be in Matrix state before calling this function."));
1242 
1243  if (grid->mpi_process_is_active)
1244  {
1245  int info = 0;
1246  NumberType *A_loc = this->values.data();
1247  // pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
1248  ppotrf(&uplo,
1249  &n_columns,
1250  A_loc,
1251  &submatrix_row,
1252  &submatrix_column,
1253  descriptor,
1254  &info);
1255  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
1256  }
1257  state = LAPACKSupport::cholesky;
1258  property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
1259  LAPACKSupport::upper_triangular);
1260 }
1261 
1262 
1263 
1264 template <typename NumberType>
1265 void
1266 ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
1267 {
1268  Assert(state == LAPACKSupport::matrix,
1269  ExcMessage(
1270  "Matrix has to be in Matrix state before calling this function."));
1271 
1272  if (grid->mpi_process_is_active)
1273  {
1274  int info = 0;
1275  NumberType *A_loc = this->values.data();
1276 
1277  const int iarow = indxg2p_(&submatrix_row,
1278  &row_block_size,
1279  &(grid->this_process_row),
1280  &first_process_row,
1281  &(grid->n_process_rows));
1282  const int mp = numroc_(&n_rows,
1283  &row_block_size,
1284  &(grid->this_process_row),
1285  &iarow,
1286  &(grid->n_process_rows));
1287  ipiv.resize(mp + row_block_size);
1288 
1289  pgetrf(&n_rows,
1290  &n_columns,
1291  A_loc,
1292  &submatrix_row,
1293  &submatrix_column,
1294  descriptor,
1295  ipiv.data(),
1296  &info);
1297  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
1298  }
1299  state = LAPACKSupport::State::lu;
1300  property = LAPACKSupport::Property::general;
1301 }
1302 
1303 
1304 
1305 template <typename NumberType>
1306 void
1307 ScaLAPACKMatrix<NumberType>::invert()
1308 {
1309  // Check whether matrix is symmetric and save flag.
1310  // If a Cholesky factorization has been applied previously,
1311  // the original matrix was symmetric.
1312  const bool is_symmetric = (property == LAPACKSupport::symmetric ||
1313  state == LAPACKSupport::State::cholesky);
1314 
1315  // Check whether matrix is triangular and is in an unfactorized state.
1316  const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
1317  property == LAPACKSupport::lower_triangular) &&
1318  (state == LAPACKSupport::State::matrix ||
1319  state == LAPACKSupport::State::inverse_matrix);
1320 
1321  if (is_triangular)
1322  {
1323  if (grid->mpi_process_is_active)
1324  {
1325  const char uploTriangular =
1326  property == LAPACKSupport::upper_triangular ? 'U' : 'L';
1327  const char diag = 'N';
1328  int info = 0;
1329  NumberType *A_loc = this->values.data();
1330  ptrtri(&uploTriangular,
1331  &diag,
1332  &n_columns,
1333  A_loc,
1334  &submatrix_row,
1335  &submatrix_column,
1336  descriptor,
1337  &info);
1338  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
1339  // The inversion is stored in the same part as the triangular matrix,
1340  // so we don't need to re-set the property here.
1341  }
1342  }
1343  else
1344  {
1345  // Matrix is neither in Cholesky nor LU state.
1346  // Compute the required factorizations based on the property of the
1347  // matrix.
1348  if (!(state == LAPACKSupport::State::lu ||
1349  state == LAPACKSupport::State::cholesky))
1350  {
1351  if (is_symmetric)
1352  compute_cholesky_factorization();
1353  else
1354  compute_lu_factorization();
1355  }
1356  if (grid->mpi_process_is_active)
1357  {
1358  int info = 0;
1359  NumberType *A_loc = this->values.data();
1360 
1361  if (is_symmetric)
1362  {
1363  ppotri(&uplo,
1364  &n_columns,
1365  A_loc,
1366  &submatrix_row,
1367  &submatrix_column,
1368  descriptor,
1369  &info);
1370  AssertThrow(info == 0,
1371  LAPACKSupport::ExcErrorCode("ppotri", info));
1372  property = LAPACKSupport::Property::symmetric;
1373  }
1374  else
1375  {
1376  int lwork = -1, liwork = -1;
1377  work.resize(1);
1378  iwork.resize(1);
1379 
1380  pgetri(&n_columns,
1381  A_loc,
1382  &submatrix_row,
1383  &submatrix_column,
1384  descriptor,
1385  ipiv.data(),
1386  work.data(),
1387  &lwork,
1388  iwork.data(),
1389  &liwork,
1390  &info);
1391 
1392  AssertThrow(info == 0,
1393  LAPACKSupport::ExcErrorCode("pgetri", info));
1394  lwork = static_cast<int>(work[0]);
1395  liwork = iwork[0];
1396  work.resize(lwork);
1397  iwork.resize(liwork);
1398 
1399  pgetri(&n_columns,
1400  A_loc,
1401  &submatrix_row,
1402  &submatrix_column,
1403  descriptor,
1404  ipiv.data(),
1405  work.data(),
1406  &lwork,
1407  iwork.data(),
1408  &liwork,
1409  &info);
1410 
1411  AssertThrow(info == 0,
1412  LAPACKSupport::ExcErrorCode("pgetri", info));
1413  }
1414  }
1415  }
1416  state = LAPACKSupport::State::inverse_matrix;
1417 }
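// A minimal usage sketch (not part of scalapack.cc): inverting a symmetric positive
// definite matrix in place; invert() computes the Cholesky factorization itself if
// the matrix has not been factorized yet.
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   A.invert(); // A now holds its inverse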
1418 
1419 
1420 
1421 template <typename NumberType>
1422 std::vector<NumberType>
1423 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
1424  const std::pair<unsigned int, unsigned int> &index_limits,
1425  const bool compute_eigenvectors)
1426 {
1427  // check validity of index limits
1428  AssertIndexRange(index_limits.first, n_rows);
1429  AssertIndexRange(index_limits.second, n_rows);
1430 
1431  std::pair<unsigned int, unsigned int> idx =
1432  std::make_pair(std::min(index_limits.first, index_limits.second),
1433  std::max(index_limits.first, index_limits.second));
1434 
1435  // compute all eigenvalues/eigenvectors
1436  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1437  return eigenpairs_symmetric(compute_eigenvectors);
1438  else
1439  return eigenpairs_symmetric(compute_eigenvectors, idx);
1440 }
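// A minimal usage sketch (not part of scalapack.cc): computing the five smallest
// eigenvalues and the associated eigenvectors of a symmetric matrix A; on return the
// eigenvectors overwrite the contents of A column by column.
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   const std::vector<double> ev =
//     A.eigenpairs_symmetric_by_index(std::make_pair(0u, 4u), true);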
1441 
1442 
1443 
1444 template <typename NumberType>
1445 std::vector<NumberType>
1446 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
1447  const std::pair<NumberType, NumberType> &value_limits,
1448  const bool compute_eigenvectors)
1449 {
1450  Assert(!std::isnan(value_limits.first),
1451  ExcMessage("value_limits.first is NaN"));
1452  Assert(!std::isnan(value_limits.second),
1453  ExcMessage("value_limits.second is NaN"));
1454 
1455  std::pair<unsigned int, unsigned int> indices =
1456  std::make_pair(numbers::invalid_unsigned_int,
1457  numbers::invalid_unsigned_int);
1458 
1459  return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
1460 }
1461 
1462 
1463 
1464 template <typename NumberType>
1465 std::vector<NumberType>
1466 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
1467  const bool compute_eigenvectors,
1468  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1469  const std::pair<NumberType, NumberType> & eigenvalue_limits)
1470 {
1471  Assert(state == LAPACKSupport::matrix,
1472  ExcMessage(
1473  "Matrix has to be in Matrix state before calling this function."));
1474  Assert(property == LAPACKSupport::symmetric,
1475  ExcMessage("Matrix has to be symmetric for this operation."));
1476 
1477  std::lock_guard<std::mutex> lock(mutex);
1478 
1479  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1480  std::isnan(eigenvalue_limits.second)) ?
1481  false :
1482  true;
1483  const bool use_indices =
1484  ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1485  (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1486  false :
1487  true;
1488 
1489  Assert(
1490  !(use_values && use_indices),
1491  ExcMessage(
1492  "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1493 
1494  // if computation of eigenvectors is not required use a sufficiently small
1495  // distributed matrix
1496  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1497  compute_eigenvectors ?
1498  std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1499  grid,
1500  row_block_size) :
1501  std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(
1502  grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1503 
1504  eigenvectors->property = property;
1505  // number of eigenvalues to be returned from psyevx; upon successful exit ev
1506  // contains the m selected eigenvalues in ascending order; set to all
1507  // eigenvalues in case we will be using psyev.
1508  int m = n_rows;
1509  std::vector<NumberType> ev(n_rows);
1510 
1511  if (grid->mpi_process_is_active)
1512  {
1513  int info = 0;
1514  /*
1515  * for jobz==N only eigenvalues are computed, for jobz='V' also the
1516  * eigenvectors of the matrix are computed
1517  */
1518  char jobz = compute_eigenvectors ? 'V' : 'N';
1519  char range = 'A';
1520  // default value is to compute all eigenvalues and optionally eigenvectors
1521  bool all_eigenpairs = true;
1522  NumberType vl = NumberType(), vu = NumberType();
1523  int il = 1, iu = 1;
1524  // number of eigenvectors to be returned;
1525  // upon successful exit the first m=nz columns contain the selected
1526  // eigenvectors (only if jobz=='V')
1527  int nz = 0;
1528  NumberType abstol = NumberType();
1529 
1530  // orfac decides which eigenvectors should be reorthogonalized
1531  // see
1532  // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1533  // for explanation; to keep it simple, no reorthogonalization will be done by
1534  // setting orfac to 0
1535  NumberType orfac = 0;
1536  // contains the indices of eigenvectors that failed to converge
1537  std::vector<int> ifail;
1538  // This array contains indices of eigenvectors corresponding to
1539  // a cluster of eigenvalues that could not be reorthogonalized
1540  // due to insufficient workspace
1541  // see
1542  // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1543  // for explanation
1544  std::vector<int> iclustr;
1545  // This array contains the gap between eigenvalues whose
1546  // eigenvectors could not be reorthogonalized.
1547  // see
1548  // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1549  // for explanation
1550  std::vector<NumberType> gap(n_local_rows * n_local_columns);
1551 
1552  // index range for eigenvalues is not specified
1553  if (!use_indices)
1554  {
1555  // interval for eigenvalues is not specified and consequently all
1556  // eigenvalues/eigenpairs will be computed
1557  if (!use_values)
1558  {
1559  range = 'A';
1560  all_eigenpairs = true;
1561  }
1562  else
1563  {
1564  range = 'V';
1565  all_eigenpairs = false;
1566  vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1567  vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1568  }
1569  }
1570  else
1571  {
1572  range = 'I';
1573  all_eigenpairs = false;
1574  // as Fortran starts counting/indexing from 1 unlike C/C++, where it
1575  // starts from 0
1576  il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1577  iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1578  }
1579  NumberType *A_loc = this->values.data();
1580  /*
1581  * by setting lwork to -1 a workspace query for optimal length of work is
1582  * performed
1583  */
1584  int lwork = -1;
1585  int liwork = -1;
1586  NumberType *eigenvectors_loc =
1587  (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1588  work.resize(1);
1589  iwork.resize(1);
1590 
1591  if (all_eigenpairs)
1592  {
1593  psyev(&jobz,
1594  &uplo,
1595  &n_rows,
1596  A_loc,
1597  &submatrix_row,
1598  &submatrix_column,
1599  descriptor,
1600  ev.data(),
1601  eigenvectors_loc,
1602  &eigenvectors->submatrix_row,
1603  &eigenvectors->submatrix_column,
1604  eigenvectors->descriptor,
1605  work.data(),
1606  &lwork,
1607  &info);
1608  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1609  }
1610  else
1611  {
1612  char cmach = compute_eigenvectors ? 'U' : 'S';
1613  plamch(&(this->grid->blacs_context), &cmach, abstol);
1614  abstol *= 2;
1615  ifail.resize(n_rows);
1616  iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
1617  gap.resize(grid->n_process_rows * grid->n_process_columns);
1618 
1619  psyevx(&jobz,
1620  &range,
1621  &uplo,
1622  &n_rows,
1623  A_loc,
1624  &submatrix_row,
1625  &submatrix_column,
1626  descriptor,
1627  &vl,
1628  &vu,
1629  &il,
1630  &iu,
1631  &abstol,
1632  &m,
1633  &nz,
1634  ev.data(),
1635  &orfac,
1636  eigenvectors_loc,
1637  &eigenvectors->submatrix_row,
1638  &eigenvectors->submatrix_column,
1639  eigenvectors->descriptor,
1640  work.data(),
1641  &lwork,
1642  iwork.data(),
1643  &liwork,
1644  ifail.data(),
1645  iclustr.data(),
1646  gap.data(),
1647  &info);
1648  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1649  }
1650  lwork = static_cast<int>(work[0]);
1651  work.resize(lwork);
1652 
1653  if (all_eigenpairs)
1654  {
1655  psyev(&jobz,
1656  &uplo,
1657  &n_rows,
1658  A_loc,
1659  &submatrix_row,
1660  &submatrix_column,
1661  descriptor,
1662  ev.data(),
1663  eigenvectors_loc,
1664  &eigenvectors->submatrix_row,
1665  &eigenvectors->submatrix_column,
1666  eigenvectors->descriptor,
1667  work.data(),
1668  &lwork,
1669  &info);
1670 
1671  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1672  }
1673  else
1674  {
1675  liwork = iwork[0];
1676  AssertThrow(liwork > 0, ExcInternalError());
1677  iwork.resize(liwork);
1678 
1679  psyevx(&jobz,
1680  &range,
1681  &uplo,
1682  &n_rows,
1683  A_loc,
1684  &submatrix_row,
1685  &submatrix_column,
1686  descriptor,
1687  &vl,
1688  &vu,
1689  &il,
1690  &iu,
1691  &abstol,
1692  &m,
1693  &nz,
1694  ev.data(),
1695  &orfac,
1696  eigenvectors_loc,
1697  &eigenvectors->submatrix_row,
1698  &eigenvectors->submatrix_column,
1699  eigenvectors->descriptor,
1700  work.data(),
1701  &lwork,
1702  iwork.data(),
1703  &liwork,
1704  ifail.data(),
1705  iclustr.data(),
1706  gap.data(),
1707  &info);
1708 
1709  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1710  }
1711  // if eigenvectors are queried, copy eigenvectors to the original matrix;
1712  // as the temporary matrix eigenvectors has identical dimensions and
1713  // block-cyclic distribution, we simply swap the local array
1714  if (compute_eigenvectors)
1715  this->values.swap(eigenvectors->values);
1716 
1717  // adapt the size of ev to fit m upon return
1718  while (ev.size() > static_cast<size_type>(m))
1719  ev.pop_back();
1720  }
1721  /*
1722  * send number of computed eigenvalues to inactive processes
1723  */
1724  grid->send_to_inactive(&m, 1);
1725 
1726  /*
1727  * inactive processes have to resize array of eigenvalues
1728  */
1729  if (!grid->mpi_process_is_active)
1730  ev.resize(m);
1731  /*
1732  * send the eigenvalues to processors not being part of the process grid
1733  */
1734  grid->send_to_inactive(ev.data(), ev.size());
1735 
1736  /*
1737  * if only eigenvalues are queried, the content of the matrix will be destroyed;
1738  * if the eigenpairs are queried, matrix A on exit stores the eigenvectors in
1739  * the columns
1740  */
1741  if (compute_eigenvectors)
1742  {
1743  property = LAPACKSupport::Property::general;
1744  state    = LAPACKSupport::eigenvalues;
1745  }
1746  else
1747  state = LAPACKSupport::unusable;
1748 
1749  return ev;
1750 }
1751 
1752 
1753 
1754 template <typename NumberType>
1755 std::vector<NumberType>
1756 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
1757  const std::pair<unsigned int, unsigned int> &index_limits,
1758  const bool compute_eigenvectors)
1759 {
1760  // Check validity of index limits.
1761  AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
1762  AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));
1763 
1764  const std::pair<unsigned int, unsigned int> idx =
1765  std::make_pair(std::min(index_limits.first, index_limits.second),
1766  std::max(index_limits.first, index_limits.second));
1767 
1768  // Compute all eigenvalues/eigenvectors.
1769  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1770  return eigenpairs_symmetric_MRRR(compute_eigenvectors);
1771  else
1772  return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
1773 }
1774 
1775 
1776 
1777 template <typename NumberType>
1778 std::vector<NumberType>
1780  const std::pair<NumberType, NumberType> &value_limits,
1781  const bool compute_eigenvectors)
1782 {
1783  AssertIsFinite(value_limits.first);
1784  AssertIsFinite(value_limits.second);
1785 
1786  const std::pair<unsigned int, unsigned int> indices =
1787  std::make_pair(numbers::invalid_unsigned_int,
1789 
1790  return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
1791 }
1792 
1793 
1794 
1795 template <typename NumberType>
1796 std::vector<NumberType>
1798  const bool compute_eigenvectors,
1799  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1800  const std::pair<NumberType, NumberType> & eigenvalue_limits)
1801 {
1802  Assert(state == LAPACKSupport::matrix,
1803  ExcMessage(
1804  "Matrix has to be in Matrix state before calling this function."));
1805  Assert(property == LAPACKSupport::symmetric,
1806  ExcMessage("Matrix has to be symmetric for this operation."));
1807 
1808  std::lock_guard<std::mutex> lock(mutex);
1809 
1810  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1811  std::isnan(eigenvalue_limits.second)) ?
1812  false :
1813  true;
1814  const bool use_indices =
1815  ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1816  (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1817  false :
1818  true;
1819 
1820  Assert(
1821  !(use_values && use_indices),
1822  ExcMessage(
1823  "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1824 
1825  // If computation of eigenvectors is not required, use a sufficiently small
1826  // distributed matrix.
1827  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1828  compute_eigenvectors ?
1829  std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1830  grid,
1831  row_block_size) :
1832  std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(
1833  grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1834 
1835  eigenvectors->property = property;
1836  // Number of eigenvalues to be returned from psyevr; upon successful exit ev
1837  // contains the m selected eigenvalues in ascending order.
1838  int m = n_rows;
1839  std::vector<NumberType> ev(n_rows);
1840 
1841  // Number of eigenvectors to be returned;
1842  // Upon successful exit the first m=nz columns contain the selected
1843  // eigenvectors (only if jobz=='V').
1844  int nz = 0;
1845 
1846  if (grid->mpi_process_is_active)
1847  {
1848  int info = 0;
1849  /*
1850  * For jobz==N only eigenvalues are computed, for jobz='V' also the
1851  * eigenvectors of the matrix are computed.
1852  */
1853  char jobz = compute_eigenvectors ? 'V' : 'N';
1854  // Default value is to compute all eigenvalues and optionally
1855  // eigenvectors.
1856  char range = 'A';
1857  NumberType vl = NumberType(), vu = NumberType();
1858  int il = 1, iu = 1;
1859 
1860  // Index range for eigenvalues is not specified.
1861  if (!use_indices)
1862  {
1863  // Interval for eigenvalues is not specified and consequently all
1864  // eigenvalues/eigenpairs will be computed.
1865  if (!use_values)
1866  {
1867  range = 'A';
1868  }
1869  else
1870  {
1871  range = 'V';
1872  vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1873  vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1874  }
1875  }
1876  else
1877  {
1878  range = 'I';
1879  // As Fortran starts counting/indexing from 1 unlike C/C++, where it
1880  // starts from 0.
1881  il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1882  iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1883  }
1884  NumberType *A_loc = this->values.data();
1885 
1886  /*
1887  * By setting lwork to -1 a workspace query for optimal length of work is
1888  * performed.
1889  */
1890  int lwork = -1;
1891  int liwork = -1;
1892  NumberType *eigenvectors_loc =
1893  (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1894  work.resize(1);
1895  iwork.resize(1);
1896 
1897  psyevr(&jobz,
1898  &range,
1899  &uplo,
1900  &n_rows,
1901  A_loc,
1902  &submatrix_row,
1903  &submatrix_column,
1904  descriptor,
1905  &vl,
1906  &vu,
1907  &il,
1908  &iu,
1909  &m,
1910  &nz,
1911  ev.data(),
1912  eigenvectors_loc,
1913  &eigenvectors->submatrix_row,
1914  &eigenvectors->submatrix_column,
1915  eigenvectors->descriptor,
1916  work.data(),
1917  &lwork,
1918  iwork.data(),
1919  &liwork,
1920  &info);
1921 
1922  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1923 
1924  lwork = static_cast<int>(work[0]);
1925  work.resize(lwork);
1926  liwork = iwork[0];
1927  iwork.resize(liwork);
1928 
1929  psyevr(&jobz,
1930  &range,
1931  &uplo,
1932  &n_rows,
1933  A_loc,
1934  &submatrix_row,
1935  &submatrix_column,
1936  descriptor,
1937  &vl,
1938  &vu,
1939  &il,
1940  &iu,
1941  &m,
1942  &nz,
1943  ev.data(),
1944  eigenvectors_loc,
1945  &eigenvectors->submatrix_row,
1946  &eigenvectors->submatrix_column,
1947  eigenvectors->descriptor,
1948  work.data(),
1949  &lwork,
1950  iwork.data(),
1951  &liwork,
1952  &info);
1953 
1954  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1955 
1956  if (compute_eigenvectors)
1957  AssertThrow(
1958  m == nz,
1959  ExcMessage(
1960  "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
1961 
1962  // If eigenvectors are queried, copy eigenvectors to original matrix.
1963  // As the temporary matrix eigenvectors has identical dimensions and
1964  // block-cyclic distribution we simply swap the local array.
1965  if (compute_eigenvectors)
1966  this->values.swap(eigenvectors->values);
1967 
1968  // Adapt the size of ev to fit m upon return.
1969  while (ev.size() > static_cast<size_type>(m))
1970  ev.pop_back();
1971  }
1972  /*
1973  * Send number of computed eigenvalues to inactive processes.
1974  */
1975  grid->send_to_inactive(&m, 1);
1976 
1977  /*
1978  * Inactive processes have to resize array of eigenvalues.
1979  */
1980  if (!grid->mpi_process_is_active)
1981  ev.resize(m);
1982  /*
1983  * Send the eigenvalues to processors not being part of the process grid.
1984  */
1985  grid->send_to_inactive(ev.data(), ev.size());
1986 
1987  /*
1988  * If only eigenvalues are queried, the content of the matrix will be
1989  * destroyed. If the eigenpairs are queried, matrix A on exit stores the
1990  * eigenvectors in the columns.
1991  */
1992  if (compute_eigenvectors)
1993  {
1994  property = LAPACKSupport::Property::general;
1995  state = LAPACKSupport::eigenvalues;
1996  }
1997  else
1998  state = LAPACKSupport::unusable;
1999 
2000  return ev;
2001 }
2002 
2003 
2004 
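The routine above selects eigenpairs either by an index interval or by a value interval and uses the usual two-step ScaLAPACK workspace query (a first call with lwork = -1, then the actual call). A minimal usage sketch follows; the matrix size 1000, the block size 32, the request for the ten smallest eigenpairs and the helper function name are illustration values only, and filling the matrix is elided:

  #include <deal.II/base/mpi.h>
  #include <deal.II/lac/scalapack.h>

  #include <memory>
  #include <vector>

  void smallest_eigenpairs(MPI_Comm mpi_communicator)
  {
    const unsigned int size = 1000, block_size = 32;
    const auto grid = std::make_shared<dealii::Utilities::MPI::ProcessGrid>(
      mpi_communicator, size, size, block_size, block_size);

    dealii::ScaLAPACKMatrix<double> A(
      size, grid, block_size, dealii::LAPACKSupport::Property::symmetric);
    // ... fill A, e.g. by assigning a FullMatrix or via copy_from() ...

    // eigenvalues with indices 0..9 in ascending order; as eigenvectors are
    // requested, A is overwritten by them on exit
    const std::vector<double> lambda =
      A.eigenpairs_symmetric_by_index_MRRR(std::make_pair(0U, 9U), true);
  }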
2005 template <typename NumberType>
2006 std::vector<NumberType>
2007 ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
2008  ScaLAPACKMatrix<NumberType> *VT)
2009 {
2010  Assert(state == LAPACKSupport::matrix,
2011  ExcMessage(
2012  "Matrix has to be in Matrix state before calling this function."));
2013  Assert(row_block_size == column_block_size,
2014  ExcDimensionMismatch(row_block_size, column_block_size));
2015 
2016  const bool left_singular_vectors = (U != nullptr) ? true : false;
2017  const bool right_singular_vectors = (VT != nullptr) ? true : false;
2018 
2019  if (left_singular_vectors)
2020  {
2021  Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
2022  Assert(U->n_rows == U->n_columns,
2023  ExcDimensionMismatch(U->n_rows, U->n_columns));
2024  Assert(row_block_size == U->row_block_size,
2025  ExcDimensionMismatch(row_block_size, U->row_block_size));
2026  Assert(column_block_size == U->column_block_size,
2027  ExcDimensionMismatch(column_block_size, U->column_block_size));
2028  Assert(grid->blacs_context == U->grid->blacs_context,
2029  ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
2030  }
2031  if (right_singular_vectors)
2032  {
2033  Assert(n_columns == VT->n_rows,
2034  ExcDimensionMismatch(n_columns, VT->n_rows));
2035  Assert(VT->n_rows == VT->n_columns,
2036  ExcDimensionMismatch(VT->n_rows, VT->n_columns));
2037  Assert(row_block_size == VT->row_block_size,
2038  ExcDimensionMismatch(row_block_size, VT->row_block_size));
2039  Assert(column_block_size == VT->column_block_size,
2040  ExcDimensionMismatch(column_block_size, VT->column_block_size));
2041  Assert(grid->blacs_context == VT->grid->blacs_context,
2042  ExcDimensionMismatch(grid->blacs_context,
2043  VT->grid->blacs_context));
2044  }
2045  std::lock_guard<std::mutex> lock(mutex);
2046 
2047  std::vector<NumberType> sv(std::min(n_rows, n_columns));
2048 
2049  if (grid->mpi_process_is_active)
2050  {
2051  char jobu = left_singular_vectors ? 'V' : 'N';
2052  char jobvt = right_singular_vectors ? 'V' : 'N';
2053  NumberType *A_loc = this->values.data();
2054  NumberType *U_loc = left_singular_vectors ? U->values.data() : nullptr;
2055  NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;
2056  int info = 0;
2057  /*
2058  * by setting lwork to -1 a workspace query for optimal length of work is
2059  * performed
2060  */
2061  int lwork = -1;
2062  work.resize(1);
2063 
2064  pgesvd(&jobu,
2065  &jobvt,
2066  &n_rows,
2067  &n_columns,
2068  A_loc,
2069  &submatrix_row,
2070  &submatrix_column,
2071  descriptor,
2072  &*sv.begin(),
2073  U_loc,
2074  &U->submatrix_row,
2075  &U->submatrix_column,
2076  U->descriptor,
2077  VT_loc,
2078  &VT->submatrix_row,
2079  &VT->submatrix_column,
2080  VT->descriptor,
2081  work.data(),
2082  &lwork,
2083  &info);
2084  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2085 
2086  lwork = static_cast<int>(work[0]);
2087  work.resize(lwork);
2088 
2089  pgesvd(&jobu,
2090  &jobvt,
2091  &n_rows,
2092  &n_columns,
2093  A_loc,
2094  &submatrix_row,
2095  &submatrix_column,
2096  descriptor,
2097  &*sv.begin(),
2098  U_loc,
2099  &U->submatrix_row,
2100  &U->submatrix_column,
2101  U->descriptor,
2102  VT_loc,
2103  &VT->submatrix_row,
2104  &VT->submatrix_column,
2105  VT->descriptor,
2106  work.data(),
2107  &lwork,
2108  &info);
2109  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2110  }
2111 
2112  /*
2113  * send the singular values to processors not being part of the process grid
2114  */
2115  grid->send_to_inactive(sv.data(), sv.size());
2116 
2117  property = LAPACKSupport::Property::general;
2118  state = LAPACKSupport::State::unusable;
2119 
2120  return sv;
2121 }
2122 
2123 
2124 
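A minimal sketch of calling the SVD driver above. U and VT have to be square, share the process grid of A and use the same (square) block sizes; all names below are illustrative:

  // Returns the singular values in descending order. A is overwritten and its
  // state becomes LAPACKSupport::State::unusable afterwards.
  std::vector<double>
  svd_example(dealii::ScaLAPACKMatrix<double> &A,
              const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid,
              const unsigned int block_size)
  {
    dealii::ScaLAPACKMatrix<double> U(A.m(), A.m(), grid, block_size, block_size);
    dealii::ScaLAPACKMatrix<double> VT(A.n(), A.n(), grid, block_size, block_size);
    return A.compute_SVD(&U, &VT); // call compute_SVD() for the values only
  }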
2125 template <typename NumberType>
2126 void
2127 ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
2128  const bool transpose)
2129 {
2130  Assert(grid == B.grid,
2131  ExcMessage("The matrices A and B need to have the same process grid"));
2132  Assert(state == LAPACKSupport::matrix,
2133  ExcMessage(
2134  "Matrix has to be in Matrix state before calling this function."));
2135  Assert(B.state == LAPACKSupport::matrix,
2136  ExcMessage(
2137  "Matrix B has to be in Matrix state before calling this function."));
2138 
2139  if (transpose)
2140  {
2141  Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
2142  }
2143  else
2144  {
2145  Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
2146  }
2147 
2148  // see
2149  // https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgels.htm
2150  Assert(row_block_size == column_block_size,
2151  ExcMessage(
2152  "Use identical block sizes for rows and columns of matrix A"));
2153  Assert(B.row_block_size == B.column_block_size,
2154  ExcMessage(
2155  "Use identical block sizes for rows and columns of matrix B"));
2156  Assert(row_block_size == B.row_block_size,
2157  ExcMessage(
2158  "Use identical block-cyclic distribution for matrices A and B"));
2159 
2160  std::lock_guard<std::mutex> lock(mutex);
2161 
2162  if (grid->mpi_process_is_active)
2163  {
2164  char trans = transpose ? 'T' : 'N';
2165  NumberType *A_loc = this->values.data();
2166  NumberType *B_loc = B.values.data();
2167  int info = 0;
2168  /*
2169  * by setting lwork to -1 a workspace query for optimal length of work is
2170  * performed
2171  */
2172  int lwork = -1;
2173  work.resize(1);
2174 
2175  pgels(&trans,
2176  &n_rows,
2177  &n_columns,
2178  &B.n_columns,
2179  A_loc,
2180  &submatrix_row,
2181  &submatrix_column,
2182  descriptor,
2183  B_loc,
2184  &B.submatrix_row,
2185  &B.submatrix_column,
2186  B.descriptor,
2187  work.data(),
2188  &lwork,
2189  &info);
2190  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2191 
2192  lwork = static_cast<int>(work[0]);
2193  work.resize(lwork);
2194 
2195  pgels(&trans,
2196  &n_rows,
2197  &n_columns,
2198  &B.n_columns,
2199  A_loc,
2200  &submatrix_row,
2201  &submatrix_column,
2202  descriptor,
2203  B_loc,
2204  &B.submatrix_row,
2205  &B.submatrix_column,
2206  B.descriptor,
2207  work.data(),
2208  &lwork,
2209  &info);
2210  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2211  }
2212  state = LAPACKSupport::State::unusable;
2213 }
2214 
2215 
2216 
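A short usage sketch for the least-squares solver above, for the overdetermined case (n_rows >= n_columns, transpose == false); the helper function name is made up for illustration:

  // Minimizes ||B - A*X|| column by column. A and B must share the process
  // grid and use identical, square block sizes. A is overwritten by its QR
  // factorization; on exit the first A.n() rows of B hold the solution X.
  void solve_overdetermined(dealii::ScaLAPACKMatrix<double> &A,
                            dealii::ScaLAPACKMatrix<double> &B)
  {
    A.least_squares(B, /*transpose=*/false);
  }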
2217 template <typename NumberType>
2218 unsigned int
2219 ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
2220 {
2221  Assert(state == LAPACKSupport::matrix,
2222  ExcMessage(
2223  "Matrix has to be in Matrix state before calling this function."));
2224  Assert(row_block_size == column_block_size,
2225  ExcMessage(
2226  "Use identical block sizes for rows and columns of matrix A"));
2227  Assert(
2228  ratio > 0. && ratio < 1.,
2229  ExcMessage(
2230  "input parameter ratio has to be larger than zero and smaller than 1"));
2231 
2232  ScaLAPACKMatrix<NumberType> U(n_rows,
2233  n_rows,
2234  grid,
2235  row_block_size,
2236  row_block_size,
2237  LAPACKSupport::Property::general);
2238  ScaLAPACKMatrix<NumberType> VT(n_columns,
2239  n_columns,
2240  grid,
2241  row_block_size,
2242  row_block_size,
2243  LAPACKSupport::Property::general);
2244  std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
2245  AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
2246  ExcMessage("Matrix has rank 0"));
2247 
2248  // Get the number of singular values fulfilling the following: sv[i] > sv[0] *
2249  // ratio. Obviously, the 0-th element already satisfies sv[0] > sv[0] * ratio.
2250  // The singular values in sv are ordered by descending value, so we break out
2251  // of the loop as soon as a singular value is not larger than sv[0] * ratio.
2252  unsigned int n_sv = 1;
2253  std::vector<NumberType> inv_sigma;
2254  inv_sigma.push_back(1 / sv[0]);
2255 
2256  for (unsigned int i = 1; i < sv.size(); ++i)
2257  if (sv[i] > sv[0] * ratio)
2258  {
2259  ++n_sv;
2260  inv_sigma.push_back(1 / sv[i]);
2261  }
2262  else
2263  break;
2264 
2265  // For the matrix multiplication we use only the columns of U and rows of VT
2266  // which are associated with singular values larger than the limit. That saves
2267  // computational time for matrices with rank significantly smaller than
2268  // min(n_rows,n_columns)
2269  ScaLAPACKMatrix<NumberType> U_R(n_rows,
2270  n_sv,
2271  grid,
2272  row_block_size,
2273  row_block_size,
2274  LAPACKSupport::Property::general);
2275  ScaLAPACKMatrix<NumberType> VT_R(n_sv,
2276  n_columns,
2277  grid,
2278  row_block_size,
2279  row_block_size,
2280  LAPACKSupport::Property::general);
2281  U.copy_to(U_R,
2282  std::make_pair(0, 0),
2283  std::make_pair(0, 0),
2284  std::make_pair(n_rows, n_sv));
2285  VT.copy_to(VT_R,
2286  std::make_pair(0, 0),
2287  std::make_pair(0, 0),
2288  std::make_pair(n_sv, n_columns));
2289 
2290  VT_R.scale_rows(inv_sigma);
2291  this->reinit(n_columns,
2292  n_rows,
2293  this->grid,
2294  row_block_size,
2295  column_block_size,
2296  LAPACKSupport::Property::general);
2297  VT_R.mult(1, U_R, 0, *this, true, true);
2298  state = LAPACKSupport::State::inverse_matrix;
2299  return n_sv;
2300 }
2301 
2302 
2303 
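In formulas, the routine above builds the truncated Moore-Penrose pseudoinverse from the SVD \(A = U \Sigma V^T\): with \(r\) denoting the number of singular values satisfying \(\sigma_i > \sigma_1 \cdot \mathrm{ratio}\),
\[
  A^{+} = V_r \, \Sigma_r^{-1} \, U_r^{T},
  \qquad
  \Sigma_r^{-1} = \mathrm{diag}\!\left(\frac{1}{\sigma_1}, \ldots, \frac{1}{\sigma_r}\right),
\]
which is exactly the product formed by the final mult() call once VT_R has been scaled row-wise by the inverted singular values; the function returns \(r\).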
2304 template <typename NumberType>
2305 NumberType
2306 ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
2307  const NumberType a_norm) const
2308 {
2309  Assert(state == LAPACKSupport::cholesky,
2310  ExcMessage(
2311  "Matrix has to be in Cholesky state before calling this function."));
2312  std::lock_guard<std::mutex> lock(mutex);
2313  NumberType rcond = 0.;
2314 
2315  if (grid->mpi_process_is_active)
2316  {
2317  int liwork = n_local_rows;
2318  iwork.resize(liwork);
2319 
2320  int info = 0;
2321  const NumberType *A_loc = this->values.data();
2322 
2323  // by setting lwork to -1 a workspace query for optimal length of work is
2324  // performed
2325  int lwork = -1;
2326  work.resize(1);
2327  ppocon(&uplo,
2328  &n_columns,
2329  A_loc,
2330  &submatrix_row,
2331  &submatrix_column,
2332  descriptor,
2333  &a_norm,
2334  &rcond,
2335  work.data(),
2336  &lwork,
2337  iwork.data(),
2338  &liwork,
2339  &info);
2340  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2341  lwork = static_cast<int>(std::ceil(work[0]));
2342  work.resize(lwork);
2343 
2344  // now the actual run:
2345  ppocon(&uplo,
2346  &n_columns,
2347  A_loc,
2348  &submatrix_row,
2349  &submatrix_column,
2350  descriptor,
2351  &a_norm,
2352  &rcond,
2353  work.data(),
2354  &lwork,
2355  iwork.data(),
2356  &liwork,
2357  &info);
2358  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2359  }
2360  grid->send_to_inactive(&rcond);
2361  return rcond;
2362 }
2363 
2364 
2365 
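The estimate above corresponds to \(\mathrm{rcond} \approx 1 / (\|A\|_1 \, \|A^{-1}\|_1)\) and operates on the Cholesky factors. A minimal usage sketch (the helper function name is illustrative):

  // Estimate the reciprocal condition number of a symmetric positive definite
  // matrix. The l1 norm has to be computed before the factorization, because
  // compute_cholesky_factorization() overwrites the matrix content.
  double estimate_rcond(dealii::ScaLAPACKMatrix<double> &A)
  {
    const double a_norm = A.l1_norm();
    A.compute_cholesky_factorization();
    return A.reciprocal_condition_number(a_norm);
  }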
2366 template <typename NumberType>
2367 NumberType
2368 ScaLAPACKMatrix<NumberType>::l1_norm() const
2369 {
2370  const char type('O');
2371 
2372  if (property == LAPACKSupport::symmetric)
2373  return norm_symmetric(type);
2374  else
2375  return norm_general(type);
2376 }
2377 
2378 
2379 
2380 template <typename NumberType>
2381 NumberType
2382 ScaLAPACKMatrix<NumberType>::linfty_norm() const
2383 {
2384  const char type('I');
2385 
2386  if (property == LAPACKSupport::symmetric)
2387  return norm_symmetric(type);
2388  else
2389  return norm_general(type);
2390 }
2391 
2392 
2393 
2394 template <typename NumberType>
2395 NumberType
2396 ScaLAPACKMatrix<NumberType>::frobenius_norm() const
2397 {
2398  const char type('F');
2399 
2400  if (property == LAPACKSupport::symmetric)
2401  return norm_symmetric(type);
2402  else
2403  return norm_general(type);
2404 }
2405 
2406 
2407 
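The three public norms above dispatch to norm_symmetric() (the plansy wrapper below) for matrices marked as symmetric and to norm_general() (the plange wrapper below) otherwise. A small fragment, assuming A is an existing ScaLAPACKMatrix<double> in matrix state:

  const double n1   = A.l1_norm();        // maximum absolute column sum
  const double ninf = A.linfty_norm();    // maximum absolute row sum
  const double nfro = A.frobenius_norm(); // square root of the sum of squares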
2408 template <typename NumberType>
2409 NumberType
2410 ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
2411 {
2412  Assert(state == LAPACKSupport::matrix ||
2413  state == LAPACKSupport::inverse_matrix,
2414  ExcMessage("norms can be called in matrix state only."));
2415  std::lock_guard<std::mutex> lock(mutex);
2416  NumberType res = 0.;
2417 
2418  if (grid->mpi_process_is_active)
2419  {
2420  const int iarow = indxg2p_(&submatrix_row,
2421  &row_block_size,
2422  &(grid->this_process_row),
2423  &first_process_row,
2424  &(grid->n_process_rows));
2425  const int iacol = indxg2p_(&submatrix_column,
2426  &column_block_size,
2427  &(grid->this_process_column),
2428  &first_process_column,
2429  &(grid->n_process_columns));
2430  const int mp0 = numroc_(&n_rows,
2431  &row_block_size,
2432  &(grid->this_process_row),
2433  &iarow,
2434  &(grid->n_process_rows));
2435  const int nq0 = numroc_(&n_columns,
2436  &column_block_size,
2437  &(grid->this_process_column),
2438  &iacol,
2439  &(grid->n_process_columns));
2440 
2441  // type='M': compute largest absolute value
2442  // type='F' || type='E': compute Frobenius norm
2443  // type='O' || type='1': compute the 1-norm; type='I': compute the infinity norm
2444  int lwork = 0; // for type == 'M' || type == 'F' || type == 'E'
2445  if (type == 'O' || type == '1')
2446  lwork = nq0;
2447  else if (type == 'I')
2448  lwork = mp0;
2449 
2450  work.resize(lwork);
2451  const NumberType *A_loc = this->values.begin();
2452  res = plange(&type,
2453  &n_rows,
2454  &n_columns,
2455  A_loc,
2456  &submatrix_row,
2457  &submatrix_column,
2458  descriptor,
2459  work.data());
2460  }
2461  grid->send_to_inactive(&res);
2462  return res;
2463 }
2464 
2465 
2466 
2467 template <typename NumberType>
2468 NumberType
2469 ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
2470 {
2471  Assert(state == LAPACKSupport::matrix ||
2472  state == LAPACKSupport::inverse_matrix,
2473  ExcMessage("norms can be called in matrix state only."));
2474  Assert(property == LAPACKSupport::symmetric,
2475  ExcMessage("Matrix has to be symmetric for this operation."));
2476  std::lock_guard<std::mutex> lock(mutex);
2477  NumberType res = 0.;
2478 
2479  if (grid->mpi_process_is_active)
2480  {
2481  // int IROFFA = MOD( IA-1, MB_A )
2482  // int ICOFFA = MOD( JA-1, NB_A )
2483  const int lcm =
2484  ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
2485  const int v2 = lcm / (grid->n_process_rows);
2486 
2487  const int IAROW = indxg2p_(&submatrix_row,
2488  &row_block_size,
2489  &(grid->this_process_row),
2490  &first_process_row,
2491  &(grid->n_process_rows));
2492  const int IACOL = indxg2p_(&submatrix_column,
2493  &column_block_size,
2494  &(grid->this_process_column),
2495  &first_process_column,
2496  &(grid->n_process_columns));
2497  const int Np0 = numroc_(&n_columns /*+IROFFA*/,
2498  &row_block_size,
2499  &(grid->this_process_row),
2500  &IAROW,
2501  &(grid->n_process_rows));
2502  const int Nq0 = numroc_(&n_columns /*+ICOFFA*/,
2503  &column_block_size,
2504  &(grid->this_process_column),
2505  &IACOL,
2506  &(grid->n_process_columns));
2507 
2508  const int v1 = iceil_(&Np0, &row_block_size);
2509  const int ldw = (n_local_rows == n_local_columns) ?
2510  0 :
2511  row_block_size * iceil_(&v1, &v2);
2512 
2513  const int lwork =
2514  (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
2515  work.resize(lwork);
2516  const NumberType *A_loc = this->values.begin();
2517  res = plansy(&type,
2518  &uplo,
2519  &n_columns,
2520  A_loc,
2521  &submatrix_row,
2522  &submatrix_column,
2523  descriptor,
2524  work.data());
2525  }
2526  grid->send_to_inactive(&res);
2527  return res;
2528 }
2529 
2530 
2531 
2532 # ifdef DEAL_II_WITH_HDF5
2533 namespace internal
2534 {
2535  namespace
2536  {
2537  void
2538  create_HDF5_state_enum_id(hid_t &state_enum_id)
2539  {
2540  // create HDF5 enum type for LAPACKSupport::State
2541  LAPACKSupport::State val;
2542  state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));
2543  val = LAPACKSupport::State::cholesky;
2544  herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
2545  AssertThrow(status >= 0, ExcInternalError());
2546  val = LAPACKSupport::State::eigenvalues;
2547  status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
2548  AssertThrow(status >= 0, ExcInternalError());
2549  val = LAPACKSupport::State::inverse_matrix;
2550  status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
2551  AssertThrow(status >= 0, ExcInternalError());
2552  val = LAPACKSupport::State::inverse_svd;
2553  status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
2554  AssertThrow(status >= 0, ExcInternalError());
2555  val = LAPACKSupport::State::lu;
2556  status = H5Tenum_insert(state_enum_id, "lu", &val);
2557  AssertThrow(status >= 0, ExcInternalError());
2558  val = LAPACKSupport::State::matrix;
2559  status = H5Tenum_insert(state_enum_id, "matrix", &val);
2560  AssertThrow(status >= 0, ExcInternalError());
2561  val = LAPACKSupport::State::svd;
2562  status = H5Tenum_insert(state_enum_id, "svd", &val);
2563  AssertThrow(status >= 0, ExcInternalError());
2564  val = LAPACKSupport::State::unusable;
2565  status = H5Tenum_insert(state_enum_id, "unusable", &val);
2566  AssertThrow(status >= 0, ExcInternalError());
2567  }
2568 
2569  void
2570  create_HDF5_property_enum_id(hid_t &property_enum_id)
2571  {
2572  // create HDF5 enum type for LAPACKSupport::Property
2573  property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));
2574  LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
2575  herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
2576  AssertThrow(status >= 0, ExcInternalError());
2577  prop = LAPACKSupport::Property::general;
2578  status = H5Tenum_insert(property_enum_id, "general", &prop);
2579  AssertThrow(status >= 0, ExcInternalError());
2580  prop = LAPACKSupport::Property::hessenberg;
2581  status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
2582  AssertThrow(status >= 0, ExcInternalError());
2583  prop = LAPACKSupport::Property::lower_triangular;
2584  status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
2585  AssertThrow(status >= 0, ExcInternalError());
2586  prop = LAPACKSupport::Property::symmetric;
2587  status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
2588  AssertThrow(status >= 0, ExcInternalError());
2589  prop = LAPACKSupport::Property::upper_triangular;
2590  status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
2591  AssertThrow(status >= 0, ExcInternalError());
2592  }
2593  } // namespace
2594 } // namespace internal
2595 # endif
2596 
2597 
2598 
2599 template <typename NumberType>
2600 void
2601 ScaLAPACKMatrix<NumberType>::save(
2602  const std::string & filename,
2603  const std::pair<unsigned int, unsigned int> &chunk_size) const
2604 {
2605 # ifndef DEAL_II_WITH_HDF5
2606  (void)filename;
2607  (void)chunk_size;
2608  AssertThrow(false, ExcMessage("HDF5 support is disabled."));
2609 # else
2610 
2611  std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
2612 
2613  if (chunks_size_.first == numbers::invalid_unsigned_int ||
2614  chunks_size_.second == numbers::invalid_unsigned_int)
2615  {
2616  // default: store the matrix in chunks of columns
2617  chunks_size_.first = n_rows;
2618  chunks_size_.second = 1;
2619  }
2620  Assert(chunks_size_.first > 0,
2621  ExcMessage("The row chunk size must be larger than 0."));
2622  AssertIndexRange(chunks_size_.first, n_rows + 1);
2623  Assert(chunks_size_.second > 0,
2624  ExcMessage("The column chunk size must be larger than 0."));
2625  AssertIndexRange(chunks_size_.second, n_columns + 1);
2626 
2627 # ifdef H5_HAVE_PARALLEL
2628  // implementation for configurations equipped with a parallel file system
2629  save_parallel(filename, chunks_size_);
2630 
2631 # else
2632  // implementation for configurations with no parallel file system
2633  save_serial(filename, chunks_size_);
2634 
2635 # endif
2636 # endif
2637 }
2638 
2639 
2640 
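A usage sketch for save(); the file name and the 64x64 chunk size are illustration values, and omitting the second argument stores the matrix in chunks of whole columns, as set up above:

  // Writes the matrix together with its state and property to an HDF5 file.
  // Whether save_serial() or save_parallel() is used depends on whether HDF5
  // was built with MPI support (H5_HAVE_PARALLEL).
  A.save("matrix.h5", std::make_pair(64U, 64U));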
2641 template <typename NumberType>
2642 void
2643 ScaLAPACKMatrix<NumberType>::save_serial(
2644  const std::string & filename,
2645  const std::pair<unsigned int, unsigned int> &chunk_size) const
2646 {
2647 # ifndef DEAL_II_WITH_HDF5
2648  (void)filename;
2649  (void)chunk_size;
2650  Assert(false, ExcInternalError());
2651 # else
2652 
2653  /*
2654  * The content of the distributed matrix is copied to a matrix using a 1x1
2655  * process grid. Therefore, one process has all the data and can write it to a
2656  * file.
2657  *
2658  * Create a 1x1 column grid which will be used to initialize
2659  * an effectively serial ScaLAPACK matrix to gather the contents from the
2660  * current object
2661  */
2662  const auto column_grid =
2663  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2664  1,
2665  1);
2666 
2667  const int MB = n_rows, NB = n_columns;
2668  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2669  copy_to(tmp);
2670 
2671  // the 1x1 grid has only one process and this one writes
2672  // the content of the matrix to the HDF5 file
2673  if (tmp.grid->mpi_process_is_active)
2674  {
2675  herr_t status;
2676 
2677  // create a new file using default properties
2678  hid_t file_id =
2679  H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
2680 
2681  // modify dataset creation properties, i.e. enable chunking
2682  hsize_t chunk_dims[2];
2683  // reverse the order of rows and columns as ScaLAPACK uses column-major
2684  // ordering
2685  chunk_dims[0] = chunk_size.second;
2686  chunk_dims[1] = chunk_size.first;
2687  hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
2688  status = H5Pset_chunk(data_property, 2, chunk_dims);
2689  AssertThrow(status >= 0, ExcIO());
2690 
2691  // create the data space for the dataset
2692  hsize_t dims[2];
2693  // change order of rows and columns as ScaLAPACKMatrix uses column major
2694  // ordering
2695  dims[0] = n_columns;
2696  dims[1] = n_rows;
2697  hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
2698 
2699  // create the dataset within the file using chunk creation properties
2700  hid_t type_id = hdf5_type_id(tmp.values.data());
2701  hid_t dataset_id = H5Dcreate2(file_id,
2702  "/matrix",
2703  type_id,
2704  dataspace_id,
2705  H5P_DEFAULT,
2706  data_property,
2707  H5P_DEFAULT);
2708 
2709  // write the dataset
2710  status = H5Dwrite(
2711  dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
2712  AssertThrow(status >= 0, ExcIO());
2713 
2714  // create HDF5 enum type for LAPACKSupport::State and
2715  // LAPACKSupport::Property
2716  hid_t state_enum_id, property_enum_id;
2717  internal::create_HDF5_state_enum_id(state_enum_id);
2718  internal::create_HDF5_property_enum_id(property_enum_id);
2719 
2720  // create the data space for the state enum
2721  hsize_t dims_state[1];
2722  dims_state[0] = 1;
2723  hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2724  // create the dataset for the state enum
2725  hid_t state_enum_dataset = H5Dcreate2(file_id,
2726  "/state",
2727  state_enum_id,
2728  state_enum_dataspace,
2729  H5P_DEFAULT,
2730  H5P_DEFAULT,
2731  H5P_DEFAULT);
2732  // write the dataset for the state enum
2733  status = H5Dwrite(state_enum_dataset,
2734  state_enum_id,
2735  H5S_ALL,
2736  H5S_ALL,
2737  H5P_DEFAULT,
2738  &state);
2739  AssertThrow(status >= 0, ExcIO());
2740 
2741  // create the data space for the property enum
2742  hsize_t dims_property[1];
2743  dims_property[0] = 1;
2744  hid_t property_enum_dataspace =
2745  H5Screate_simple(1, dims_property, nullptr);
2746  // create the dataset for the property enum
2747  hid_t property_enum_dataset = H5Dcreate2(file_id,
2748  "/property",
2749  property_enum_id,
2750  property_enum_dataspace,
2751  H5P_DEFAULT,
2752  H5P_DEFAULT,
2753  H5P_DEFAULT);
2754  // write the dataset for the property enum
2755  status = H5Dwrite(property_enum_dataset,
2756  property_enum_id,
2757  H5S_ALL,
2758  H5S_ALL,
2759  H5P_DEFAULT,
2760  &property);
2761  AssertThrow(status >= 0, ExcIO());
2762 
2763  // end access to the datasets and release resources used by them
2764  status = H5Dclose(dataset_id);
2765  AssertThrow(status >= 0, ExcIO());
2766  status = H5Dclose(state_enum_dataset);
2767  AssertThrow(status >= 0, ExcIO());
2768  status = H5Dclose(property_enum_dataset);
2769  AssertThrow(status >= 0, ExcIO());
2770 
2771  // terminate access to the data spaces
2772  status = H5Sclose(dataspace_id);
2773  AssertThrow(status >= 0, ExcIO());
2774  status = H5Sclose(state_enum_dataspace);
2775  AssertThrow(status >= 0, ExcIO());
2776  status = H5Sclose(property_enum_dataspace);
2777  AssertThrow(status >= 0, ExcIO());
2778 
2779  // release enum data types
2780  status = H5Tclose(state_enum_id);
2781  AssertThrow(status >= 0, ExcIO());
2782  status = H5Tclose(property_enum_id);
2783  AssertThrow(status >= 0, ExcIO());
2784 
2785  // release the creation property
2786  status = H5Pclose(data_property);
2787  AssertThrow(status >= 0, ExcIO());
2788 
2789  // close the file.
2790  status = H5Fclose(file_id);
2791  AssertThrow(status >= 0, ExcIO());
2792  }
2793 # endif
2794 }
2795 
2796 
2797 
2798 template <typename NumberType>
2799 void
2800 ScaLAPACKMatrix<NumberType>::save_parallel(
2801  const std::string & filename,
2802  const std::pair<unsigned int, unsigned int> &chunk_size) const
2803 {
2804 # ifndef DEAL_II_WITH_HDF5
2805  (void)filename;
2806  (void)chunk_size;
2807  Assert(false, ExcInternalError());
2808 # else
2809 
2810  const unsigned int n_mpi_processes(
2811  Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
2812  MPI_Info info = MPI_INFO_NULL;
2813  /*
2814  * The content of the distributed matrix is copied to a matrix using a
2815  * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
2816  * of the matrix, which they can write to the file
2817  *
2818  * Create a 1xn_processes column grid
2819  */
2820  const auto column_grid =
2821  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2822  1,
2823  n_mpi_processes);
2824 
2825  const int MB = n_rows;
2826  /*
2827  * If the ratio n_columns/n_mpi_processes is smaller than the column block
2828  * size of the original matrix, the redistribution and saving of the matrix
2829  * requires a significant amount of MPI communication. Therefore, it is better
2830  * to set a minimum value for the block size NB, causing only
2831  * ceil(n_columns/NB) processes to be actively involved in saving the matrix.
2832  * Example: A 2*10^9 x 400 matrix is distributed on a 80 x 5 process grid
2833  * using block size 32. Instead of distributing the matrix on a 1 x 400
2834  * process grid with a row block size of 2*10^9 and a column block size of 1,
2835  * the minimum value for NB means that only ceil(400/32)=13 processes will be
2836  * writing the matrix to disk.
2837  */
2838  const int NB = std::max(static_cast<int>(std::ceil(
2839  static_cast<double>(n_columns) / n_mpi_processes)),
2840  column_block_size);
2841 
2842  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2843  copy_to(tmp);
2844 
2845  // get pointer to data held by the process
2846  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
2847 
2848  herr_t status;
2849  // dataset dimensions
2850  hsize_t dims[2];
2851 
2852  // set up file access property list with parallel I/O access
2853  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
2854  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2855  AssertThrow(status >= 0, ExcIO());
2856 
2857  // create a new file collectively and release property list identifier
2858  hid_t file_id =
2859  H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
2860  status = H5Pclose(plist_id);
2861  AssertThrow(status >= 0, ExcIO());
2862 
2863  // As ScaLAPACK, and therefore the class ScaLAPACKMatrix, uses column-major
2864  // ordering, whereas HDF5 uses row-major ordering, we have to reverse entries
2865  // related to columns and rows in the following. Create the dataspace for the dataset.
2866  dims[0] = tmp.n_columns;
2867  dims[1] = tmp.n_rows;
2868 
2869  hid_t filespace = H5Screate_simple(2, dims, nullptr);
2870 
2871  // create the chunked dataset with default properties and close filespace
2872  hsize_t chunk_dims[2];
2873  // reverse the order of rows and columns as ScaLAPACK uses column-major ordering
2874  chunk_dims[0] = chunk_size.second;
2875  chunk_dims[1] = chunk_size.first;
2876  plist_id = H5Pcreate(H5P_DATASET_CREATE);
2877  H5Pset_chunk(plist_id, 2, chunk_dims);
2878  hid_t type_id = hdf5_type_id(data);
2879  hid_t dset_id = H5Dcreate2(
2880  file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
2881 
2882  status = H5Sclose(filespace);
2883  AssertThrow(status >= 0, ExcIO());
2884 
2885  status = H5Pclose(plist_id);
2886  AssertThrow(status >= 0, ExcIO());
2887 
2888  // gather the number of local rows and columns from all processes
2889  std::vector<int> proc_n_local_rows(n_mpi_processes),
2890  proc_n_local_columns(n_mpi_processes);
2891  int ierr = MPI_Allgather(&tmp.n_local_rows,
2892  1,
2893  MPI_INT,
2894  proc_n_local_rows.data(),
2895  1,
2896  MPI_INT,
2897  tmp.grid->mpi_communicator);
2898  AssertThrowMPI(ierr);
2899  ierr = MPI_Allgather(&tmp.n_local_columns,
2900  1,
2901  MPI_INT,
2902  proc_n_local_columns.data(),
2903  1,
2904  MPI_INT,
2905  tmp.grid->mpi_communicator);
2906  AssertThrowMPI(ierr);
2907 
2908  const unsigned int my_rank(
2909  Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
2910 
2911  // hyperslab selection parameters
2912  // each process defines dataset in memory and writes it to the hyperslab in
2913  // the file
2914  hsize_t count[2];
2915  count[0] = tmp.n_local_columns;
2916  count[1] = tmp.n_rows;
2917  hid_t memspace = H5Screate_simple(2, count, nullptr);
2918 
2919  hsize_t offset[2] = {0};
2920  for (unsigned int i = 0; i < my_rank; ++i)
2921  offset[0] += proc_n_local_columns[i];
2922 
2923  // select hyperslab in the file.
2924  filespace = H5Dget_space(dset_id);
2925  status = H5Sselect_hyperslab(
2926  filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2927  AssertThrow(status >= 0, ExcIO());
2928 
2929  // create property list for independent dataset write
2930  plist_id = H5Pcreate(H5P_DATASET_XFER);
2931  status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
2932  AssertThrow(status >= 0, ExcIO());
2933 
2934  // process with no data will not participate in writing to the file
2935  if (tmp.values.size() > 0)
2936  {
2937  status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
2938  AssertThrow(status >= 0, ExcIO());
2939  }
2940  // close/release sources
2941  status = H5Dclose(dset_id);
2942  AssertThrow(status >= 0, ExcIO());
2943  status = H5Sclose(filespace);
2944  AssertThrow(status >= 0, ExcIO());
2945  status = H5Sclose(memspace);
2946  AssertThrow(status >= 0, ExcIO());
2947  status = H5Pclose(plist_id);
2948  AssertThrow(status >= 0, ExcIO());
2949  status = H5Fclose(file_id);
2950  AssertThrow(status >= 0, ExcIO());
2951 
2952  // before writing the state and property to file wait for
2953  // all processes to finish writing the matrix content to the file
2954  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
2955  AssertThrowMPI(ierr);
2956 
2957  // only root process will write state and property to the file
2958  if (tmp.grid->this_mpi_process == 0)
2959  {
2960  // open file using default properties
2961  hid_t file_id_reopen =
2962  H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
2963 
2964  // create HDF5 enum type for LAPACKSupport::State and
2965  // LAPACKSupport::Property
2966  hid_t state_enum_id, property_enum_id;
2967  internal::create_HDF5_state_enum_id(state_enum_id);
2968  internal::create_HDF5_property_enum_id(property_enum_id);
2969 
2970  // create the data space for the state enum
2971  hsize_t dims_state[1];
2972  dims_state[0] = 1;
2973  hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2974  // create the dataset for the state enum
2975  hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
2976  "/state",
2977  state_enum_id,
2978  state_enum_dataspace,
2979  H5P_DEFAULT,
2980  H5P_DEFAULT,
2981  H5P_DEFAULT);
2982  // write the dataset for the state enum
2983  status = H5Dwrite(state_enum_dataset,
2984  state_enum_id,
2985  H5S_ALL,
2986  H5S_ALL,
2987  H5P_DEFAULT,
2988  &state);
2989  AssertThrow(status >= 0, ExcIO());
2990 
2991  // create the data space for the property enum
2992  hsize_t dims_property[1];
2993  dims_property[0] = 1;
2994  hid_t property_enum_dataspace =
2995  H5Screate_simple(1, dims_property, nullptr);
2996  // create the dataset for the property enum
2997  hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
2998  "/property",
2999  property_enum_id,
3000  property_enum_dataspace,
3001  H5P_DEFAULT,
3002  H5P_DEFAULT,
3003  H5P_DEFAULT);
3004  // write the dataset for the property enum
3005  status = H5Dwrite(property_enum_dataset,
3006  property_enum_id,
3007  H5S_ALL,
3008  H5S_ALL,
3009  H5P_DEFAULT,
3010  &property);
3011  AssertThrow(status >= 0, ExcIO());
3012 
3013  status = H5Dclose(state_enum_dataset);
3014  AssertThrow(status >= 0, ExcIO());
3015  status = H5Dclose(property_enum_dataset);
3016  AssertThrow(status >= 0, ExcIO());
3017  status = H5Sclose(state_enum_dataspace);
3018  AssertThrow(status >= 0, ExcIO());
3019  status = H5Sclose(property_enum_dataspace);
3020  AssertThrow(status >= 0, ExcIO());
3021  status = H5Tclose(state_enum_id);
3022  AssertThrow(status >= 0, ExcIO());
3023  status = H5Tclose(property_enum_id);
3024  AssertThrow(status >= 0, ExcIO());
3025  status = H5Fclose(file_id_reopen);
3026  AssertThrow(status >= 0, ExcIO());
3027  }
3028 
3029 # endif
3030 }
3031 
3032 
3033 
3034 template <typename NumberType>
3035 void
3036 ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
3037 {
3038 # ifndef DEAL_II_WITH_HDF5
3039  (void)filename;
3040  AssertThrow(false, ExcMessage("HDF5 support is disabled."));
3041 # else
3042 # ifdef H5_HAVE_PARALLEL
3043  // implementation for configurations equipped with a parallel file system
3044  load_parallel(filename);
3045 
3046 # else
3047  // implementation for configurations with no parallel file system
3048  load_serial(filename);
3049 # endif
3050 # endif
3051 }
3052 
3053 
3054 
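A usage sketch for load(); the receiving matrix must already be initialized with the global dimensions stored in the file, while its block-cyclic distribution may differ from the one used when saving. The names and sizes below are illustrative and assumed to exist:

  dealii::ScaLAPACKMatrix<double> B(n_rows, n_columns, grid, 32, 32);
  B.load("matrix.h5"); // also restores the stored state and property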
3055 template <typename NumberType>
3056 void
3057 ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
3058 {
3059 # ifndef DEAL_II_WITH_HDF5
3060  (void)filename;
3061  Assert(false, ExcInternalError());
3062 # else
3063 
3064  /*
3065  * The content of the file is read by a single process into a matrix using a
3066  * 1x1 process grid. Therefore, one process holds all the data and distributes
3067  * it to the current object afterwards
3068  */
3069  // create a 1x1 process grid
3070  const auto one_grid =
3071  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3072  1,
3073  1);
3074 
3075  const int MB = n_rows, NB = n_columns;
3076  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);
3077 
3078  int state_int = -1;
3079  int property_int = -1;
3080 
3081  // the 1x1 grid has only one process and this one reads
3082  // the content of the matrix from the HDF5 file
3083  if (tmp.grid->mpi_process_is_active)
3084  {
3085  herr_t status;
3086 
3087  // open the file in read-only mode
3088  hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
3089 
3090  // open the dataset in the file
3091  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3092 
3093  // check the datatype of the data in the file
3094  // datatype of source and destination must have the same class
3095  // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3096  // Selection
3097  hid_t datatype = H5Dget_type(dataset_id);
3098  H5T_class_t t_class_in = H5Tget_class(datatype);
3099  H5T_class_t t_class = H5Tget_class(hdf5_type_id(tmp.values.data()));
3100  AssertThrow(
3101  t_class_in == t_class,
3102  ExcMessage(
3103  "The data type of the matrix to be read does not match the archive"));
3104 
3105  // get dataspace handle
3106  hid_t dataspace_id = H5Dget_space(dataset_id);
3107  // get number of dimensions
3108  const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3109  AssertThrow(ndims == 2, ExcIO());
3110  // get every dimension
3111  hsize_t dims[2];
3112  H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3113  AssertThrow(
3114  static_cast<int>(dims[0]) == n_columns,
3115  ExcMessage(
3116  "The number of columns of the matrix does not match the content of the archive"));
3117  AssertThrow(
3118  static_cast<int>(dims[1]) == n_rows,
3119  ExcMessage(
3120  "The number of rows of the matrix does not match the content of the archive"));
3121 
3122  // read data
3123  status = H5Dread(dataset_id,
3124  hdf5_type_id(tmp.values.data()),
3125  H5S_ALL,
3126  H5S_ALL,
3127  H5P_DEFAULT,
3128  tmp.values.data());
3129  AssertThrow(status >= 0, ExcIO());
3130 
3131  // create HDF5 enum type for LAPACKSupport::State and
3132  // LAPACKSupport::Property
3133  hid_t state_enum_id, property_enum_id;
3134  internal::create_HDF5_state_enum_id(state_enum_id);
3135  internal::create_HDF5_property_enum_id(property_enum_id);
3136 
3137  // open the datasets for the state and property enum in the file
3138  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3139  hid_t datatype_state = H5Dget_type(dataset_state_id);
3140  H5T_class_t t_class_state = H5Tget_class(datatype_state);
3141  AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3142 
3143  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3144  hid_t datatype_property = H5Dget_type(dataset_property_id);
3145  H5T_class_t t_class_property = H5Tget_class(datatype_property);
3146  AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3147 
3148  // get dataspace handles
3149  hid_t dataspace_state = H5Dget_space(dataset_state_id);
3150  hid_t dataspace_property = H5Dget_space(dataset_property_id);
3151  // get number of dimensions
3152  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3153  AssertThrow(ndims_state == 1, ExcIO());
3154  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3155  AssertThrow(ndims_property == 1, ExcIO());
3156  // get every dimension
3157  hsize_t dims_state[1];
3158  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3159  AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3160  hsize_t dims_property[1];
3161  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3162  AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3163 
3164  // read data
3165  status = H5Dread(dataset_state_id,
3166  state_enum_id,
3167  H5S_ALL,
3168  H5S_ALL,
3169  H5P_DEFAULT,
3170  &tmp.state);
3171  AssertThrow(status >= 0, ExcIO());
3172  // To send the state from the root process to the other processes
3173  // the state enum is cast to an integer, which will be broadcast and
3174  // subsequently cast back to the enum type
3175  state_int = static_cast<int>(tmp.state);
3176 
3177  status = H5Dread(dataset_property_id,
3178  property_enum_id,
3179  H5S_ALL,
3180  H5S_ALL,
3181  H5P_DEFAULT,
3182  &tmp.property);
3183  AssertThrow(status >= 0, ExcIO());
3184  // To send the property from the root process to the other processes
3185  // the property enum is cast to an integer, which will be broadcast and
3186  // subsequently cast back to the enum type
3187  property_int = static_cast<int>(tmp.property);
3188 
3189  // terminate access to the data spaces
3190  status = H5Sclose(dataspace_id);
3191  AssertThrow(status >= 0, ExcIO());
3192  status = H5Sclose(dataspace_state);
3193  AssertThrow(status >= 0, ExcIO());
3194  status = H5Sclose(dataspace_property);
3195  AssertThrow(status >= 0, ExcIO());
3196 
3197  // release data type handles
3198  status = H5Tclose(datatype);
3199  AssertThrow(status >= 0, ExcIO());
3200  status = H5Tclose(state_enum_id);
3201  AssertThrow(status >= 0, ExcIO());
3202  status = H5Tclose(property_enum_id);
3203  AssertThrow(status >= 0, ExcIO());
3204 
3205  // end access to the data sets and release resources used by them
3206  status = H5Dclose(dataset_state_id);
3207  AssertThrow(status >= 0, ExcIO());
3208  status = H5Dclose(dataset_id);
3209  AssertThrow(status >= 0, ExcIO());
3210  status = H5Dclose(dataset_property_id);
3211  AssertThrow(status >= 0, ExcIO());
3212 
3213  // close the file.
3214  status = H5Fclose(file_id);
3215  AssertThrow(status >= 0, ExcIO());
3216  }
3217  // so far only the root process has the correct state integer --> broadcasting
3218  tmp.grid->send_to_inactive(&state_int, 1);
3219  // so far only the root process has the correct property integer -->
3220  // broadcasting
3221  tmp.grid->send_to_inactive(&property_int, 1);
3222 
3223  tmp.state = static_cast<LAPACKSupport::State>(state_int);
3224  tmp.property = static_cast<LAPACKSupport::Property>(property_int);
3225 
3226  tmp.copy_to(*this);
3227 
3228 # endif // DEAL_II_WITH_HDF5
3229 }
3230 
3231 
3232 
3233 template <typename NumberType>
3234 void
3235 ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
3236 {
3237 # ifndef DEAL_II_WITH_HDF5
3238  (void)filename;
3239  Assert(false, ExcInternalError());
3240 # else
3241 # ifndef H5_HAVE_PARALLEL
3242  Assert(false, ExcInternalError());
3243 # else
3244 
3245  const unsigned int n_mpi_processes(
3246  Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
3247  MPI_Info info = MPI_INFO_NULL;
3248  /*
3249  * The content of the file is read into a matrix using a
3250  * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
3251  * of the matrix, which they can read from the file
3252  */
3253  // create a 1xP column grid with P being the number of MPI processes
3254  const auto column_grid =
3255  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3256  1,
3257  n_mpi_processes);
3258 
3259  const int MB = n_rows;
3260  // for the choice of NB see explanation in save_parallel()
3261  const int NB = std::max(static_cast<int>(std::ceil(
3262  static_cast<double>(n_columns) / n_mpi_processes)),
3263  column_block_size);
3264 
3265  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
3266 
3267  // get pointer to data held by the process
3268  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
3269 
3270  herr_t status;
3271 
3272  // set up file access property list with parallel I/O access
3273  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
3274  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
3275  AssertThrow(status >= 0, ExcIO());
3276 
3277  // open file collectively in read-only mode and release property list
3278  // identifier
3279  hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
3280  status = H5Pclose(plist_id);
3281  AssertThrow(status >= 0, ExcIO());
3282 
3283  // open the dataset in the file collectively
3284  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3285 
3286  // check the datatype of the dataset in the file
3287  // if the classes of type of the dataset and the matrix do not match abort
3288  // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3289  // Selection
3290  hid_t datatype = hdf5_type_id(data);
3291  hid_t datatype_inp = H5Dget_type(dataset_id);
3292  H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
3293  H5T_class_t t_class = H5Tget_class(datatype);
3294  AssertThrow(
3295  t_class_inp == t_class,
3296  ExcMessage(
3297  "The data type of the matrix to be read does not match the archive"));
3298 
3299  // get the dimensions of the matrix stored in the file
3300  // get dataspace handle
3301  hid_t dataspace_id = H5Dget_space(dataset_id);
3302  // get number of dimensions
3303  const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3304  AssertThrow(ndims == 2, ExcIO());
3305  // get every dimension
3306  hsize_t dims[2];
3307  status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3308  AssertThrow(status >= 0, ExcIO());
3309  AssertThrow(
3310  static_cast<int>(dims[0]) == n_columns,
3311  ExcMessage(
3312  "The number of columns of the matrix does not match the content of the archive"));
3313  AssertThrow(
3314  static_cast<int>(dims[1]) == n_rows,
3315  ExcMessage(
3316  "The number of rows of the matrix does not match the content of the archive"));
3317 
3318  // gather the number of local rows and columns from all processes
3319  std::vector<int> proc_n_local_rows(n_mpi_processes),
3320  proc_n_local_columns(n_mpi_processes);
3321  int ierr = MPI_Allgather(&tmp.n_local_rows,
3322  1,
3323  MPI_INT,
3324  proc_n_local_rows.data(),
3325  1,
3326  MPI_INT,
3327  tmp.grid->mpi_communicator);
3328  AssertThrowMPI(ierr);
3329  ierr = MPI_Allgather(&tmp.n_local_columns,
3330  1,
3331  MPI_INT,
3332  proc_n_local_columns.data(),
3333  1,
3334  MPI_INT,
3335  tmp.grid->mpi_communicator);
3336  AssertThrowMPI(ierr);
3337 
3338  const unsigned int my_rank(
3339  Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
3340 
3341  // hyperslab selection parameters
3342  // each process defines its dataset in memory and reads its part of the
3343  // hyperslab from the file
3344  hsize_t count[2];
3345  count[0] = tmp.n_local_columns;
3346  count[1] = tmp.n_local_rows;
3347 
3348  hsize_t offset[2] = {0};
3349  for (unsigned int i = 0; i < my_rank; ++i)
3350  offset[0] += proc_n_local_columns[i];
3351 
3352  // select hyperslab in the file
3353  status = H5Sselect_hyperslab(
3354  dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
3355  AssertThrow(status >= 0, ExcIO());
3356 
3357  // create a memory dataspace independently
3358  hid_t memspace = H5Screate_simple(2, count, nullptr);
3359 
3360  // read data independently
3361  status =
3362  H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
3363  AssertThrow(status >= 0, ExcIO());
3364 
3365  // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
3366  hid_t state_enum_id, property_enum_id;
3367  internal::create_HDF5_state_enum_id(state_enum_id);
3368  internal::create_HDF5_property_enum_id(property_enum_id);
3369 
3370  // open the datasets for the state and property enum in the file
3371  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3372  hid_t datatype_state = H5Dget_type(dataset_state_id);
3373  H5T_class_t t_class_state = H5Tget_class(datatype_state);
3374  AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3375 
3376  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3377  hid_t datatype_property = H5Dget_type(dataset_property_id);
3378  H5T_class_t t_class_property = H5Tget_class(datatype_property);
3379  AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3380 
3381  // get dataspace handles
3382  hid_t dataspace_state = H5Dget_space(dataset_state_id);
3383  hid_t dataspace_property = H5Dget_space(dataset_property_id);
3384  // get number of dimensions
3385  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3386  AssertThrow(ndims_state == 1, ExcIO());
3387  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3388  AssertThrow(ndims_property == 1, ExcIO());
3389  // get every dimension
3390  hsize_t dims_state[1];
3391  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3392  AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3393  hsize_t dims_property[1];
3394  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3395  AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3396 
3397  // read data
3398  status = H5Dread(
3399  dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
3400  AssertThrow(status >= 0, ExcIO());
3401 
3402  status = H5Dread(dataset_property_id,
3403  property_enum_id,
3404  H5S_ALL,
3405  H5S_ALL,
3406  H5P_DEFAULT,
3407  &tmp.property);
3408  AssertThrow(status >= 0, ExcIO());
3409 
3410  // close/release sources
3411  status = H5Sclose(memspace);
3412  AssertThrow(status >= 0, ExcIO());
3413  status = H5Dclose(dataset_id);
3414  AssertThrow(status >= 0, ExcIO());
3415  status = H5Dclose(dataset_state_id);
3416  AssertThrow(status >= 0, ExcIO());
3417  status = H5Dclose(dataset_property_id);
3418  AssertThrow(status >= 0, ExcIO());
3419  status = H5Sclose(dataspace_id);
3420  AssertThrow(status >= 0, ExcIO());
3421  status = H5Sclose(dataspace_state);
3422  AssertThrow(status >= 0, ExcIO());
3423  status = H5Sclose(dataspace_property);
3424  AssertThrow(status >= 0, ExcIO());
3425  // status = H5Tclose(datatype);
3426  // AssertThrow(status >= 0, ExcIO());
3427  status = H5Tclose(state_enum_id);
3428  AssertThrow(status >= 0, ExcIO());
3429  status = H5Tclose(property_enum_id);
3430  AssertThrow(status >= 0, ExcIO());
3431  status = H5Fclose(file_id);
3432  AssertThrow(status >= 0, ExcIO());
3433 
3434  // copying the distributed matrices
3435  tmp.copy_to(*this);
3436 
3437 # endif // H5_HAVE_PARALLEL
3438 # endif // DEAL_II_WITH_HDF5
3439 }
3440 
3441 
3442 
3443 namespace internal
3444 {
3445  namespace
3446  {
3447  template <typename NumberType>
3448  void
3449  scale_columns(ScaLAPACKMatrix<NumberType> & matrix,
3450  const ArrayView<const NumberType> &factors)
3451  {
3452  Assert(matrix.n() == factors.size(),
3453  ExcDimensionMismatch(matrix.n(), factors.size()));
3454 
3455  for (unsigned int i = 0; i < matrix.local_n(); ++i)
3456  {
3457  const NumberType s = factors[matrix.global_column(i)];
3458 
3459  for (unsigned int j = 0; j < matrix.local_m(); ++j)
3460  matrix.local_el(j, i) *= s;
3461  }
3462  }
3463 
3464  template <typename NumberType>
3465  void
3466  scale_rows(ScaLAPACKMatrix<NumberType> & matrix,
3467  const ArrayView<const NumberType> &factors)
3468  {
3469  Assert(matrix.m() == factors.size(),
3470  ExcDimensionMismatch(matrix.m(), factors.size()));
3471 
3472  for (unsigned int i = 0; i < matrix.local_m(); ++i)
3473  {
3474  const NumberType s = factors[matrix.global_row(i)];
3475 
3476  for (unsigned int j = 0; j < matrix.local_n(); ++j)
3477  matrix.local_el(i, j) *= s;
3478  }
3479  }
3480 
3481  } // namespace
3482 } // namespace internal
3483 
3484 
3485 
3486 template <typename NumberType>
3487 template <class InputVector>
3488 void
3489 ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
3490 {
3491  if (this->grid->mpi_process_is_active)
3492  internal::scale_columns(*this, make_array_view(factors));
3493 }
3494 
3495 
3496 
3497 template <typename NumberType>
3498 template <class InputVector>
3499 void
3500 ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
3501 {
3502  if (this->grid->mpi_process_is_active)
3503  internal::scale_rows(*this, make_array_view(factors));
3504 }
3505 
3506 
3507 
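The two public functions above apply a diagonal scaling purely locally: every process multiplies its locally owned entries by the factor belonging to the corresponding global row or column. A short sketch, assuming A is an existing ScaLAPACKMatrix<double>; the factor values are arbitrary illustration values:

  // A <- A * D with D = 2 * I: the factor vector needs A.n() entries and is
  // replicated on every process
  std::vector<double> column_factors(A.n(), 2.0);
  A.scale_columns(column_factors);

  // A <- D * A with D = 0.5 * I: analogously with A.m() entries
  std::vector<double> row_factors(A.m(), 0.5);
  A.scale_rows(row_factors);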
3508 // instantiations
3509 # include "scalapack.inst"
3510 
3511 
3512 DEAL_II_NAMESPACE_CLOSE
3513 
3514 #endif // DEAL_II_WITH_SCALAPACK