Reference documentation for deal.II version GIT 1e9e64a91d 2022-09-28 19:20:02+00:00
scalapack.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2017 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/lac/scalapack.h>
18 
19 #ifdef DEAL_II_WITH_SCALAPACK
20 
21 # include <deal.II/base/array_view.h>
22 # include <deal.II/base/mpi.h>
23 # include <deal.II/base/mpi.templates.h>
24 
25 # include <deal.II/lac/scalapack.templates.h>
26 
27 # ifdef DEAL_II_WITH_HDF5
28 # include <hdf5.h>
29 # endif
30 
31 # include <limits>
32 # include <memory>
33 
34 DEAL_II_NAMESPACE_OPEN
35 
36 # ifdef DEAL_II_WITH_HDF5
37 
38 template <typename number>
39 inline hid_t
40 hdf5_type_id(const number *)
41 {
42  Assert(false, dealii::ExcNotImplemented());
43  // don't know what to put here; it does not matter
44  return -1;
45 }
46 
47 inline hid_t
48 hdf5_type_id(const double *)
49 {
50  return H5T_NATIVE_DOUBLE;
51 }
52 
53 inline hid_t
54 hdf5_type_id(const float *)
55 {
56  return H5T_NATIVE_FLOAT;
57 }
58 
59 inline hid_t
60 hdf5_type_id(const int *)
61 {
62  return H5T_NATIVE_INT;
63 }
64 
65 inline hid_t
66 hdf5_type_id(const unsigned int *)
67 {
68  return H5T_NATIVE_UINT;
69 }
70 
71 inline hid_t
72 hdf5_type_id(const char *)
73 {
74  return H5T_NATIVE_CHAR;
75 }
76 # endif // DEAL_II_WITH_HDF5
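// A minimal usage sketch for the overload set above (not part of this file;
// the helper below is hypothetical): templated HDF5 I/O code can let overload
// resolution pick the matching native type instead of hard-coding H5T_NATIVE_*.
//
//   template <typename number>
//   herr_t
//   read_dataset_example(const hid_t dataset, std::vector<number> &buffer)
//   {
//     // hdf5_type_id(buffer.data()) yields e.g. H5T_NATIVE_DOUBLE for double
//     return H5Dread(dataset,
//                    hdf5_type_id(buffer.data()),
//                    H5S_ALL,
//                    H5S_ALL,
//                    H5P_DEFAULT,
//                    buffer.data());
//   }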
77 
78 
79 
80 template <typename NumberType>
81 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
82  const size_type n_rows_,
83  const size_type n_columns_,
84  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
85  const size_type row_block_size_,
86  const size_type column_block_size_,
87  const LAPACKSupport::Property property_)
88  : uplo('L')
89  , // for non-symmetric matrices this is not needed
90  first_process_row(0)
91  , first_process_column(0)
92  , submatrix_row(1)
93  , submatrix_column(1)
94 {
95  reinit(n_rows_,
96  n_columns_,
97  process_grid,
98  row_block_size_,
99  column_block_size_,
100  property_);
101 }
102 
103 
104 
105 template <typename NumberType>
106 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
107  const size_type size,
108  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
109  const size_type block_size,
110  const LAPACKSupport::Property property)
111  : ScaLAPACKMatrix<NumberType>(size,
112  size,
113  process_grid,
114  block_size,
115  block_size,
116  property)
117 {}
118 
119 
120 
121 template <typename NumberType>
122 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
123  const std::string & filename,
124  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
125  const size_type row_block_size,
126  const size_type column_block_size)
127  : uplo('L')
128  , // for non-symmetric matrices this is not needed
129  first_process_row(0)
130  , first_process_column(0)
131  , submatrix_row(1)
132  , submatrix_column(1)
133 {
134 # ifndef DEAL_II_WITH_HDF5
135  (void)filename;
136  (void)process_grid;
137  (void)row_block_size;
138  (void)column_block_size;
139  Assert(
140  false,
141  ExcMessage(
142  "This function is only available when deal.II is configured with HDF5"));
143 # else
144 
145  const unsigned int this_mpi_process(
146  Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));
147 
148  // Before reading the content from disk the root process determines the
149  // dimensions of the matrix. Subsequently, memory is allocated by a call to
150  // reinit() and the matrix is loaded by a call to load().
151  if (this_mpi_process == 0)
152  {
153  herr_t status = 0;
154 
155  // open file in read-only mode
156  hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
157  AssertThrow(file >= 0, ExcIO());
158 
159  // get data set in file
160  hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);
161  AssertThrow(dataset >= 0, ExcIO());
162 
163  // determine file space
164  hid_t filespace = H5Dget_space(dataset);
165 
166  // get number of dimensions in data set
167  int rank = H5Sget_simple_extent_ndims(filespace);
168  AssertThrow(rank == 2, ExcIO());
169  hsize_t dims[2];
170  status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
171  AssertThrow(status >= 0, ExcIO());
172 
173  // due to ScaLAPACK's column-major memory layout the dimensions are
174  // swapped
175  n_rows = dims[1];
176  n_columns = dims[0];
177 
178  // close/release resources
179  status = H5Sclose(filespace);
180  AssertThrow(status >= 0, ExcIO());
181  status = H5Dclose(dataset);
182  AssertThrow(status >= 0, ExcIO());
183  status = H5Fclose(file);
184  AssertThrow(status >= 0, ExcIO());
185  }
186  int ierr = MPI_Bcast(&n_rows,
187  1,
188  Utilities::MPI::mpi_type_id_for_type<decltype(n_rows)>,
189  0 /*from root*/,
190  process_grid->mpi_communicator);
191  AssertThrowMPI(ierr);
192 
193  ierr = MPI_Bcast(&n_columns,
194  1,
195  Utilities::MPI::mpi_type_id_for_type<decltype(n_columns)>,
196  0 /*from root*/,
197  process_grid->mpi_communicator);
198  AssertThrowMPI(ierr);
199 
200  // the property will be overwritten by the subsequent call to load()
201  reinit(n_rows,
202  n_columns,
203  process_grid,
204  row_block_size,
205  column_block_size,
206  LAPACKSupport::Property::general);
207 
208  load(filename.c_str());
209 
210 # endif // DEAL_II_WITH_HDF5
211 }
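// Usage sketch for the file-based constructor above (assumes deal.II was
// configured with HDF5 and that "matrix.h5" was written earlier by
// ScaLAPACKMatrix::save()); file name and grid layout are examples only:
//
//   const auto grid =
//     std::make_shared<Utilities::MPI::ProcessGrid>(MPI_COMM_WORLD, 2, 2);
//   // the matrix dimensions are read from the file on rank 0 and broadcast,
//   // so only the block sizes have to be chosen here
//   ScaLAPACKMatrix<double> A("matrix.h5", grid, /*row_block_size=*/32);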
212 
213 
214 
215 template <typename NumberType>
216 void
217 ScaLAPACKMatrix<NumberType>::reinit(
218  const size_type n_rows_,
219  const size_type n_columns_,
220  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
221  const size_type row_block_size_,
222  const size_type column_block_size_,
223  const LAPACKSupport::Property property_)
224 {
225  Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
226  Assert(column_block_size_ > 0,
227  ExcMessage("Column block size has to be positive."));
228  Assert(
229  row_block_size_ <= n_rows_,
230  ExcMessage(
231  "Row block size can not be greater than the number of rows of the matrix"));
232  Assert(
233  column_block_size_ <= n_columns_,
234  ExcMessage(
235  "Column block size can not be greater than the number of columns of the matrix"));
236 
237  state = LAPACKSupport::State::matrix;
238  property = property_;
239  grid = process_grid;
240  n_rows = n_rows_;
241  n_columns = n_columns_;
242  row_block_size = row_block_size_;
243  column_block_size = column_block_size_;
244 
245  if (grid->mpi_process_is_active)
246  {
247  // Get local sizes:
248  n_local_rows = numroc_(&n_rows,
249  &row_block_size,
250  &(grid->this_process_row),
251  &first_process_row,
252  &(grid->n_process_rows));
253  n_local_columns = numroc_(&n_columns,
254  &column_block_size,
255  &(grid->this_process_column),
256  &first_process_column,
257  &(grid->n_process_columns));
258 
259  // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different
260  // between processes
261  int lda = std::max(1, n_local_rows);
262 
263  int info = 0;
264  descinit_(descriptor,
265  &n_rows,
266  &n_columns,
267  &row_block_size,
268  &column_block_size,
269  &first_process_row,
270  &first_process_column,
271  &(grid->blacs_context),
272  &lda,
273  &info);
274  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
275 
276  this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
277  }
278  else
279  {
280  // set process-local variables to something telling:
281  n_local_rows = -1;
282  n_local_columns = -1;
283  std::fill(std::begin(descriptor), std::end(descriptor), -1);
284  }
285 }
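// Worked example for the local sizes computed by numroc_ in reinit() above:
// with n_rows = 10, row_block_size = 2 and a grid with 2 process rows, the
// five row blocks are dealt out cyclically, so process row 0 owns blocks
// {0, 2, 4} and numroc_ returns n_local_rows = 6, while process row 1 owns
// blocks {1, 3} and gets n_local_rows = 4. The column sizes are obtained in
// exactly the same way from column_block_size and n_process_columns.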
286 
287 
288 
289 template <typename NumberType>
290 void
291 ScaLAPACKMatrix<NumberType>::reinit(
292  const size_type size,
293  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
294  const size_type block_size,
295  const LAPACKSupport::Property property)
296 {
297  reinit(size, size, process_grid, block_size, block_size, property);
298 }
299 
300 
301 
302 template <typename NumberType>
303 void
304 ScaLAPACKMatrix<NumberType>::set_property(
305  const LAPACKSupport::Property property_)
306 {
307  property = property_;
308 }
309 
310 
311 
312 template <typename NumberType>
313 LAPACKSupport::Property
314 ScaLAPACKMatrix<NumberType>::get_property() const
315 {
316  return property;
317 }
318 
319 
320 
321 template <typename NumberType>
322 LAPACKSupport::State
323 ScaLAPACKMatrix<NumberType>::get_state() const
324 {
325  return state;
326 }
327 
328 
329 
330 template <typename NumberType>
331 ScaLAPACKMatrix<NumberType> &
332 ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
333 {
334  // FIXME: another way to copy is to use pdgeadd_ PBLAS routine.
335  // This routine computes the sum of two matrices B := a*A + b*B.
336  // Matrices can have different distributions, in particular matrix A can
337  // be owned by only one process, so we can set a=1 and b=0 to copy
338  // non-distributed matrix A into distributed matrix B.
339  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
340  Assert(n_columns == int(matrix.n()),
341  ExcDimensionMismatch(n_columns, matrix.n()));
342 
343  if (grid->mpi_process_is_active)
344  {
345  for (int i = 0; i < n_local_rows; ++i)
346  {
347  const int glob_i = global_row(i);
348  for (int j = 0; j < n_local_columns; ++j)
349  {
350  const int glob_j = global_column(j);
351  local_el(i, j) = matrix(glob_i, glob_j);
352  }
353  }
354  }
355  state = LAPACKSupport::matrix;
356  return *this;
357 }
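// Usage sketch for the copy assignment above (variable names are examples;
// the FullMatrix must hold identical entries on every process of the grid's
// communicator, since each process reads from it the entries it owns):
//
//   FullMatrix<double> serial(100, 100);
//   // ... fill 'serial' identically on all ranks ...
//   ScaLAPACKMatrix<double> distributed(100, 100, grid, 32, 32);
//   distributed = serial;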
358 
359 
360 
361 template <typename NumberType>
362 void
363 ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
364  const unsigned int rank)
365 {
366  if (n_rows * n_columns == 0)
367  return;
368 
369  const unsigned int this_mpi_process(
370  Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
371 
372 # ifdef DEBUG
373  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
374  ExcMessage("All processes have to call routine with identical rank"));
375  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
376  ExcMessage("All processes have to call routine with identical rank"));
377 # endif
378 
379  // root process has to be active in the grid of A
380  if (this_mpi_process == rank)
381  {
382  Assert(grid->mpi_process_is_active, ExcInternalError());
383  Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
384  Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
385  }
386  // Create 1x1 grid for matrix B.
387  // The underlying grid for matrix B only contains the process #rank.
388  // This grid will be used to copy the serial matrix B to the distributed
389  // matrix using the ScaLAPACK routine pgemr2d.
390  MPI_Group group_A;
391  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
392  const int n = 1;
393  const std::vector<int> ranks(n, rank);
394  MPI_Group group_B;
395  MPI_Group_incl(group_A, n, ranks.data(), &group_B);
396  MPI_Comm communicator_B;
397 
398  const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_from;
399  const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
400  group_B,
401  mpi_tag,
402  &communicator_B);
403  AssertThrowMPI(ierr);
404  int n_proc_rows_B = 1, n_proc_cols_B = 1;
405  int this_process_row_B = -1, this_process_column_B = -1;
406  int blacs_context_B = -1;
407  if (MPI_COMM_NULL != communicator_B)
408  {
409  // Initialize Cblacs context from the provided communicator
410  blacs_context_B = Csys2blacs_handle(communicator_B);
411  const char *order = "Col";
412  Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
413  Cblacs_gridinfo(blacs_context_B,
414  &n_proc_rows_B,
415  &n_proc_cols_B,
416  &this_process_row_B,
417  &this_process_column_B);
418  Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
419  // the active process of grid B has to be process #rank of the
420  // communicator attached to A
421  Assert(this_mpi_process == rank, ExcInternalError());
422  }
423  const bool mpi_process_is_active_B =
424  (this_process_row_B >= 0 && this_process_column_B >= 0);
425 
426  // create descriptor for matrix B
427  std::vector<int> descriptor_B(9, -1);
428  const int first_process_row_B = 0, first_process_col_B = 0;
429 
430  if (mpi_process_is_active_B)
431  {
432  // Get local sizes:
433  int n_local_rows_B = numroc_(&n_rows,
434  &n_rows,
435  &this_process_row_B,
436  &first_process_row_B,
437  &n_proc_rows_B);
438  int n_local_cols_B = numroc_(&n_columns,
439  &n_columns,
440  &this_process_column_B,
441  &first_process_col_B,
442  &n_proc_cols_B);
443  Assert(n_local_rows_B == n_rows, ExcInternalError());
444  Assert(n_local_cols_B == n_columns, ExcInternalError());
445  (void)n_local_cols_B;
446 
447  int lda = std::max(1, n_local_rows_B);
448  int info = 0;
449  descinit_(descriptor_B.data(),
450  &n_rows,
451  &n_columns,
452  &n_rows,
453  &n_columns,
454  &first_process_row_B,
455  &first_process_col_B,
456  &blacs_context_B,
457  &lda,
458  &info);
459  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
460  }
461  if (this->grid->mpi_process_is_active)
462  {
463  const int ii = 1;
464  NumberType *loc_vals_A =
465  this->values.size() > 0 ? this->values.data() : nullptr;
466  const NumberType *loc_vals_B =
467  mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
468 
469  // pgemr2d has to be called only for processes active on grid attached to
470  // matrix A
471  pgemr2d(&n_rows,
472  &n_columns,
473  loc_vals_B,
474  &ii,
475  &ii,
476  descriptor_B.data(),
477  loc_vals_A,
478  &ii,
479  &ii,
480  this->descriptor,
481  &(this->grid->blacs_context));
482  }
483  if (mpi_process_is_active_B)
484  Cblacs_gridexit(blacs_context_B);
485 
486  MPI_Group_free(&group_A);
487  MPI_Group_free(&group_B);
488  if (MPI_COMM_NULL != communicator_B)
489  Utilities::MPI::free_communicator(communicator_B);
490 
491  state = LAPACKSupport::matrix;
492 }
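// Usage sketch for copy_from() above (names are examples): the serial matrix
// only needs meaningful contents on the chosen rank, but the call is
// collective on the grid's communicator.
//
//   LAPACKFullMatrix<double> serial(n, n);
//   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
//     {
//       // ... fill 'serial' on rank 0 only ...
//     }
//   ScaLAPACKMatrix<double> A(n, n, grid, 32, 32);
//   A.copy_from(serial, /*rank=*/0);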
493 
494 
495 
496 template <typename NumberType>
497 unsigned int
498 ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
499 {
500  Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
501  ExcIndexRange(loc_row, 0, n_local_rows));
502  const int i = loc_row + 1;
503  return indxl2g_(&i,
504  &row_block_size,
505  &(grid->this_process_row),
506  &first_process_row,
507  &(grid->n_process_rows)) -
508  1;
509 }
510 
511 
512 
513 template <typename NumberType>
514 unsigned int
515 ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
516 {
517  Assert(n_local_columns >= 0 &&
518  loc_column < static_cast<unsigned int>(n_local_columns),
519  ExcIndexRange(loc_column, 0, n_local_columns));
520  const int j = loc_column + 1;
521  return indxl2g_(&j,
522  &column_block_size,
523  &(grid->this_process_column),
524  &first_process_column,
525  &(grid->n_process_columns)) -
526  1;
527 }
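// Worked example for the local-to-global mapping above: for 10 rows,
// row_block_size = 2 and 2 process rows, process row 0 owns the global rows
// {0, 1, 4, 5, 8, 9} (zero-based). Its local row 2 is therefore global row 4:
// global_row() passes i = 3 to indxl2g_, which returns 5 in Fortran's 1-based
// numbering, and the trailing "- 1" converts that back to 4.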
528 
529 
530 
531 template <typename NumberType>
532 void
533 ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
534  const unsigned int rank) const
535 {
536  if (n_rows * n_columns == 0)
537  return;
538 
539  const unsigned int this_mpi_process(
540  Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
541 
542 # ifdef DEBUG
543  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
544  ExcMessage("All processes have to call routine with identical rank"));
545  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
546  ExcMessage("All processes have to call routine with identical rank"));
547 # endif
548 
549  if (this_mpi_process == rank)
550  {
551  // the process which gets the serial copy has to be in the process grid
552  Assert(this->grid->is_process_active(), ExcInternalError());
553  Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
554  Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
555  }
556 
557  // Create 1x1 grid for matrix B.
558  // The underlying grid for matrix B only contains the process #rank.
559  // This grid will be used to copy the distributed matrix to the serial
560  // matrix B using the ScaLAPACK routine pgemr2d.
561  MPI_Group group_A;
562  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
563  const int n = 1;
564  const std::vector<int> ranks(n, rank);
565  MPI_Group group_B;
566  MPI_Group_incl(group_A, n, ranks.data(), &group_B);
567  MPI_Comm communicator_B;
568 
569  const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to;
570  const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
571  group_B,
572  mpi_tag,
573  &communicator_B);
574  AssertThrowMPI(ierr);
575  int n_proc_rows_B = 1, n_proc_cols_B = 1;
576  int this_process_row_B = -1, this_process_column_B = -1;
577  int blacs_context_B = -1;
578  if (MPI_COMM_NULL != communicator_B)
579  {
580  // Initialize Cblacs context from the provided communicator
581  blacs_context_B = Csys2blacs_handle(communicator_B);
582  const char *order = "Col";
583  Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
584  Cblacs_gridinfo(blacs_context_B,
585  &n_proc_rows_B,
586  &n_proc_cols_B,
587  &this_process_row_B,
588  &this_process_column_B);
589  Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
590  // the active process of grid B has to be process #rank of the
591  // communicator attached to A
592  Assert(this_mpi_process == rank, ExcInternalError());
593  }
594  const bool mpi_process_is_active_B =
595  (this_process_row_B >= 0 && this_process_column_B >= 0);
596 
597  // create descriptor for matrix B
598  std::vector<int> descriptor_B(9, -1);
599  const int first_process_row_B = 0, first_process_col_B = 0;
600 
601  if (mpi_process_is_active_B)
602  {
603  // Get local sizes:
604  int n_local_rows_B = numroc_(&n_rows,
605  &n_rows,
606  &this_process_row_B,
607  &first_process_row_B,
608  &n_proc_rows_B);
609  int n_local_cols_B = numroc_(&n_columns,
610  &n_columns,
611  &this_process_column_B,
612  &first_process_col_B,
613  &n_proc_cols_B);
614  Assert(n_local_rows_B == n_rows, ExcInternalError());
615  Assert(n_local_cols_B == n_columns, ExcInternalError());
616  (void)n_local_cols_B;
617 
618  int lda = std::max(1, n_local_rows_B);
619  int info = 0;
620  // fill descriptor for matrix B
621  descinit_(descriptor_B.data(),
622  &n_rows,
623  &n_columns,
624  &n_rows,
625  &n_columns,
626  &first_process_row_B,
627  &first_process_col_B,
628  &blacs_context_B,
629  &lda,
630  &info);
631  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
632  }
633  // pgemr2d has to be called only for processes active on grid attached to
634  // matrix A
635  if (this->grid->mpi_process_is_active)
636  {
637  const int ii = 1;
638  const NumberType *loc_vals_A =
639  this->values.size() > 0 ? this->values.data() : nullptr;
640  NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
641 
642  pgemr2d(&n_rows,
643  &n_columns,
644  loc_vals_A,
645  &ii,
646  &ii,
647  this->descriptor,
648  loc_vals_B,
649  &ii,
650  &ii,
651  descriptor_B.data(),
652  &(this->grid->blacs_context));
653  }
654  if (mpi_process_is_active_B)
655  Cblacs_gridexit(blacs_context_B);
656 
657  MPI_Group_free(&group_A);
658  MPI_Group_free(&group_B);
659  if (MPI_COMM_NULL != communicator_B)
660  Utilities::MPI::free_communicator(communicator_B);
661 }
662 
663 
664 
665 template <typename NumberType>
666 void
667 ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
668 {
669  // FIXME: use PDGEMR2D for copying?
670  // PDGEMR2D copies a submatrix of A on a submatrix of B.
671  // A and B can have different distributions
672  // see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
673  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
674  Assert(n_columns == int(matrix.n()),
675  ExcDimensionMismatch(n_columns, matrix.n()));
676 
677  matrix = 0.;
678  if (grid->mpi_process_is_active)
679  {
680  for (int i = 0; i < n_local_rows; ++i)
681  {
682  const int glob_i = global_row(i);
683  for (int j = 0; j < n_local_columns; ++j)
684  {
685  const int glob_j = global_column(j);
686  matrix(glob_i, glob_j) = local_el(i, j);
687  }
688  }
689  }
690  Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
691 
692  // we could move the following lines under the main loop above,
693  // but they would be dependent on glob_i and glob_j, which
694  // won't make it much prettier
695  if (state == LAPACKSupport::cholesky)
696  {
697  if (property == LAPACKSupport::lower_triangular)
698  for (unsigned int i = 0; i < matrix.n(); ++i)
699  for (unsigned int j = i + 1; j < matrix.m(); ++j)
700  matrix(i, j) = 0.;
701  else if (property == LAPACKSupport::upper_triangular)
702  for (unsigned int i = 0; i < matrix.n(); ++i)
703  for (unsigned int j = 0; j < i; ++j)
704  matrix(i, j) = 0.;
705  }
706  else if (property == LAPACKSupport::symmetric &&
707  state == LAPACKSupport::inverse_matrix)
708  {
709  if (uplo == 'L')
710  for (unsigned int i = 0; i < matrix.n(); ++i)
711  for (unsigned int j = i + 1; j < matrix.m(); ++j)
712  matrix(i, j) = matrix(j, i);
713  else if (uplo == 'U')
714  for (unsigned int i = 0; i < matrix.n(); ++i)
715  for (unsigned int j = 0; j < i; ++j)
716  matrix(i, j) = matrix(j, i);
717  }
718 }
719 
720 
721 
722 template <typename NumberType>
723 void
724 ScaLAPACKMatrix<NumberType>::copy_to(
725  ScaLAPACKMatrix<NumberType> & B,
726  const std::pair<unsigned int, unsigned int> &offset_A,
727  const std::pair<unsigned int, unsigned int> &offset_B,
728  const std::pair<unsigned int, unsigned int> &submatrix_size) const
729 {
730  // submatrix is empty
731  if (submatrix_size.first == 0 || submatrix_size.second == 0)
732  return;
733 
734  // range checking for matrix A
735  AssertIndexRange(offset_A.first, n_rows - submatrix_size.first + 1);
736  AssertIndexRange(offset_A.second, n_columns - submatrix_size.second + 1);
737 
738  // range checking for matrix B
739  AssertIndexRange(offset_B.first, B.n_rows - submatrix_size.first + 1);
740  AssertIndexRange(offset_B.second, B.n_columns - submatrix_size.second + 1);
741 
742  // Currently, copying of matrices will only be supported if A and B share the
743  // same MPI communicator
744  int ierr, comparison;
745  ierr = MPI_Comm_compare(grid->mpi_communicator,
746  B.grid->mpi_communicator,
747  &comparison);
748  AssertThrowMPI(ierr);
749  Assert(comparison == MPI_IDENT,
750  ExcMessage("Matrix A and B must have a common MPI Communicator"));
751 
752  /*
753  * The routine pgemr2d requires a BLACS context resembling at least the union
754  * of process grids described by the BLACS contexts held by the ProcessGrids
755  * of matrix A and B. As A and B share the same MPI communicator, there is no
756  * need to create a union MPI communicator to initialise the BLACS context
757  */
758  int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
759  const char *order = "Col";
760  int union_n_process_rows =
761  Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
762  int union_n_process_columns = 1;
763  Cblacs_gridinit(&union_blacs_context,
764  order,
765  union_n_process_rows,
766  union_n_process_columns);
767 
768  int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
769  Cblacs_gridinfo(this->grid->blacs_context,
770  &n_grid_rows_A,
771  &n_grid_columns_A,
772  &my_row_A,
773  &my_column_A);
774 
775  // check whether process is in the BLACS context of matrix A
776  const bool in_context_A =
777  (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
778  (my_column_A >= 0 && my_column_A < n_grid_columns_A);
779 
780  int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
781  Cblacs_gridinfo(B.grid->blacs_context,
782  &n_grid_rows_B,
783  &n_grid_columns_B,
784  &my_row_B,
785  &my_column_B);
786 
787  // check whether process is in the BLACS context of matrix B
788  const bool in_context_B =
789  (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
790  (my_column_B >= 0 && my_column_B < n_grid_columns_B);
791 
792  const int n_rows_submatrix = submatrix_size.first;
793  const int n_columns_submatrix = submatrix_size.second;
794 
795  // due to Fortran indexing one has to be added
796  int ia = offset_A.first + 1, ja = offset_A.second + 1;
797  int ib = offset_B.first + 1, jb = offset_B.second + 1;
798 
799  std::array<int, 9> desc_A, desc_B;
800 
801  const NumberType *loc_vals_A = nullptr;
802  NumberType * loc_vals_B = nullptr;
803 
804  // Note: the function pgemr2d has to be called for all processes in the union
805  // BLACS context. If the calling process is not part of the BLACS context of A,
806  // desc_A[1] has to be -1 and all other parameters do not have to be set. If
807  // the calling process is not part of the BLACS context of B, desc_B[1] has to
808  // be -1 and all other parameters do not have to be set.
809  if (in_context_A)
810  {
811  if (this->values.size() != 0)
812  loc_vals_A = this->values.data();
813 
814  for (unsigned int i = 0; i < desc_A.size(); ++i)
815  desc_A[i] = this->descriptor[i];
816  }
817  else
818  desc_A[1] = -1;
819 
820  if (in_context_B)
821  {
822  if (B.values.size() != 0)
823  loc_vals_B = B.values.data();
824 
825  for (unsigned int i = 0; i < desc_B.size(); ++i)
826  desc_B[i] = B.descriptor[i];
827  }
828  else
829  desc_B[1] = -1;
830 
831  pgemr2d(&n_rows_submatrix,
832  &n_columns_submatrix,
833  loc_vals_A,
834  &ia,
835  &ja,
836  desc_A.data(),
837  loc_vals_B,
838  &ib,
839  &jb,
840  desc_B.data(),
841  &union_blacs_context);
842 
843  B.state = LAPACKSupport::matrix;
844 
845  // releasing the union BLACS context
846  Cblacs_gridexit(union_blacs_context);
847 }
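// Usage sketch for the submatrix copy above (offsets and sizes are examples):
// copy the 100x100 block of A starting at global index (50, 50) into the top
// left corner of B; A and B must share the same MPI communicator.
//
//   A.copy_to(B,
//             /*offset_A=*/std::make_pair(50u, 50u),
//             /*offset_B=*/std::make_pair(0u, 0u),
//             /*submatrix_size=*/std::make_pair(100u, 100u));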
848 
849 
850 
851 template <typename NumberType>
852 void
853 ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
854 {
855  Assert(n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
856  Assert(n_columns == dest.n_columns,
857  ExcDimensionMismatch(n_columns, dest.n_columns));
858 
859  if (this->grid->mpi_process_is_active)
860  AssertThrow(
861  this->descriptor[0] == 1,
862  ExcMessage(
863  "Copying of ScaLAPACK matrices only implemented for dense matrices"));
864  if (dest.grid->mpi_process_is_active)
865  AssertThrow(
866  dest.descriptor[0] == 1,
867  ExcMessage(
868  "Copying of ScaLAPACK matrices only implemented for dense matrices"));
869 
870  /*
871  * just in case of different process grids or block-cyclic distributions
872  * inter-process communication is necessary
873  * if distributed matrices have the same process grid and block sizes, local
874  * copying is enough
875  */
876  if ((this->grid != dest.grid) || (row_block_size != dest.row_block_size) ||
877  (column_block_size != dest.column_block_size))
878  {
879  /*
880  * get the MPI communicator, which is the union of the source and
881  * destination MPI communicator
882  */
883  int ierr = 0;
884  MPI_Group group_source, group_dest, group_union;
885  ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
886  AssertThrowMPI(ierr);
887  ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
888  AssertThrowMPI(ierr);
889  ierr = MPI_Group_union(group_source, group_dest, &group_union);
890  AssertThrowMPI(ierr);
891  MPI_Comm mpi_communicator_union;
892 
893  // to create a communicator representing the union of the source
894  // and destination MPI communicator we need a communicator that
895  // is guaranteed to contain all desired processes -- i.e.,
896  // MPI_COMM_WORLD. on the other hand, as documented in the MPI
897  // standard, MPI_Comm_create_group is not collective on all
898  // processes in the first argument, but instead is collective on
899  // only those processes listed in the group. in other words,
900  // there is really no harm in passing MPI_COMM_WORLD as the
901  // first argument, even if the program we are currently running
902  // and that is calling this function only works on a subset of
903  // processes. the same holds for the wrapper/fallback we are using here.
904 
905  const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to2;
906  ierr = MPI_Comm_create_group(MPI_COMM_WORLD,
907  group_union,
908  mpi_tag,
909  &mpi_communicator_union);
910  AssertThrowMPI(ierr);
911 
912  /*
913  * The routine pgemr2d requires a BLACS context resembling at least the
914  * union of process grids described by the BLACS contexts of matrix A and
915  * B
916  */
917  int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
918  const char *order = "Col";
919  int union_n_process_rows =
920  Utilities::MPI::n_mpi_processes(mpi_communicator_union);
921  int union_n_process_columns = 1;
922  Cblacs_gridinit(&union_blacs_context,
923  order,
924  union_n_process_rows,
925  union_n_process_columns);
926 
927  const NumberType *loc_vals_source = nullptr;
928  NumberType * loc_vals_dest = nullptr;
929 
930  if (this->grid->mpi_process_is_active && (this->values.size() > 0))
931  {
932  AssertThrow(this->values.size() > 0,
933  dealii::ExcMessage(
934  "source: process is active but local matrix empty"));
935  loc_vals_source = this->values.data();
936  }
937  if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
938  {
939  AssertThrow(
940  dest.values.size() > 0,
941  dealii::ExcMessage(
942  "destination: process is active but local matrix empty"));
943  loc_vals_dest = dest.values.data();
944  }
945  pgemr2d(&n_rows,
946  &n_columns,
947  loc_vals_source,
948  &submatrix_row,
949  &submatrix_column,
950  descriptor,
951  loc_vals_dest,
952  &dest.submatrix_row,
953  &dest.submatrix_column,
954  dest.descriptor,
955  &union_blacs_context);
956 
957  Cblacs_gridexit(union_blacs_context);
958 
959  if (mpi_communicator_union != MPI_COMM_NULL)
960  Utilities::MPI::free_communicator(mpi_communicator_union);
961  ierr = MPI_Group_free(&group_source);
962  AssertThrowMPI(ierr);
963  ierr = MPI_Group_free(&group_dest);
964  AssertThrowMPI(ierr);
965  ierr = MPI_Group_free(&group_union);
966  AssertThrowMPI(ierr);
967  }
968  else
969  // process is active in the process grid
970  if (this->grid->mpi_process_is_active)
971  dest.values = this->values;
972 
973  dest.state = state;
974  dest.property = property;
975 }
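// Usage sketch for copy_to() above (names are examples): redistributing the
// same global n x n matrix onto another process grid and/or other block sizes
// only requires the destination to have matching global dimensions.
//
//   ScaLAPACKMatrix<double> B(n, n, other_grid, 64, 64);
//   A.copy_to(B); // communicates via pgemr2d if grids or block sizes differ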
976 
977 
978 
979 template <typename NumberType>
980 void
981 ScaLAPACKMatrix<NumberType>::copy_transposed(
982  const ScaLAPACKMatrix<NumberType> &B)
983 {
984  add(B, 0, 1, true);
985 }
986 
987 
988 
989 template <typename NumberType>
990 void
991 ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
992  const NumberType alpha,
993  const NumberType beta,
994  const bool transpose_B)
995 {
996  if (transpose_B)
997  {
998  Assert(n_rows == B.n_columns, ExcDimensionMismatch(n_rows, B.n_columns));
999  Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
1000  Assert(column_block_size == B.row_block_size,
1001  ExcDimensionMismatch(column_block_size, B.row_block_size));
1002  Assert(row_block_size == B.column_block_size,
1003  ExcDimensionMismatch(row_block_size, B.column_block_size));
1004  }
1005  else
1006  {
1007  Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
1008  Assert(n_columns == B.n_columns,
1009  ExcDimensionMismatch(n_columns, B.n_columns));
1010  Assert(column_block_size == B.column_block_size,
1011  ExcDimensionMismatch(column_block_size, B.column_block_size));
1012  Assert(row_block_size == B.row_block_size,
1013  ExcDimensionMismatch(row_block_size, B.row_block_size));
1014  }
1015  Assert(this->grid == B.grid,
1016  ExcMessage("The matrices A and B need to have the same process grid"));
1017 
1018  if (this->grid->mpi_process_is_active)
1019  {
1020  char trans_b = transpose_B ? 'T' : 'N';
1021  NumberType *A_loc =
1022  (this->values.size() > 0) ? this->values.data() : nullptr;
1023  const NumberType *B_loc =
1024  (B.values.size() > 0) ? B.values.data() : nullptr;
1025 
1026  pgeadd(&trans_b,
1027  &n_rows,
1028  &n_columns,
1029  &beta,
1030  B_loc,
1031  &B.submatrix_row,
1032  &B.submatrix_column,
1033  B.descriptor,
1034  &alpha,
1035  A_loc,
1036  &submatrix_row,
1037  &submatrix_column,
1038  descriptor);
1039  }
1040  state = LAPACKSupport::matrix;
1041 }
1042 
1043 
1044 
1045 template <typename NumberType>
1046 void
1047 ScaLAPACKMatrix<NumberType>::add(const NumberType a,
1048  const ScaLAPACKMatrix<NumberType> &B)
1049 {
1050  add(B, 1, a, false);
1051 }
1052 
1053 
1054 
1055 template <typename NumberType>
1056 void
1057 ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
1058  const ScaLAPACKMatrix<NumberType> &B)
1059 {
1060  add(B, 1, a, true);
1061 }
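// Semantics of the two convenience functions above, in terms of the general
// add(B, alpha, beta, transpose_B): add(a, B) performs A += a*B and
// Tadd(a, B) performs A += a*B^T, i.e. alpha = 1 and beta = a in both cases.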
1062 
1063 
1064 
1065 template <typename NumberType>
1066 void
1067 ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
1068  const ScaLAPACKMatrix<NumberType> &B,
1069  const NumberType c,
1070  ScaLAPACKMatrix<NumberType> & C,
1071  const bool transpose_A,
1072  const bool transpose_B) const
1073 {
1074  Assert(this->grid == B.grid,
1075  ExcMessage("The matrices A and B need to have the same process grid"));
1076  Assert(C.grid == B.grid,
1077  ExcMessage("The matrices B and C need to have the same process grid"));
1078 
1079  // see for further info:
1080  // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgemm.htm
1081  if (!transpose_A && !transpose_B)
1082  {
1083  Assert(this->n_columns == B.n_rows,
1084  ExcDimensionMismatch(this->n_columns, B.n_rows));
1085  Assert(this->n_rows == C.n_rows,
1086  ExcDimensionMismatch(this->n_rows, C.n_rows));
1087  Assert(B.n_columns == C.n_columns,
1088  ExcDimensionMismatch(B.n_columns, C.n_columns));
1089  Assert(this->row_block_size == C.row_block_size,
1090  ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1091  Assert(this->column_block_size == B.row_block_size,
1092  ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1093  Assert(B.column_block_size == C.column_block_size,
1094  ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1095  }
1096  else if (transpose_A && !transpose_B)
1097  {
1098  Assert(this->n_rows == B.n_rows,
1099  ExcDimensionMismatch(this->n_rows, B.n_rows));
1100  Assert(this->n_columns == C.n_rows,
1101  ExcDimensionMismatch(this->n_columns, C.n_rows));
1102  Assert(B.n_columns == C.n_columns,
1103  ExcDimensionMismatch(B.n_columns, C.n_columns));
1104  Assert(this->column_block_size == C.row_block_size,
1105  ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1106  Assert(this->row_block_size == B.row_block_size,
1107  ExcDimensionMismatch(this->row_block_size, B.row_block_size));
1108  Assert(B.column_block_size == C.column_block_size,
1109  ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1110  }
1111  else if (!transpose_A && transpose_B)
1112  {
1113  Assert(this->n_columns == B.n_columns,
1114  ExcDimensionMismatch(this->n_columns, B.n_columns));
1115  Assert(this->n_rows == C.n_rows,
1116  ExcDimensionMismatch(this->n_rows, C.n_rows));
1117  Assert(B.n_rows == C.n_columns,
1118  ExcDimensionMismatch(B.n_rows, C.n_columns));
1119  Assert(this->row_block_size == C.row_block_size,
1120  ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1121  Assert(this->column_block_size == B.column_block_size,
1122  ExcDimensionMismatch(this->column_block_size,
1123  B.column_block_size));
1124  Assert(B.row_block_size == C.column_block_size,
1125  ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1126  }
1127  else // if (transpose_A && transpose_B)
1128  {
1129  Assert(this->n_rows == B.n_columns,
1130  ExcDimensionMismatch(this->n_rows, B.n_columns));
1131  Assert(this->n_columns == C.n_rows,
1132  ExcDimensionMismatch(this->n_columns, C.n_rows));
1133  Assert(B.n_rows == C.n_columns,
1134  ExcDimensionMismatch(B.n_rows, C.n_columns));
1135  Assert(this->column_block_size == C.row_block_size,
1136  ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1137  Assert(this->row_block_size == B.column_block_size,
1138  ExcDimensionMismatch(this->row_block_size, B.column_block_size));
1139  Assert(B.row_block_size == C.column_block_size,
1140  ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1141  }
1142 
1143  if (this->grid->mpi_process_is_active)
1144  {
1145  char trans_a = transpose_A ? 'T' : 'N';
1146  char trans_b = transpose_B ? 'T' : 'N';
1147 
1148  const NumberType *A_loc =
1149  (this->values.size() > 0) ? this->values.data() : nullptr;
1150  const NumberType *B_loc =
1151  (B.values.size() > 0) ? B.values.data() : nullptr;
1152  NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
1153  int m = C.n_rows;
1154  int n = C.n_columns;
1155  int k = transpose_A ? this->n_rows : this->n_columns;
1156 
1157  pgemm(&trans_a,
1158  &trans_b,
1159  &m,
1160  &n,
1161  &k,
1162  &b,
1163  A_loc,
1164  &(this->submatrix_row),
1165  &(this->submatrix_column),
1166  this->descriptor,
1167  B_loc,
1168  &B.submatrix_row,
1169  &B.submatrix_column,
1170  B.descriptor,
1171  &c,
1172  C_loc,
1173  &C.submatrix_row,
1174  &C.submatrix_column,
1175  C.descriptor);
1176  }
1177  C.state = LAPACKSupport::matrix;
1178 }
1179 
1180 
1181 
1182 template <typename NumberType>
1183 void
1184 ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> &C,
1185  const ScaLAPACKMatrix<NumberType> &B,
1186  const bool adding) const
1187 {
1188  if (adding)
1189  mult(1., B, 1., C, false, false);
1190  else
1191  mult(1., B, 0, C, false, false);
1192 }
1193 
1194 
1195 
1196 template <typename NumberType>
1197 void
1198 ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> &C,
1199  const ScaLAPACKMatrix<NumberType> &B,
1200  const bool adding) const
1201 {
1202  if (adding)
1203  mult(1., B, 1., C, true, false);
1204  else
1205  mult(1., B, 0, C, true, false);
1206 }
1207 
1208 
1209 
1210 template <typename NumberType>
1211 void
1212 ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> &C,
1213  const ScaLAPACKMatrix<NumberType> &B,
1214  const bool adding) const
1215 {
1216  if (adding)
1217  mult(1., B, 1., C, false, true);
1218  else
1219  mult(1., B, 0, C, false, true);
1220 }
1221 
1222 
1223 
1224 template <typename NumberType>
1225 void
1226 ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> &C,
1227  const ScaLAPACKMatrix<NumberType> &B,
1228  const bool adding) const
1229 {
1230  if (adding)
1231  mult(1., B, 1., C, true, true);
1232  else
1233  mult(1., B, 0, C, true, true);
1234 }
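// Semantics of the four wrappers above, all forwarding to mult():
//
//   A.mmult(C, B);    // C = A   * B
//   A.Tmmult(C, B);   // C = A^T * B
//   A.mTmult(C, B);   // C = A   * B^T
//   A.TmTmult(C, B);  // C = A^T * B^T
//
// With adding == true the product is accumulated, C += op(A) * op(B).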
1235 
1236 
1237 
1238 template <typename NumberType>
1239 void
1240 ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
1241 {
1242  Assert(
1243  n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
1244  ExcMessage(
1245  "Cholesky factorization can be applied to symmetric matrices only."));
1246  Assert(state == LAPACKSupport::matrix,
1247  ExcMessage(
1248  "Matrix has to be in Matrix state before calling this function."));
1249 
1250  if (grid->mpi_process_is_active)
1251  {
1252  int info = 0;
1253  NumberType *A_loc = this->values.data();
1254  // pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
1255  ppotrf(&uplo,
1256  &n_columns,
1257  A_loc,
1258  &submatrix_row,
1259  &submatrix_column,
1260  descriptor,
1261  &info);
1262  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
1263  }
1264  state = LAPACKSupport::cholesky;
1265  property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
1266  LAPACKSupport::upper_triangular);
1267 }
1268 
1269 
1270 
1271 template <typename NumberType>
1272 void
1273 ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
1274 {
1275  Assert(state == LAPACKSupport::matrix,
1276  ExcMessage(
1277  "Matrix has to be in Matrix state before calling this function."));
1278 
1279  if (grid->mpi_process_is_active)
1280  {
1281  int info = 0;
1282  NumberType *A_loc = this->values.data();
1283 
1284  const int iarow = indxg2p_(&submatrix_row,
1285  &row_block_size,
1286  &(grid->this_process_row),
1287  &first_process_row,
1288  &(grid->n_process_rows));
1289  const int mp = numroc_(&n_rows,
1290  &row_block_size,
1291  &(grid->this_process_row),
1292  &iarow,
1293  &(grid->n_process_rows));
1294  ipiv.resize(mp + row_block_size);
1295 
1296  pgetrf(&n_rows,
1297  &n_columns,
1298  A_loc,
1299  &submatrix_row,
1300  &submatrix_column,
1301  descriptor,
1302  ipiv.data(),
1303  &info);
1304  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
1305  }
1306  state = LAPACKSupport::State::lu;
1307  property = LAPACKSupport::Property::general;
1308 }
1309 
1310 
1311 
1312 template <typename NumberType>
1313 void
1314 ScaLAPACKMatrix<NumberType>::invert()
1315 {
1316  // Check whether matrix is symmetric and save flag.
1317  // If a Cholesky factorization has been applied previously,
1318  // the original matrix was symmetric.
1319  const bool is_symmetric = (property == LAPACKSupport::symmetric ||
1320  state == LAPACKSupport::State::cholesky);
1321 
1322  // Check whether matrix is triangular and is in an unfactorized state.
1323  const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
1324  property == LAPACKSupport::lower_triangular) &&
1325  (state == LAPACKSupport::State::matrix ||
1326  state == LAPACKSupport::State::inverse_matrix);
1327 
1328  if (is_triangular)
1329  {
1330  if (grid->mpi_process_is_active)
1331  {
1332  const char uploTriangular =
1333  property == LAPACKSupport::upper_triangular ? 'U' : 'L';
1334  const char diag = 'N';
1335  int info = 0;
1336  NumberType *A_loc = this->values.data();
1337  ptrtri(&uploTriangular,
1338  &diag,
1339  &n_columns,
1340  A_loc,
1341  &submatrix_row,
1342  &submatrix_column,
1343  descriptor,
1344  &info);
1345  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
1346  // The inversion is stored in the same part as the triangular matrix,
1347  // so we don't need to re-set the property here.
1348  }
1349  }
1350  else
1351  {
1352  // Matrix is neither in Cholesky nor LU state.
1353  // Compute the required factorizations based on the property of the
1354  // matrix.
1355  if (!(state == LAPACKSupport::State::lu ||
1356  state == LAPACKSupport::State::cholesky))
1357  {
1358  if (is_symmetric)
1359  compute_cholesky_factorization();
1360  else
1361  compute_lu_factorization();
1362  }
1363  if (grid->mpi_process_is_active)
1364  {
1365  int info = 0;
1366  NumberType *A_loc = this->values.data();
1367 
1368  if (is_symmetric)
1369  {
1370  ppotri(&uplo,
1371  &n_columns,
1372  A_loc,
1373  &submatrix_row,
1374  &submatrix_column,
1375  descriptor,
1376  &info);
1377  AssertThrow(info == 0,
1378  LAPACKSupport::ExcErrorCode("ppotri", info));
1379  property = LAPACKSupport::Property::symmetric;
1380  }
1381  else
1382  {
1383  int lwork = -1, liwork = -1;
1384  work.resize(1);
1385  iwork.resize(1);
1386 
1387  pgetri(&n_columns,
1388  A_loc,
1389  &submatrix_row,
1390  &submatrix_column,
1391  descriptor,
1392  ipiv.data(),
1393  work.data(),
1394  &lwork,
1395  iwork.data(),
1396  &liwork,
1397  &info);
1398 
1399  AssertThrow(info == 0,
1400  LAPACKSupport::ExcErrorCode("pgetri", info));
1401  lwork = static_cast<int>(work[0]);
1402  liwork = iwork[0];
1403  work.resize(lwork);
1404  iwork.resize(liwork);
1405 
1406  pgetri(&n_columns,
1407  A_loc,
1408  &submatrix_row,
1409  &submatrix_column,
1410  descriptor,
1411  ipiv.data(),
1412  work.data(),
1413  &lwork,
1414  iwork.data(),
1415  &liwork,
1416  &info);
1417 
1418  AssertThrow(info == 0,
1419  LAPACKSupport::ExcErrorCode("pgetri", info));
1420  }
1421  }
1422  }
1423  state = LAPACKSupport::State::inverse_matrix;
1424 }
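// Usage sketch for invert() above: the factorization is picked automatically
// from the matrix property and state, so a typical call site is simply
//
//   A.set_property(LAPACKSupport::Property::symmetric); // if A is symmetric
//   A.invert(); // Cholesky + ppotri in this case
//
// For a general matrix the same call uses an LU factorization followed by
// pgetri, and for a triangular matrix ptrtri is called directly.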
1425 
1426 
1427 
1428 template <typename NumberType>
1429 std::vector<NumberType>
1430 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
1431  const std::pair<unsigned int, unsigned int> &index_limits,
1432  const bool compute_eigenvectors)
1433 {
1434  // check validity of index limits
1435  AssertIndexRange(index_limits.first, n_rows);
1436  AssertIndexRange(index_limits.second, n_rows);
1437 
1438  std::pair<unsigned int, unsigned int> idx =
1439  std::make_pair(std::min(index_limits.first, index_limits.second),
1440  std::max(index_limits.first, index_limits.second));
1441 
1442  // compute all eigenvalues/eigenvectors
1443  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1444  return eigenpairs_symmetric(compute_eigenvectors);
1445  else
1446  return eigenpairs_symmetric(compute_eigenvectors, idx);
1447 }
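// Usage sketch for eigenpairs_symmetric_by_index() above (index range is an
// example): request the five smallest eigenpairs; the eigenvalues are
// returned in ascending order and, since eigenvectors are requested, the
// contents of A are replaced by the corresponding eigenvectors.
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   const std::vector<double> lambda =
//     A.eigenpairs_symmetric_by_index(std::make_pair(0u, 4u),
//                                     /*compute_eigenvectors=*/true);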
1448 
1449 
1450 
1451 template <typename NumberType>
1452 std::vector<NumberType>
1453 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
1454  const std::pair<NumberType, NumberType> &value_limits,
1455  const bool compute_eigenvectors)
1456 {
1457  Assert(!std::isnan(value_limits.first),
1458  ExcMessage("value_limits.first is NaN"));
1459  Assert(!std::isnan(value_limits.second),
1460  ExcMessage("value_limits.second is NaN"));
1461 
1462  std::pair<unsigned int, unsigned int> indices =
1463  std::make_pair(numbers::invalid_unsigned_int,
1464  numbers::invalid_unsigned_int);
1465 
1466  return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
1467 }
1468 
1469 
1470 
1471 template <typename NumberType>
1472 std::vector<NumberType>
1473 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
1474  const bool compute_eigenvectors,
1475  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1476  const std::pair<NumberType, NumberType> & eigenvalue_limits)
1477 {
1478  Assert(state == LAPACKSupport::matrix,
1479  ExcMessage(
1480  "Matrix has to be in Matrix state before calling this function."));
1481  Assert(property == LAPACKSupport::symmetric,
1482  ExcMessage("Matrix has to be symmetric for this operation."));
1483 
1484  std::lock_guard<std::mutex> lock(mutex);
1485 
1486  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1487  std::isnan(eigenvalue_limits.second)) ?
1488  false :
1489  true;
1490  const bool use_indices =
1491  ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1492  (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1493  false :
1494  true;
1495 
1496  Assert(
1497  !(use_values && use_indices),
1498  ExcMessage(
1499  "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1500 
1501  // if computation of eigenvectors is not required use a sufficiently small
1502  // distributed matrix
1503  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1504  compute_eigenvectors ?
1505  std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1506  grid,
1507  row_block_size) :
1508  std::make_unique<ScaLAPACKMatrix<NumberType>>(
1509  grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1510 
1511  eigenvectors->property = property;
1512  // number of eigenvalues to be returned from psyevx; upon successful exit ev
1513  // contains the m selected eigenvalues in ascending order. Set to all
1514  // eigenvalues in case we will be using psyev.
1515  int m = n_rows;
1516  std::vector<NumberType> ev(n_rows);
1517 
1518  if (grid->mpi_process_is_active)
1519  {
1520  int info = 0;
1521  /*
1522  * for jobz==N only eigenvalues are computed, for jobz='V' also the
1523  * eigenvectors of the matrix are computed
1524  */
1525  char jobz = compute_eigenvectors ? 'V' : 'N';
1526  char range = 'A';
1527  // default value is to compute all eigenvalues and optionally eigenvectors
1528  bool all_eigenpairs = true;
1529  NumberType vl = NumberType(), vu = NumberType();
1530  int il = 1, iu = 1;
1531  // number of eigenvectors to be returned;
1532  // upon successful exit the first m=nz columns contain the selected
1533  // eigenvectors (only if jobz=='V')
1534  int nz = 0;
1535  NumberType abstol = NumberType();
1536 
1537  // orfac decides which eigenvectors should be reorthogonalized
1538  // see
1539  // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1540  // for explanation. To keep it simple, no reorthogonalization will be done by
1541  // setting orfac to 0
1542  NumberType orfac = 0;
1543  // contains the indices of eigenvectors that failed to converge
1544  std::vector<int> ifail;
1545  // This array contains indices of eigenvectors corresponding to
1546  // a cluster of eigenvalues that could not be reorthogonalized
1547  // due to insufficient workspace
1548  // see
1549  // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1550  // for explanation
1551  std::vector<int> iclustr;
1552  // This array contains the gap between eigenvalues whose
1553  // eigenvectors could not be reorthogonalized.
1554  // see
1555  // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1556  // for explanation
1557  std::vector<NumberType> gap(n_local_rows * n_local_columns);
1558 
1559  // index range for eigenvalues is not specified
1560  if (!use_indices)
1561  {
1562  // interval for eigenvalues is not specified and consequently all
1563  // eigenvalues/eigenpairs will be computed
1564  if (!use_values)
1565  {
1566  range = 'A';
1567  all_eigenpairs = true;
1568  }
1569  else
1570  {
1571  range = 'V';
1572  all_eigenpairs = false;
1573  vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1574  vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1575  }
1576  }
1577  else
1578  {
1579  range = 'I';
1580  all_eigenpairs = false;
1581  // as Fortran starts counting/indexing from 1 unlike C/C++, where it
1582  // starts from 0
1583  il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1584  iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1585  }
1586  NumberType *A_loc = this->values.data();
1587  /*
1588  * by setting lwork to -1 a workspace query for optimal length of work is
1589  * performed
1590  */
1591  int lwork = -1;
1592  int liwork = -1;
1593  NumberType *eigenvectors_loc =
1594  (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1595  work.resize(1);
1596  iwork.resize(1);
1597 
1598  if (all_eigenpairs)
1599  {
1600  psyev(&jobz,
1601  &uplo,
1602  &n_rows,
1603  A_loc,
1604  &submatrix_row,
1605  &submatrix_column,
1606  descriptor,
1607  ev.data(),
1608  eigenvectors_loc,
1609  &eigenvectors->submatrix_row,
1610  &eigenvectors->submatrix_column,
1611  eigenvectors->descriptor,
1612  work.data(),
1613  &lwork,
1614  &info);
1615  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1616  }
1617  else
1618  {
1619  char cmach = compute_eigenvectors ? 'U' : 'S';
1620  plamch(&(this->grid->blacs_context), &cmach, abstol);
1621  abstol *= 2;
1622  ifail.resize(n_rows);
1623  iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
1624  gap.resize(grid->n_process_rows * grid->n_process_columns);
1625 
1626  psyevx(&jobz,
1627  &range,
1628  &uplo,
1629  &n_rows,
1630  A_loc,
1631  &submatrix_row,
1632  &submatrix_column,
1633  descriptor,
1634  &vl,
1635  &vu,
1636  &il,
1637  &iu,
1638  &abstol,
1639  &m,
1640  &nz,
1641  ev.data(),
1642  &orfac,
1643  eigenvectors_loc,
1644  &eigenvectors->submatrix_row,
1645  &eigenvectors->submatrix_column,
1646  eigenvectors->descriptor,
1647  work.data(),
1648  &lwork,
1649  iwork.data(),
1650  &liwork,
1651  ifail.data(),
1652  iclustr.data(),
1653  gap.data(),
1654  &info);
1655  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1656  }
1657  lwork = static_cast<int>(work[0]);
1658  work.resize(lwork);
1659 
1660  if (all_eigenpairs)
1661  {
1662  psyev(&jobz,
1663  &uplo,
1664  &n_rows,
1665  A_loc,
1666  &submatrix_row,
1667  &submatrix_column,
1668  descriptor,
1669  ev.data(),
1670  eigenvectors_loc,
1671  &eigenvectors->submatrix_row,
1672  &eigenvectors->submatrix_column,
1673  eigenvectors->descriptor,
1674  work.data(),
1675  &lwork,
1676  &info);
1677 
1678  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1679  }
1680  else
1681  {
1682  liwork = iwork[0];
1683  AssertThrow(liwork > 0, ExcInternalError());
1684  iwork.resize(liwork);
1685 
1686  psyevx(&jobz,
1687  &range,
1688  &uplo,
1689  &n_rows,
1690  A_loc,
1691  &submatrix_row,
1692  &submatrix_column,
1693  descriptor,
1694  &vl,
1695  &vu,
1696  &il,
1697  &iu,
1698  &abstol,
1699  &m,
1700  &nz,
1701  ev.data(),
1702  &orfac,
1703  eigenvectors_loc,
1704  &eigenvectors->submatrix_row,
1705  &eigenvectors->submatrix_column,
1706  eigenvectors->descriptor,
1707  work.data(),
1708  &lwork,
1709  iwork.data(),
1710  &liwork,
1711  ifail.data(),
1712  iclustr.data(),
1713  gap.data(),
1714  &info);
1715 
1716  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1717  }
1718  // if eigenvectors are queried, copy eigenvectors to the original matrix;
1719  // as the temporary matrix eigenvectors has identical dimensions and
1720  // block-cyclic distribution, we simply swap the local array
1721  if (compute_eigenvectors)
1722  this->values.swap(eigenvectors->values);
1723 
1724  // adapt the size of ev to fit m upon return
1725  while (ev.size() > static_cast<size_type>(m))
1726  ev.pop_back();
1727  }
1728  /*
1729  * send number of computed eigenvalues to inactive processes
1730  */
1731  grid->send_to_inactive(&m, 1);
1732 
1733  /*
1734  * inactive processes have to resize array of eigenvalues
1735  */
1736  if (!grid->mpi_process_is_active)
1737  ev.resize(m);
1738  /*
1739  * send the eigenvalues to processors not being part of the process grid
1740  */
1741  grid->send_to_inactive(ev.data(), ev.size());
1742 
1743  /*
1744  * if only eigenvalues are queried the content of the matrix will be destroyed
1745  * if the eigenpairs are queried matrix A on exit stores the eigenvectors in
1746  * the columns
1747  */
1748  if (compute_eigenvectors)
1749  {
1750  property = LAPACKSupport::Property::general;
1751  state = LAPACKSupport::eigenvalues;
1752  }
1753  else
1754  state = LAPACKSupport::unusable;
1755 
1756  return ev;
1757 }
1758 
1759 
1760 
1761 template <typename NumberType>
1762 std::vector<NumberType>
1763 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
1764  const std::pair<unsigned int, unsigned int> &index_limits,
1765  const bool compute_eigenvectors)
1766 {
1767  // Check validity of index limits.
1768  AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
1769  AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));
1770 
1771  const std::pair<unsigned int, unsigned int> idx =
1772  std::make_pair(std::min(index_limits.first, index_limits.second),
1773  std::max(index_limits.first, index_limits.second));
1774 
1775  // Compute all eigenvalues/eigenvectors.
1776  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1777  return eigenpairs_symmetric_MRRR(compute_eigenvectors);
1778  else
1779  return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
1780 }
1781 
1782 
1783 
1784 template <typename NumberType>
1785 std::vector<NumberType>
1786 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
1787  const std::pair<NumberType, NumberType> &value_limits,
1788  const bool compute_eigenvectors)
1789 {
1790  AssertIsFinite(value_limits.first);
1791  AssertIsFinite(value_limits.second);
1792 
1793  const std::pair<unsigned int, unsigned int> indices =
1794  std::make_pair(numbers::invalid_unsigned_int,
1795  numbers::invalid_unsigned_int);
1796 
1797  return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
1798 }
1799 
1800 
1801 
1802 template <typename NumberType>
1803 std::vector<NumberType>
1804 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
1805  const bool compute_eigenvectors,
1806  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1807  const std::pair<NumberType, NumberType> & eigenvalue_limits)
1808 {
1809  Assert(state == LAPACKSupport::matrix,
1810  ExcMessage(
1811  "Matrix has to be in Matrix state before calling this function."));
1812  Assert(property == LAPACKSupport::symmetric,
1813  ExcMessage("Matrix has to be symmetric for this operation."));
1814 
1815  std::lock_guard<std::mutex> lock(mutex);
1816 
1817  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1818  std::isnan(eigenvalue_limits.second)) ?
1819  false :
1820  true;
1821  const bool use_indices =
1822  ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1823  (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1824  false :
1825  true;
1826 
1827  Assert(
1828  !(use_values && use_indices),
1829  ExcMessage(
1830  "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1831 
1832  // If computation of eigenvectors is not required, use a sufficiently small
1833  // distributed matrix.
1834  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1835  compute_eigenvectors ?
1836  std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1837  grid,
1838  row_block_size) :
1839  std::make_unique<ScaLAPACKMatrix<NumberType>>(
1840  grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1841 
1842  eigenvectors->property = property;
1843  // Number of eigenvalues to be returned from psyevr; upon successful exit ev
1844  // contains the m selected eigenvalues in ascending order.
1845  int m = n_rows;
1846  std::vector<NumberType> ev(n_rows);
1847 
1848  // Number of eigenvectors to be returned;
1849  // Upon successful exit the first m=nz columns contain the selected
1850  // eigenvectors (only if jobz=='V').
1851  int nz = 0;
1852 
1853  if (grid->mpi_process_is_active)
1854  {
1855  int info = 0;
1856  /*
1857  * For jobz==N only eigenvalues are computed, for jobz='V' also the
1858  * eigenvectors of the matrix are computed.
1859  */
1860  char jobz = compute_eigenvectors ? 'V' : 'N';
1861  // Default value is to compute all eigenvalues and optionally
1862  // eigenvectors.
1863  char range = 'A';
1864  NumberType vl = NumberType(), vu = NumberType();
1865  int il = 1, iu = 1;
1866 
1867  // Index range for eigenvalues is not specified.
1868  if (!use_indices)
1869  {
1870  // Interval for eigenvalues is not specified and consequently all
1871  // eigenvalues/eigenpairs will be computed.
1872  if (!use_values)
1873  {
1874  range = 'A';
1875  }
1876  else
1877  {
1878  range = 'V';
1879  vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1880  vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1881  }
1882  }
1883  else
1884  {
1885  range = 'I';
1886  // As Fortran starts counting/indexing from 1 unlike C/C++, where it
1887  // starts from 0.
1888  il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1889  iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1890  }
1891  NumberType *A_loc = this->values.data();
1892 
1893  /*
1894  * By setting lwork to -1 a workspace query for optimal length of work is
1895  * performed.
1896  */
1897  int lwork = -1;
1898  int liwork = -1;
1899  NumberType *eigenvectors_loc =
1900  (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1901  work.resize(1);
1902  iwork.resize(1);
1903 
1904  psyevr(&jobz,
1905  &range,
1906  &uplo,
1907  &n_rows,
1908  A_loc,
1909  &submatrix_row,
1910  &submatrix_column,
1911  descriptor,
1912  &vl,
1913  &vu,
1914  &il,
1915  &iu,
1916  &m,
1917  &nz,
1918  ev.data(),
1919  eigenvectors_loc,
1920  &eigenvectors->submatrix_row,
1921  &eigenvectors->submatrix_column,
1922  eigenvectors->descriptor,
1923  work.data(),
1924  &lwork,
1925  iwork.data(),
1926  &liwork,
1927  &info);
1928 
1929  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1930 
1931  lwork = static_cast<int>(work[0]);
1932  work.resize(lwork);
1933  liwork = iwork[0];
1934  iwork.resize(liwork);
1935 
1936  psyevr(&jobz,
1937  &range,
1938  &uplo,
1939  &n_rows,
1940  A_loc,
1941  &submatrix_row,
1942  &submatrix_column,
1943  descriptor,
1944  &vl,
1945  &vu,
1946  &il,
1947  &iu,
1948  &m,
1949  &nz,
1950  ev.data(),
1951  eigenvectors_loc,
1952  &eigenvectors->submatrix_row,
1953  &eigenvectors->submatrix_column,
1954  eigenvectors->descriptor,
1955  work.data(),
1956  &lwork,
1957  iwork.data(),
1958  &liwork,
1959  &info);
1960 
1961  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1962 
1963  if (compute_eigenvectors)
1964  AssertThrow(
1965  m == nz,
1966  ExcMessage(
1967  "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
1968 
1969  // If eigenvectors are queried, copy eigenvectors to original matrix.
1970  // As the temporary matrix eigenvectors has identical dimensions and
1971  // block-cyclic distribution we simply swap the local array.
1972  if (compute_eigenvectors)
1973  this->values.swap(eigenvectors->values);
1974 
1975  // Adapt the size of ev to fit m upon return.
1976  while (ev.size() > static_cast<size_type>(m))
1977  ev.pop_back();
1978  }
1979  /*
1980  * Send number of computed eigenvalues to inactive processes.
1981  */
1982  grid->send_to_inactive(&m, 1);
1983 
1984  /*
1985  * Inactive processes have to resize array of eigenvalues.
1986  */
1987  if (!grid->mpi_process_is_active)
1988  ev.resize(m);
1989  /*
1990  * Send the eigenvalues to processors not being part of the process grid.
1991  */
1992  grid->send_to_inactive(ev.data(), ev.size());
1993 
1994  /*
1995  * If only eigenvalues are queried, the content of the matrix will be
1996  * destroyed. If the eigenpairs are queried, matrix A on exit stores the
1997  * eigenvectors in the columns.
1998  */
1999  if (compute_eigenvectors)
2000  {
2001  property = LAPACKSupport::Property::general;
2002  state = LAPACKSupport::eigenvalues;
2003  }
2004  else
2005  state = LAPACKSupport::unusable;
2006 
2007  return ev;
2008 }
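Illustrative usage sketch (not part of scalapack.cc; the matrix size, block size, index range and communicator are assumptions): computing the lowest few eigenpairs of a symmetric matrix with the MRRR-based wrapper defined above. It presumes deal.II was configured with ScaLAPACK and that MPI has already been initialized.

  #include <deal.II/base/process_grid.h>
  #include <deal.II/lac/scalapack.h>

  #include <memory>
  #include <utility>
  #include <vector>

  void smallest_eigenpairs_sketch(const MPI_Comm comm)
  {
    const unsigned int size = 1000, block = 32; // assumed problem/block size
    const auto grid = std::make_shared<dealii::Utilities::MPI::ProcessGrid>(
      comm, size, size, block, block);
    dealii::ScaLAPACKMatrix<double> A(
      size, grid, block, dealii::LAPACKSupport::Property::symmetric);
    // ... fill A with a symmetric matrix ...
    // request the five smallest eigenvalues and the associated eigenvectors;
    // on successful exit A stores the eigenvectors column by column
    const std::vector<double> lambda =
      A.eigenpairs_symmetric_by_index_MRRR(std::make_pair(0U, 4U), true);
    (void)lambda;
  }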
2009 
2010 
2011 
2012 template <typename NumberType>
2013 std::vector<NumberType>
2014 ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
2015  ScaLAPACKMatrix<NumberType> *VT)
2016 {
2017  Assert(state == LAPACKSupport::matrix,
2018  ExcMessage(
2019  "Matrix has to be in Matrix state before calling this function."));
2020  Assert(row_block_size == column_block_size,
2021  ExcDimensionMismatch(row_block_size, column_block_size));
2022 
2023  const bool left_singluar_vectors = (U != nullptr) ? true : false;
2024  const bool right_singluar_vectors = (VT != nullptr) ? true : false;
2025 
2026  if (left_singluar_vectors)
2027  {
2028  Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
2029  Assert(U->n_rows == U->n_columns,
2030  ExcDimensionMismatch(U->n_rows, U->n_columns));
2031  Assert(row_block_size == U->row_block_size,
2032  ExcDimensionMismatch(row_block_size, U->row_block_size));
2033  Assert(column_block_size == U->column_block_size,
2034  ExcDimensionMismatch(column_block_size, U->column_block_size));
2035  Assert(grid->blacs_context == U->grid->blacs_context,
2036  ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
2037  }
2038  if (right_singluar_vectors)
2039  {
2040  Assert(n_columns == VT->n_rows,
2041  ExcDimensionMismatch(n_columns, VT->n_rows));
2042  Assert(VT->n_rows == VT->n_columns,
2043  ExcDimensionMismatch(VT->n_rows, VT->n_columns));
2044  Assert(row_block_size == VT->row_block_size,
2045  ExcDimensionMismatch(row_block_size, VT->row_block_size));
2046  Assert(column_block_size == VT->column_block_size,
2047  ExcDimensionMismatch(column_block_size, VT->column_block_size));
2048  Assert(grid->blacs_context == VT->grid->blacs_context,
2049  ExcDimensionMismatch(grid->blacs_context,
2050  VT->grid->blacs_context));
2051  }
2052  std::lock_guard<std::mutex> lock(mutex);
2053 
2054  std::vector<NumberType> sv(std::min(n_rows, n_columns));
2055 
2056  if (grid->mpi_process_is_active)
2057  {
2058  char jobu = left_singluar_vectors ? 'V' : 'N';
2059  char jobvt = right_singluar_vectors ? 'V' : 'N';
2060  NumberType *A_loc = this->values.data();
2061  NumberType *U_loc = left_singluar_vectors ? U->values.data() : nullptr;
2062  NumberType *VT_loc = right_singluar_vectors ? VT->values.data() : nullptr;
2063  int info = 0;
2064  /*
2065  * by setting lwork to -1 a workspace query for optimal length of work is
2066  * performed
2067  */
2068  int lwork = -1;
2069  work.resize(1);
2070 
2071  pgesvd(&jobu,
2072  &jobvt,
2073  &n_rows,
2074  &n_columns,
2075  A_loc,
2076  &submatrix_row,
2077  &submatrix_column,
2078  descriptor,
2079  &*sv.begin(),
2080  U_loc,
2081  &U->submatrix_row,
2082  &U->submatrix_column,
2083  U->descriptor,
2084  VT_loc,
2085  &VT->submatrix_row,
2086  &VT->submatrix_column,
2087  VT->descriptor,
2088  work.data(),
2089  &lwork,
2090  &info);
2091  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2092 
2093  lwork = static_cast<int>(work[0]);
2094  work.resize(lwork);
2095 
2096  pgesvd(&jobu,
2097  &jobvt,
2098  &n_rows,
2099  &n_columns,
2100  A_loc,
2101  &submatrix_row,
2102  &submatrix_column,
2103  descriptor,
2104  &*sv.begin(),
2105  U_loc,
2106  &U->submatrix_row,
2107  &U->submatrix_column,
2108  U->descriptor,
2109  VT_loc,
2110  &VT->submatrix_row,
2111  &VT->submatrix_column,
2112  VT->descriptor,
2113  work.data(),
2114  &lwork,
2115  &info);
2116  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2117  }
2118 
2119  /*
2120  * send the singular values to processors not being part of the process grid
2121  */
2122  grid->send_to_inactive(sv.data(), sv.size());
2123 
2124  property = LAPACKSupport::Property::general;
2125  state = LAPACKSupport::unusable;
2126 
2127  return sv;
2128 }
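Illustrative sketch of calling the routine above (sizes are assumptions): the singular vectors are optional, but when requested U must be an n_rows x n_rows matrix and VT an n_columns x n_columns matrix, distributed on the same process grid with the same square block size.

  #include <deal.II/lac/scalapack.h>

  void svd_sketch(
    const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
  {
    const unsigned int m = 800, n = 300, nb = 32; // assumed sizes
    dealii::ScaLAPACKMatrix<double> A(m, n, grid, nb, nb);
    // ... fill A ...
    dealii::ScaLAPACKMatrix<double> U(m, m, grid, nb, nb);
    dealii::ScaLAPACKMatrix<double> VT(n, n, grid, nb, nb);
    // sv holds the min(m,n) singular values in descending order; A is overwritten
    const std::vector<double> sv = A.compute_SVD(&U, &VT);
    (void)sv;
  }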
2129 
2130 
2131 
2132 template <typename NumberType>
2133 void
2134 ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
2135  const bool transpose)
2136 {
2137  Assert(grid == B.grid,
2138  ExcMessage("The matrices A and B need to have the same process grid"));
2139  Assert(state == LAPACKSupport::matrix,
2140  ExcMessage(
2141  "Matrix has to be in Matrix state before calling this function."));
2142  Assert(B.state == LAPACKSupport::matrix,
2143  ExcMessage(
2144  "Matrix B has to be in Matrix state before calling this function."));
2145 
2146  if (transpose)
2147  {
2148  Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
2149  }
2150  else
2151  {
2152  Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
2153  }
2154 
2155  // see
2156  // https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgels.htm
2157  Assert(row_block_size == column_block_size,
2158  ExcMessage(
2159  "Use identical block sizes for rows and columns of matrix A"));
2160  Assert(B.row_block_size == B.column_block_size,
2161  ExcMessage(
2162  "Use identical block sizes for rows and columns of matrix B"));
2163  Assert(row_block_size == B.row_block_size,
2164  ExcMessage(
2165  "Use identical block-cyclic distribution for matrices A and B"));
2166 
2167  std::lock_guard<std::mutex> lock(mutex);
2168 
2169  if (grid->mpi_process_is_active)
2170  {
2171  char trans = transpose ? 'T' : 'N';
2172  NumberType *A_loc = this->values.data();
2173  NumberType *B_loc = B.values.data();
2174  int info = 0;
2175  /*
2176  * by setting lwork to -1 a workspace query for optimal length of work is
2177  * performed
2178  */
2179  int lwork = -1;
2180  work.resize(1);
2181 
2182  pgels(&trans,
2183  &n_rows,
2184  &n_columns,
2185  &B.n_columns,
2186  A_loc,
2187  &submatrix_row,
2188  &submatrix_column,
2189  descriptor,
2190  B_loc,
2191  &B.submatrix_row,
2192  &B.submatrix_column,
2193  B.descriptor,
2194  work.data(),
2195  &lwork,
2196  &info);
2197  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2198 
2199  lwork = static_cast<int>(work[0]);
2200  work.resize(lwork);
2201 
2202  pgels(&trans,
2203  &n_rows,
2204  &n_columns,
2205  &B.n_columns,
2206  A_loc,
2207  &submatrix_row,
2208  &submatrix_column,
2209  descriptor,
2210  B_loc,
2211  &B.submatrix_row,
2212  &B.submatrix_column,
2213  B.descriptor,
2214  work.data(),
2215  &lwork,
2216  &info);
2217  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2218  }
2219  state = LAPACKSupport::unusable;
2220 }
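Illustrative sketch for the solver above (sizes are assumptions): A and B must share one square block size, and on exit B is overwritten, with its first n_columns rows holding the least-squares solution(s).

  #include <deal.II/lac/scalapack.h>

  void least_squares_sketch(
    const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
  {
    const unsigned int m = 1000, n = 50, nrhs = 10, nb = 10; // assumed sizes
    dealii::ScaLAPACKMatrix<double> A(m, n, grid, nb, nb);
    dealii::ScaLAPACKMatrix<double> B(m, nrhs, grid, nb, nb); // right-hand sides
    // ... fill A (full rank) and B ...
    A.least_squares(B); // transpose == false: minimize ||A x - B||
    // on exit the first n rows of B contain the least-squares solutions
  }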
2221 
2222 
2223 
2224 template <typename NumberType>
2225 unsigned int
2226 ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
2227 {
2228  Assert(state == LAPACKSupport::matrix,
2229  ExcMessage(
2230  "Matrix has to be in Matrix state before calling this function."));
2231  Assert(row_block_size == column_block_size,
2232  ExcMessage(
2233  "Use identical block sizes for rows and columns of matrix A"));
2234  Assert(
2235  ratio > 0. && ratio < 1.,
2236  ExcMessage(
2237  "input parameter ratio has to be larger than zero and smaller than 1"));
2238 
2239  ScaLAPACKMatrix<NumberType> U(n_rows,
2240  n_rows,
2241  grid,
2242  row_block_size,
2243  row_block_size,
2244  LAPACKSupport::Property::general);
2245  ScaLAPACKMatrix<NumberType> VT(n_columns,
2246  n_columns,
2247  grid,
2248  row_block_size,
2249  row_block_size,
2250  LAPACKSupport::Property::general);
2251  std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
2252  AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
2253  ExcMessage("Matrix has rank 0"));
2254 
2255  // Get the number of singular values fulfilling sv[i] > sv[0] * ratio.
2256  // Obviously, the 0-th element already satisfies sv[0] > sv[0] * ratio. The
2257  // singular values in sv are ordered by descending value, so we break out of
2258  // the loop if a singular value is smaller than sv[0] * ratio.
2259  unsigned int n_sv = 1;
2260  std::vector<NumberType> inv_sigma;
2261  inv_sigma.push_back(1 / sv[0]);
2262 
2263  for (unsigned int i = 1; i < sv.size(); ++i)
2264  if (sv[i] > sv[0] * ratio)
2265  {
2266  ++n_sv;
2267  inv_sigma.push_back(1 / sv[i]);
2268  }
2269  else
2270  break;
2271 
2272  // For the matrix multiplication we use only the columns of U and rows of VT
2273  // which are associated with singular values larger than the limit. That saves
2274  // computational time for matrices with rank significantly smaller than
2275  // min(n_rows,n_columns)
2276  ScaLAPACKMatrix<NumberType> U_R(n_rows,
2277  n_sv,
2278  grid,
2279  row_block_size,
2280  row_block_size,
2281  LAPACKSupport::Property::general);
2282  ScaLAPACKMatrix<NumberType> VT_R(n_sv,
2283  n_columns,
2284  grid,
2285  row_block_size,
2286  row_block_size,
2287  LAPACKSupport::Property::general);
2288  U.copy_to(U_R,
2289  std::make_pair(0, 0),
2290  std::make_pair(0, 0),
2291  std::make_pair(n_rows, n_sv));
2292  VT.copy_to(VT_R,
2293  std::make_pair(0, 0),
2294  std::make_pair(0, 0),
2295  std::make_pair(n_sv, n_columns));
2296 
2297  VT_R.scale_rows(inv_sigma);
2298  this->reinit(n_columns,
2299  n_rows,
2300  this->grid,
2301  row_block_size,
2302  column_block_size,
2303  LAPACKSupport::Property::general);
2304  VT_R.mult(1, U_R, 0, *this, true, true);
2306  return n_sv;
2307 }
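Illustrative sketch (sizes and the truncation ratio are assumptions): replacing a rectangular matrix in place by its truncated pseudoinverse built from the singular values retained above.

  #include <deal.II/lac/scalapack.h>

  void pseudoinverse_sketch(
    const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
  {
    const unsigned int m = 500, n = 200, nb = 32; // assumed sizes
    dealii::ScaLAPACKMatrix<double> A(m, n, grid, nb, nb);
    // ... fill A ...
    // keep only singular values larger than 1e-8 * sigma_max; afterwards the
    // object holds the n x m pseudoinverse and the return value is the number
    // of singular values kept
    const unsigned int effective_rank = A.pseudoinverse(1e-8);
    (void)effective_rank;
  }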
2308 
2309 
2310 
2311 template <typename NumberType>
2312 NumberType
2313 ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
2314  const NumberType a_norm) const
2315 {
2316  Assert(state == LAPACKSupport::cholesky,
2317  ExcMessage(
2318  "Matrix has to be in Cholesky state before calling this function."));
2319  std::lock_guard<std::mutex> lock(mutex);
2320  NumberType rcond = 0.;
2321 
2322  if (grid->mpi_process_is_active)
2323  {
2324  int liwork = n_local_rows;
2325  iwork.resize(liwork);
2326 
2327  int info = 0;
2328  const NumberType *A_loc = this->values.data();
2329 
2330  // by setting lwork to -1 a workspace query for optimal length of work is
2331  // performed
2332  int lwork = -1;
2333  work.resize(1);
2334  ppocon(&uplo,
2335  &n_columns,
2336  A_loc,
2337  &submatrix_row,
2338  &submatrix_column,
2339  descriptor,
2340  &a_norm,
2341  &rcond,
2342  work.data(),
2343  &lwork,
2344  iwork.data(),
2345  &liwork,
2346  &info);
2347  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2348  lwork = static_cast<int>(std::ceil(work[0]));
2349  work.resize(lwork);
2350 
2351  // now the actual run:
2352  ppocon(&uplo,
2353  &n_columns,
2354  A_loc,
2355  &submatrix_row,
2356  &submatrix_column,
2357  descriptor,
2358  &a_norm,
2359  &rcond,
2360  work.data(),
2361  &lwork,
2362  iwork.data(),
2363  &liwork,
2364  &info);
2365  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2366  }
2367  grid->send_to_inactive(&rcond);
2368  return rcond;
2369 }
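Illustrative sketch of how the routine above is typically used (the matrix is assumed to be symmetric positive definite and in the 'matrix' state): the norm must be computed before the Cholesky factorization, because afterwards the original entries are no longer available.

  #include <deal.II/lac/scalapack.h>

  double condition_number_sketch(dealii::ScaLAPACKMatrix<double> &A)
  {
    const double a_norm = A.l1_norm();   // norm of the original matrix
    A.compute_cholesky_factorization();  // puts A into the 'cholesky' state
    const double rcond = A.reciprocal_condition_number(a_norm);
    return 1. / rcond;                   // estimate of the l1 condition number
  }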
2370 
2371 
2372 
2373 template <typename NumberType>
2374 NumberType
2375 ScaLAPACKMatrix<NumberType>::l1_norm() const
2376 {
2377  const char type('O');
2378 
2379  if (property == LAPACKSupport::symmetric)
2380  return norm_symmetric(type);
2381  else
2382  return norm_general(type);
2383 }
2384 
2385 
2386 
2387 template <typename NumberType>
2388 NumberType
2389 ScaLAPACKMatrix<NumberType>::linfty_norm() const
2390 {
2391  const char type('I');
2392 
2393  if (property == LAPACKSupport::symmetric)
2394  return norm_symmetric(type);
2395  else
2396  return norm_general(type);
2397 }
2398 
2399 
2400 
2401 template <typename NumberType>
2402 NumberType
2403 ScaLAPACKMatrix<NumberType>::frobenius_norm() const
2404 {
2405  const char type('F');
2406 
2407  if (property == LAPACKSupport::symmetric)
2408  return norm_symmetric(type);
2409  else
2410  return norm_general(type);
2411 }
2412 
2413 
2414 
2415 template <typename NumberType>
2416 NumberType
2417 ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
2418 {
2419  Assert(state == LAPACKSupport::matrix ||
2420  state == LAPACKSupport::inverse_matrix,
2421  ExcMessage("norms can be called in matrix state only."));
2422  std::lock_guard<std::mutex> lock(mutex);
2423  NumberType res = 0.;
2424 
2425  if (grid->mpi_process_is_active)
2426  {
2427  const int iarow = indxg2p_(&submatrix_row,
2428  &row_block_size,
2429  &(grid->this_process_row),
2430  &first_process_row,
2431  &(grid->n_process_rows));
2432  const int iacol = indxg2p_(&submatrix_column,
2433  &column_block_size,
2434  &(grid->this_process_column),
2435  &first_process_column,
2436  &(grid->n_process_columns));
2437  const int mp0 = numroc_(&n_rows,
2438  &row_block_size,
2439  &(grid->this_process_row),
2440  &iarow,
2441  &(grid->n_process_rows));
2442  const int nq0 = numroc_(&n_columns,
2443  &column_block_size,
2444  &(grid->this_process_column),
2445  &iacol,
2446  &(grid->n_process_columns));
2447 
2448  // type='M': compute largest absolute value
2449  // type='F' || type='E': compute Frobenius norm
2450  // type='O' || type='1': compute one norm; type='I': compute infinity norm
2451  int lwork = 0; // for type == 'M' || type == 'F' || type == 'E'
2452  if (type == 'O' || type == '1')
2453  lwork = nq0;
2454  else if (type == 'I')
2455  lwork = mp0;
2456 
2457  work.resize(lwork);
2458  const NumberType *A_loc = this->values.begin();
2459  res = plange(&type,
2460  &n_rows,
2461  &n_columns,
2462  A_loc,
2463  &submatrix_row,
2464  &submatrix_column,
2465  descriptor,
2466  work.data());
2467  }
2468  grid->send_to_inactive(&res);
2469  return res;
2470 }
2471 
2472 
2473 
2474 template <typename NumberType>
2475 NumberType
2476 ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
2477 {
2478  Assert(state == LAPACKSupport::matrix ||
2479  state == LAPACKSupport::inverse_matrix,
2480  ExcMessage("norms can be called in matrix state only."));
2481  Assert(property == LAPACKSupport::symmetric,
2482  ExcMessage("Matrix has to be symmetric for this operation."));
2483  std::lock_guard<std::mutex> lock(mutex);
2484  NumberType res = 0.;
2485 
2486  if (grid->mpi_process_is_active)
2487  {
2488  // int IROFFA = MOD( IA-1, MB_A )
2489  // int ICOFFA = MOD( JA-1, NB_A )
2490  const int lcm =
2491  ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
2492  const int v2 = lcm / (grid->n_process_rows);
2493 
2494  const int IAROW = indxg2p_(&submatrix_row,
2495  &row_block_size,
2496  &(grid->this_process_row),
2497  &first_process_row,
2498  &(grid->n_process_rows));
2499  const int IACOL = indxg2p_(&submatrix_column,
2500  &column_block_size,
2501  &(grid->this_process_column),
2502  &first_process_column,
2503  &(grid->n_process_columns));
2504  const int Np0 = numroc_(&n_columns /*+IROFFA*/,
2505  &row_block_size,
2506  &(grid->this_process_row),
2507  &IAROW,
2508  &(grid->n_process_rows));
2509  const int Nq0 = numroc_(&n_columns /*+ICOFFA*/,
2510  &column_block_size,
2511  &(grid->this_process_column),
2512  &IACOL,
2513  &(grid->n_process_columns));
2514 
2515  const int v1 = iceil_(&Np0, &row_block_size);
2516  const int ldw = (n_local_rows == n_local_columns) ?
2517  0 :
2518  row_block_size * iceil_(&v1, &v2);
2519 
2520  const int lwork =
2521  (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
2522  work.resize(lwork);
2523  const NumberType *A_loc = this->values.begin();
2524  res = plansy(&type,
2525  &uplo,
2526  &n_columns,
2527  A_loc,
2528  &submatrix_row,
2529  &submatrix_column,
2530  descriptor,
2531  work.data());
2532  }
2533  grid->send_to_inactive(&res);
2534  return res;
2535 }
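Illustrative sketch: the public norms defined above choose the character code and dispatch to norm_general() or norm_symmetric() depending on the stored property; from user code one simply calls the three member functions.

  #include <deal.II/lac/scalapack.h>

  void norm_sketch(const dealii::ScaLAPACKMatrix<double> &A)
  {
    const double n1   = A.l1_norm();        // 'O': maximum column sum
    const double ninf = A.linfty_norm();    // 'I': maximum row sum
    const double nF   = A.frobenius_norm(); // 'F': Frobenius norm
    (void)n1;
    (void)ninf;
    (void)nF;
  }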
2536 
2537 
2538 
2539 # ifdef DEAL_II_WITH_HDF5
2540 namespace internal
2541 {
2542  namespace
2543  {
2544  void
2545  create_HDF5_state_enum_id(hid_t &state_enum_id)
2546  {
2547  // create HDF5 enum type for LAPACKSupport::State
2548  LAPACKSupport::State val;
2549  state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));
2550  val = LAPACKSupport::State::cholesky;
2551  herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
2552  AssertThrow(status >= 0, ExcInternalError());
2553  val = LAPACKSupport::State::eigenvalues;
2554  status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
2555  AssertThrow(status >= 0, ExcInternalError());
2556  val = LAPACKSupport::State::inverse_matrix;
2557  status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
2558  AssertThrow(status >= 0, ExcInternalError());
2559  val = LAPACKSupport::State::inverse_svd;
2560  status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
2561  AssertThrow(status >= 0, ExcInternalError());
2562  val = LAPACKSupport::State::lu;
2563  status = H5Tenum_insert(state_enum_id, "lu", &val);
2564  AssertThrow(status >= 0, ExcInternalError());
2565  val = LAPACKSupport::State::matrix;
2566  status = H5Tenum_insert(state_enum_id, "matrix", &val);
2567  AssertThrow(status >= 0, ExcInternalError());
2568  val = LAPACKSupport::State::svd;
2569  status = H5Tenum_insert(state_enum_id, "svd", &val);
2570  AssertThrow(status >= 0, ExcInternalError());
2571  val = LAPACKSupport::State::unusable;
2572  status = H5Tenum_insert(state_enum_id, "unusable", &val);
2573  AssertThrow(status >= 0, ExcInternalError());
2574  }
2575 
2576  void
2577  create_HDF5_property_enum_id(hid_t &property_enum_id)
2578  {
2579  // create HDF5 enum type for LAPACKSupport::Property
2580  property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));
2581  LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
2582  herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
2583  AssertThrow(status >= 0, ExcInternalError());
2584  prop = LAPACKSupport::Property::general;
2585  status = H5Tenum_insert(property_enum_id, "general", &prop);
2586  AssertThrow(status >= 0, ExcInternalError());
2587  prop = LAPACKSupport::Property::hessenberg;
2588  status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
2589  AssertThrow(status >= 0, ExcInternalError());
2590  prop = LAPACKSupport::Property::lower_triangular;
2591  status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
2592  AssertThrow(status >= 0, ExcInternalError());
2593  prop = LAPACKSupport::Property::symmetric;
2594  status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
2595  AssertThrow(status >= 0, ExcInternalError());
2596  prop = LAPACKSupport::Property::upper_triangular;
2597  status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
2598  AssertThrow(status >= 0, ExcInternalError());
2599  }
2600  } // namespace
2601 } // namespace internal
2602 # endif
2603 
2604 
2605 
2606 template <typename NumberType>
2607 void
2608 ScaLAPACKMatrix<NumberType>::save(
2609  const std::string & filename,
2610  const std::pair<unsigned int, unsigned int> &chunk_size) const
2611 {
2612 # ifndef DEAL_II_WITH_HDF5
2613  (void)filename;
2614  (void)chunk_size;
2615  AssertThrow(false, ExcNeedsHDF5());
2616 # else
2617 
2618  std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
2619 
2620  if (chunks_size_.first == numbers::invalid_unsigned_int ||
2621  chunks_size_.second == numbers::invalid_unsigned_int)
2622  {
2623  // default: store the matrix in chunks of columns
2624  chunks_size_.first = n_rows;
2625  chunks_size_.second = 1;
2626  }
2627  Assert(chunks_size_.first > 0,
2628  ExcMessage("The row chunk size must be larger than 0."));
2629  AssertIndexRange(chunks_size_.first, n_rows + 1);
2630  Assert(chunks_size_.second > 0,
2631  ExcMessage("The column chunk size must be larger than 0."));
2632  AssertIndexRange(chunks_size_.second, n_columns + 1);
2633 
2634 # ifdef H5_HAVE_PARALLEL
2635  // implementation for configurations equipped with a parallel file system
2636  save_parallel(filename, chunks_size_);
2637 
2638 # else
2639  // implementation for configurations with no parallel file system
2640  save_serial(filename, chunks_size_);
2641 
2642 # endif
2643 # endif
2644 }
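Illustrative sketch (file name, sizes and chunk shape are assumptions; requires deal.II built with HDF5): a save/load round trip using explicit chunking.

  #include <deal.II/lac/scalapack.h>

  #include <utility>

  void save_and_load_sketch(
    const std::shared_ptr<const dealii::Utilities::MPI::ProcessGrid> &grid)
  {
    const unsigned int m = 400, n = 400, nb = 32; // assumed sizes
    dealii::ScaLAPACKMatrix<double> A(m, n, grid, nb, nb);
    // ... fill A ...
    // write in chunks of 64 rows x 64 columns; omitting the second argument
    // stores the matrix in chunks of single columns
    A.save("matrix.h5", std::make_pair(64U, 64U));

    dealii::ScaLAPACKMatrix<double> B(m, n, grid, nb, nb);
    B.load("matrix.h5"); // restores the entries as well as state and property
  }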
2645 
2646 
2647 
2648 template <typename NumberType>
2649 void
2650 ScaLAPACKMatrix<NumberType>::save_serial(
2651  const std::string & filename,
2652  const std::pair<unsigned int, unsigned int> &chunk_size) const
2653 {
2654 # ifndef DEAL_II_WITH_HDF5
2655  (void)filename;
2656  (void)chunk_size;
2657  Assert(false, ExcInternalError());
2658 # else
2659 
2660  /*
2661  * The content of the distributed matrix is copied to a matrix using a 1x1
2662  * process grid. Therefore, one process has all the data and can write it to a
2663  * file.
2664  *
2665  * Create a 1x1 column grid which will be used to initialize
2666  * an effectively serial ScaLAPACK matrix to gather the contents from the
2667  * current object
2668  */
2669  const auto column_grid =
2670  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2671  1,
2672  1);
2673 
2674  const int MB = n_rows, NB = n_columns;
2675  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2676  copy_to(tmp);
2677 
2678  // the 1x1 grid has only one process and this one writes
2679  // the content of the matrix to the HDF5 file
2680  if (tmp.grid->mpi_process_is_active)
2681  {
2682  herr_t status;
2683 
2684  // create a new file using default properties
2685  hid_t file_id =
2686  H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
2687 
2688  // modify dataset creation properties, i.e. enable chunking
2689  hsize_t chunk_dims[2];
2690  // revert order of rows and columns as ScaLAPACK uses column-major
2691  // ordering
2692  chunk_dims[0] = chunk_size.second;
2693  chunk_dims[1] = chunk_size.first;
2694  hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
2695  status = H5Pset_chunk(data_property, 2, chunk_dims);
2696  AssertThrow(status >= 0, ExcIO());
2697 
2698  // create the data space for the dataset
2699  hsize_t dims[2];
2700  // change order of rows and columns as ScaLAPACKMatrix uses column major
2701  // ordering
2702  dims[0] = n_columns;
2703  dims[1] = n_rows;
2704  hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
2705 
2706  // create the dataset within the file using chunk creation properties
2707  hid_t type_id = hdf5_type_id(tmp.values.data());
2708  hid_t dataset_id = H5Dcreate2(file_id,
2709  "/matrix",
2710  type_id,
2711  dataspace_id,
2712  H5P_DEFAULT,
2713  data_property,
2714  H5P_DEFAULT);
2715 
2716  // write the dataset
2717  status = H5Dwrite(
2718  dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
2719  AssertThrow(status >= 0, ExcIO());
2720 
2721  // create HDF5 enum type for LAPACKSupport::State and
2722  // LAPACKSupport::Property
2723  hid_t state_enum_id, property_enum_id;
2724  internal::create_HDF5_state_enum_id(state_enum_id);
2725  internal::create_HDF5_property_enum_id(property_enum_id);
2726 
2727  // create the data space for the state enum
2728  hsize_t dims_state[1];
2729  dims_state[0] = 1;
2730  hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2731  // create the dataset for the state enum
2732  hid_t state_enum_dataset = H5Dcreate2(file_id,
2733  "/state",
2734  state_enum_id,
2735  state_enum_dataspace,
2736  H5P_DEFAULT,
2737  H5P_DEFAULT,
2738  H5P_DEFAULT);
2739  // write the dataset for the state enum
2740  status = H5Dwrite(state_enum_dataset,
2741  state_enum_id,
2742  H5S_ALL,
2743  H5S_ALL,
2744  H5P_DEFAULT,
2745  &state);
2746  AssertThrow(status >= 0, ExcIO());
2747 
2748  // create the data space for the property enum
2749  hsize_t dims_property[1];
2750  dims_property[0] = 1;
2751  hid_t property_enum_dataspace =
2752  H5Screate_simple(1, dims_property, nullptr);
2753  // create the dataset for the property enum
2754  hid_t property_enum_dataset = H5Dcreate2(file_id,
2755  "/property",
2756  property_enum_id,
2757  property_enum_dataspace,
2758  H5P_DEFAULT,
2759  H5P_DEFAULT,
2760  H5P_DEFAULT);
2761  // write the dataset for the property enum
2762  status = H5Dwrite(property_enum_dataset,
2763  property_enum_id,
2764  H5S_ALL,
2765  H5S_ALL,
2766  H5P_DEFAULT,
2767  &property);
2768  AssertThrow(status >= 0, ExcIO());
2769 
2770  // end access to the datasets and release resources used by them
2771  status = H5Dclose(dataset_id);
2772  AssertThrow(status >= 0, ExcIO());
2773  status = H5Dclose(state_enum_dataset);
2774  AssertThrow(status >= 0, ExcIO());
2775  status = H5Dclose(property_enum_dataset);
2776  AssertThrow(status >= 0, ExcIO());
2777 
2778  // terminate access to the data spaces
2779  status = H5Sclose(dataspace_id);
2780  AssertThrow(status >= 0, ExcIO());
2781  status = H5Sclose(state_enum_dataspace);
2782  AssertThrow(status >= 0, ExcIO());
2783  status = H5Sclose(property_enum_dataspace);
2784  AssertThrow(status >= 0, ExcIO());
2785 
2786  // release enum data types
2787  status = H5Tclose(state_enum_id);
2788  AssertThrow(status >= 0, ExcIO());
2789  status = H5Tclose(property_enum_id);
2790  AssertThrow(status >= 0, ExcIO());
2791 
2792  // release the creation property
2793  status = H5Pclose(data_property);
2794  AssertThrow(status >= 0, ExcIO());
2795 
2796  // close the file.
2797  status = H5Fclose(file_id);
2798  AssertThrow(status >= 0, ExcIO());
2799  }
2800 # endif
2801 }
2802 
2803 
2804 
2805 template <typename NumberType>
2806 void
2807 ScaLAPACKMatrix<NumberType>::save_parallel(
2808  const std::string & filename,
2809  const std::pair<unsigned int, unsigned int> &chunk_size) const
2810 {
2811 # ifndef DEAL_II_WITH_HDF5
2812  (void)filename;
2813  (void)chunk_size;
2814  Assert(false, ExcInternalError());
2815 # else
2816 
2817  const unsigned int n_mpi_processes(
2818  Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
2819  MPI_Info info = MPI_INFO_NULL;
2820  /*
2821  * The content of the distributed matrix is copied to a matrix using a
2822  * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
2823  * of the matrix, which they can write to the file
2824  *
2825  * Create a 1xn_processes column grid
2826  */
2827  const auto column_grid =
2828  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2829  1,
2830  n_mpi_processes);
2831 
2832  const int MB = n_rows;
2833  /*
2834  * If the ratio n_columns/n_mpi_processes is smaller than the column block
2835  * size of the original matrix, the redistribution and saving of the matrix
2836  * requires a significant amount of MPI communication. Therefore, it is better
2837  * to set a minimum value for the block size NB, so that only
2838  * ceil(n_columns/NB) processes are actively involved in saving the matrix.
2839  * Example: A 2*10^9 x 400 matrix is distributed on a 80 x 5 process grid
2840  * using block size 32. Instead of distributing the matrix on a 1 x 400
2841  * process grid with a row block size of 2*10^9 and a column block size of 1,
2842  * the minimum value for NB ensures that only ceil(400/32)=13 processes will
2843  * be writing the matrix to disk.
2844  */
2845  const int NB = std::max(static_cast<int>(std::ceil(
2846  static_cast<double>(n_columns) / n_mpi_processes)),
2847  column_block_size);
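  // Worked instance of the formula above with the numbers quoted in the
  // comment (assumed values, shown only to make the arithmetic explicit):
  //   n_columns = 400, n_mpi_processes = 80 * 5 = 400, column_block_size = 32
  //   NB = max(ceil(400.0 / 400), 32) = max(1, 32) = 32
  //   => ceil(400 / 32) = 13 processes own columns of tmp and write to the file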
2848 
2849  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2850  copy_to(tmp);
2851 
2852  // get pointer to data held by the process
2853  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
2854 
2855  herr_t status;
2856  // dataset dimensions
2857  hsize_t dims[2];
2858 
2859  // set up file access property list with parallel I/O access
2860  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
2861  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2862  AssertThrow(status >= 0, ExcIO());
2863 
2864  // create a new file collectively and release property list identifier
2865  hid_t file_id =
2866  H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
2867  status = H5Pclose(plist_id);
2868  AssertThrow(status >= 0, ExcIO());
2869 
2870  // As ScaLAPACK, and therefore the class ScaLAPACKMatrix, uses column-major
2871  // ordering but HDF5 uses row-major ordering, we have to reverse entries
2872  // related to columns and rows in the following. Create the dataspace for the dataset:
2873  dims[0] = tmp.n_columns;
2874  dims[1] = tmp.n_rows;
2875 
2876  hid_t filespace = H5Screate_simple(2, dims, nullptr);
2877 
2878  // create the chunked dataset with default properties and close filespace
2879  hsize_t chunk_dims[2];
2880  // revert order of rows and columns as ScaLAPACK uses column-major ordering
2881  chunk_dims[0] = chunk_size.second;
2882  chunk_dims[1] = chunk_size.first;
2883  plist_id = H5Pcreate(H5P_DATASET_CREATE);
2884  H5Pset_chunk(plist_id, 2, chunk_dims);
2885  hid_t type_id = hdf5_type_id(data);
2886  hid_t dset_id = H5Dcreate2(
2887  file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
2888 
2889  status = H5Sclose(filespace);
2890  AssertThrow(status >= 0, ExcIO());
2891 
2892  status = H5Pclose(plist_id);
2893  AssertThrow(status >= 0, ExcIO());
2894 
2895  // gather the number of local rows and columns from all processes
2896  std::vector<int> proc_n_local_rows(n_mpi_processes),
2897  proc_n_local_columns(n_mpi_processes);
2898  int ierr = MPI_Allgather(&tmp.n_local_rows,
2899  1,
2900  MPI_INT,
2901  proc_n_local_rows.data(),
2902  1,
2903  MPI_INT,
2904  tmp.grid->mpi_communicator);
2905  AssertThrowMPI(ierr);
2906  ierr = MPI_Allgather(&tmp.n_local_columns,
2907  1,
2908  MPI_INT,
2909  proc_n_local_columns.data(),
2910  1,
2911  MPI_INT,
2912  tmp.grid->mpi_communicator);
2913  AssertThrowMPI(ierr);
2914 
2915  const unsigned int my_rank(
2916  Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
2917 
2918  // hyperslab selection parameters
2919  // each process defines dataset in memory and writes it to the hyperslab in
2920  // the file
2921  hsize_t count[2];
2922  count[0] = tmp.n_local_columns;
2923  count[1] = tmp.n_rows;
2924  hid_t memspace = H5Screate_simple(2, count, nullptr);
2925 
2926  hsize_t offset[2] = {0};
2927  for (unsigned int i = 0; i < my_rank; ++i)
2928  offset[0] += proc_n_local_columns[i];
2929 
2930  // select hyperslab in the file.
2931  filespace = H5Dget_space(dset_id);
2932  status = H5Sselect_hyperslab(
2933  filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2934  AssertThrow(status >= 0, ExcIO());
2935 
2936  // create property list for independent dataset write
2937  plist_id = H5Pcreate(H5P_DATASET_XFER);
2938  status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
2939  AssertThrow(status >= 0, ExcIO());
2940 
2941  // process with no data will not participate in writing to the file
2942  if (tmp.values.size() > 0)
2943  {
2944  status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
2945  AssertThrow(status >= 0, ExcIO());
2946  }
2947  // close/release sources
2948  status = H5Dclose(dset_id);
2949  AssertThrow(status >= 0, ExcIO());
2950  status = H5Sclose(filespace);
2951  AssertThrow(status >= 0, ExcIO());
2952  status = H5Sclose(memspace);
2953  AssertThrow(status >= 0, ExcIO());
2954  status = H5Pclose(plist_id);
2955  AssertThrow(status >= 0, ExcIO());
2956  status = H5Fclose(file_id);
2957  AssertThrow(status >= 0, ExcIO());
2958 
2959  // before writing the state and property to file wait for
2960  // all processes to finish writing the matrix content to the file
2961  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
2962  AssertThrowMPI(ierr);
2963 
2964  // only root process will write state and property to the file
2965  if (tmp.grid->this_mpi_process == 0)
2966  {
2967  // open file using default properties
2968  hid_t file_id_reopen =
2969  H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
2970 
2971  // create HDF5 enum type for LAPACKSupport::State and
2972  // LAPACKSupport::Property
2973  hid_t state_enum_id, property_enum_id;
2974  internal::create_HDF5_state_enum_id(state_enum_id);
2975  internal::create_HDF5_property_enum_id(property_enum_id);
2976 
2977  // create the data space for the state enum
2978  hsize_t dims_state[1];
2979  dims_state[0] = 1;
2980  hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2981  // create the dataset for the state enum
2982  hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
2983  "/state",
2984  state_enum_id,
2985  state_enum_dataspace,
2986  H5P_DEFAULT,
2987  H5P_DEFAULT,
2988  H5P_DEFAULT);
2989  // write the dataset for the state enum
2990  status = H5Dwrite(state_enum_dataset,
2991  state_enum_id,
2992  H5S_ALL,
2993  H5S_ALL,
2994  H5P_DEFAULT,
2995  &state);
2996  AssertThrow(status >= 0, ExcIO());
2997 
2998  // create the data space for the property enum
2999  hsize_t dims_property[1];
3000  dims_property[0] = 1;
3001  hid_t property_enum_dataspace =
3002  H5Screate_simple(1, dims_property, nullptr);
3003  // create the dataset for the property enum
3004  hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
3005  "/property",
3006  property_enum_id,
3007  property_enum_dataspace,
3008  H5P_DEFAULT,
3009  H5P_DEFAULT,
3010  H5P_DEFAULT);
3011  // write the dataset for the property enum
3012  status = H5Dwrite(property_enum_dataset,
3013  property_enum_id,
3014  H5S_ALL,
3015  H5S_ALL,
3016  H5P_DEFAULT,
3017  &property);
3018  AssertThrow(status >= 0, ExcIO());
3019 
3020  status = H5Dclose(state_enum_dataset);
3021  AssertThrow(status >= 0, ExcIO());
3022  status = H5Dclose(property_enum_dataset);
3023  AssertThrow(status >= 0, ExcIO());
3024  status = H5Sclose(state_enum_dataspace);
3025  AssertThrow(status >= 0, ExcIO());
3026  status = H5Sclose(property_enum_dataspace);
3027  AssertThrow(status >= 0, ExcIO());
3028  status = H5Tclose(state_enum_id);
3029  AssertThrow(status >= 0, ExcIO());
3030  status = H5Tclose(property_enum_id);
3031  AssertThrow(status >= 0, ExcIO());
3032  status = H5Fclose(file_id_reopen);
3033  AssertThrow(status >= 0, ExcIO());
3034  }
3035 
3036 # endif
3037 }
3038 
3039 
3040 
3041 template <typename NumberType>
3042 void
3043 ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
3044 {
3045 # ifndef DEAL_II_WITH_HDF5
3046  (void)filename;
3047  AssertThrow(false, ExcNeedsHDF5());
3048 # else
3049 # ifdef H5_HAVE_PARALLEL
3050  // implementation for configurations equipped with a parallel file system
3051  load_parallel(filename);
3052 
3053 # else
3054  // implementation for configurations with no parallel file system
3055  load_serial(filename);
3056 # endif
3057 # endif
3058 }
3059 
3060 
3061 
3062 template <typename NumberType>
3063 void
3064 ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
3065 {
3066 # ifndef DEAL_II_WITH_HDF5
3067  (void)filename;
3068  Assert(false, ExcInternalError());
3069 # else
3070 
3071  /*
3072  * The content of the distributed matrix is copied to a matrix using a 1x1
3073  * process grid. Therefore, one process has all the data and can read it from
3074  * the file
3075  */
3076  // create a 1x1 process grid
3077  const auto one_grid =
3078  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3079  1,
3080  1);
3081 
3082  const int MB = n_rows, NB = n_columns;
3083  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);
3084 
3085  int state_int = -1;
3086  int property_int = -1;
3087 
3088  // the 1x1 grid has only one process and this one reads
3089  // the content of the matrix from the HDF5 file
3090  if (tmp.grid->mpi_process_is_active)
3091  {
3092  herr_t status;
3093 
3094  // open the file in read-only mode
3095  hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
3096 
3097  // open the dataset in the file
3098  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3099 
3100  // check the datatype of the data in the file
3101  // datatype of source and destination must have the same class
3102  // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3103  // Selection
3104  hid_t datatype = H5Dget_type(dataset_id);
3105  H5T_class_t t_class_in = H5Tget_class(datatype);
3106  H5T_class_t t_class = H5Tget_class(hdf5_type_id(tmp.values.data()));
3107  AssertThrow(
3108  t_class_in == t_class,
3109  ExcMessage(
3110  "The data type of the matrix to be read does not match the archive"));
3111 
3112  // get dataspace handle
3113  hid_t dataspace_id = H5Dget_space(dataset_id);
3114  // get number of dimensions
3115  const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3116  AssertThrow(ndims == 2, ExcIO());
3117  // get every dimension
3118  hsize_t dims[2];
3119  H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3120  AssertThrow(
3121  static_cast<int>(dims[0]) == n_columns,
3122  ExcMessage(
3123  "The number of columns of the matrix does not match the content of the archive"));
3124  AssertThrow(
3125  static_cast<int>(dims[1]) == n_rows,
3126  ExcMessage(
3127  "The number of rows of the matrix does not match the content of the archive"));
3128 
3129  // read data
3130  status = H5Dread(dataset_id,
3131  hdf5_type_id(tmp.values.data()),
3132  H5S_ALL,
3133  H5S_ALL,
3134  H5P_DEFAULT,
3135  tmp.values.data());
3136  AssertThrow(status >= 0, ExcIO());
3137 
3138  // create HDF5 enum type for LAPACKSupport::State and
3139  // LAPACKSupport::Property
3140  hid_t state_enum_id, property_enum_id;
3141  internal::create_HDF5_state_enum_id(state_enum_id);
3142  internal::create_HDF5_property_enum_id(property_enum_id);
3143 
3144  // open the datasets for the state and property enum in the file
3145  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3146  hid_t datatype_state = H5Dget_type(dataset_state_id);
3147  H5T_class_t t_class_state = H5Tget_class(datatype_state);
3148  AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3149 
3150  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3151  hid_t datatype_property = H5Dget_type(dataset_property_id);
3152  H5T_class_t t_class_property = H5Tget_class(datatype_property);
3153  AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3154 
3155  // get dataspace handles
3156  hid_t dataspace_state = H5Dget_space(dataset_state_id);
3157  hid_t dataspace_property = H5Dget_space(dataset_property_id);
3158  // get number of dimensions
3159  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3160  AssertThrow(ndims_state == 1, ExcIO());
3161  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3162  AssertThrow(ndims_property == 1, ExcIO());
3163  // get every dimension
3164  hsize_t dims_state[1];
3165  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3166  AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3167  hsize_t dims_property[1];
3168  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3169  AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3170 
3171  // read data
3172  status = H5Dread(dataset_state_id,
3173  state_enum_id,
3174  H5S_ALL,
3175  H5S_ALL,
3176  H5P_DEFAULT,
3177  &tmp.state);
3178  AssertThrow(status >= 0, ExcIO());
3179  // To send the state from the root process to the other processes
3180  // the state enum is cast to an integer, which will be broadcast and
3181  // subsequently cast back to the enum type
3182  state_int = static_cast<int>(tmp.state);
3183 
3184  status = H5Dread(dataset_property_id,
3185  property_enum_id,
3186  H5S_ALL,
3187  H5S_ALL,
3188  H5P_DEFAULT,
3189  &tmp.property);
3190  AssertThrow(status >= 0, ExcIO());
3191  // To send the property from the root process to the other processes
3192  // the property enum is cast to an integer, which will be broadcast and
3193  // subsequently cast back to the enum type
3194  property_int = static_cast<int>(tmp.property);
3195 
3196  // terminate access to the data spaces
3197  status = H5Sclose(dataspace_id);
3198  AssertThrow(status >= 0, ExcIO());
3199  status = H5Sclose(dataspace_state);
3200  AssertThrow(status >= 0, ExcIO());
3201  status = H5Sclose(dataspace_property);
3202  AssertThrow(status >= 0, ExcIO());
3203 
3204  // release data type handles
3205  status = H5Tclose(datatype);
3206  AssertThrow(status >= 0, ExcIO());
3207  status = H5Tclose(state_enum_id);
3208  AssertThrow(status >= 0, ExcIO());
3209  status = H5Tclose(property_enum_id);
3210  AssertThrow(status >= 0, ExcIO());
3211 
3212  // end access to the data sets and release resources used by them
3213  status = H5Dclose(dataset_state_id);
3214  AssertThrow(status >= 0, ExcIO());
3215  status = H5Dclose(dataset_id);
3216  AssertThrow(status >= 0, ExcIO());
3217  status = H5Dclose(dataset_property_id);
3218  AssertThrow(status >= 0, ExcIO());
3219 
3220  // close the file.
3221  status = H5Fclose(file_id);
3222  AssertThrow(status >= 0, ExcIO());
3223  }
3224  // so far only the root process has the correct state integer --> broadcasting
3225  tmp.grid->send_to_inactive(&state_int, 1);
3226  // so far only the root process has the correct property integer -->
3227  // broadcasting
3228  tmp.grid->send_to_inactive(&property_int, 1);
3229 
3230  tmp.state = static_cast<LAPACKSupport::State>(state_int);
3231  tmp.property = static_cast<LAPACKSupport::Property>(property_int);
3232 
3233  tmp.copy_to(*this);
3234 
3235 # endif // DEAL_II_WITH_HDF5
3236 }
3237 
3238 
3239 
3240 template <typename NumberType>
3241 void
3242 ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
3243 {
3244 # ifndef DEAL_II_WITH_HDF5
3245  (void)filename;
3246  Assert(false, ExcInternalError());
3247 # else
3248 # ifndef H5_HAVE_PARALLEL
3249  Assert(false, ExcInternalError());
3250 # else
3251 
3252  const unsigned int n_mpi_processes(
3253  Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
3254  MPI_Info info = MPI_INFO_NULL;
3255  /*
3256  * The content of the distributed matrix is copied to a matrix using a
3257  * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
3258  * of the matrix, which they can read from the file
3259  */
3260  // create a 1xP column grid with P being the number of MPI processes
3261  const auto column_grid =
3262  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3263  1,
3264  n_mpi_processes);
3265 
3266  const int MB = n_rows;
3267  // for the choice of NB see explanation in save_parallel()
3268  const int NB = std::max(static_cast<int>(std::ceil(
3269  static_cast<double>(n_columns) / n_mpi_processes)),
3270  column_block_size);
3271 
3272  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
3273 
3274  // get pointer to data held by the process
3275  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
3276 
3277  herr_t status;
3278 
3279  // set up file access property list with parallel I/O access
3280  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
3281  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
3282  AssertThrow(status >= 0, ExcIO());
3283 
3284  // open file collectively in read-only mode and release property list
3285  // identifier
3286  hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
3287  status = H5Pclose(plist_id);
3288  AssertThrow(status >= 0, ExcIO());
3289 
3290  // open the dataset in the file collectively
3291  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3292 
3293  // check the datatype of the dataset in the file
3294  // if the classes of type of the dataset and the matrix do not match abort
3295  // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3296  // Selection
3297  hid_t datatype = hdf5_type_id(data);
3298  hid_t datatype_inp = H5Dget_type(dataset_id);
3299  H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
3300  H5T_class_t t_class = H5Tget_class(datatype);
3301  AssertThrow(
3302  t_class_inp == t_class,
3303  ExcMessage(
3304  "The data type of the matrix to be read does not match the archive"));
3305 
3306  // get the dimensions of the matrix stored in the file
3307  // get dataspace handle
3308  hid_t dataspace_id = H5Dget_space(dataset_id);
3309  // get number of dimensions
3310  const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3311  AssertThrow(ndims == 2, ExcIO());
3312  // get every dimension
3313  hsize_t dims[2];
3314  status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3315  AssertThrow(status >= 0, ExcIO());
3316  AssertThrow(
3317  static_cast<int>(dims[0]) == n_columns,
3318  ExcMessage(
3319  "The number of columns of the matrix does not match the content of the archive"));
3320  AssertThrow(
3321  static_cast<int>(dims[1]) == n_rows,
3322  ExcMessage(
3323  "The number of rows of the matrix does not match the content of the archive"));
3324 
3325  // gather the number of local rows and columns from all processes
3326  std::vector<int> proc_n_local_rows(n_mpi_processes),
3327  proc_n_local_columns(n_mpi_processes);
3328  int ierr = MPI_Allgather(&tmp.n_local_rows,
3329  1,
3330  MPI_INT,
3331  proc_n_local_rows.data(),
3332  1,
3333  MPI_INT,
3334  tmp.grid->mpi_communicator);
3335  AssertThrowMPI(ierr);
3336  ierr = MPI_Allgather(&tmp.n_local_columns,
3337  1,
3338  MPI_INT,
3339  proc_n_local_columns.data(),
3340  1,
3341  MPI_INT,
3342  tmp.grid->mpi_communicator);
3343  AssertThrowMPI(ierr);
3344 
3345  const unsigned int my_rank(
3346  Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
3347 
3348  // hyperslab selection parameters
3349  // each process defines a dataset in memory and reads it from the hyperslab
3350  // in the file
3351  hsize_t count[2];
3352  count[0] = tmp.n_local_columns;
3353  count[1] = tmp.n_local_rows;
3354 
3355  hsize_t offset[2] = {0};
3356  for (unsigned int i = 0; i < my_rank; ++i)
3357  offset[0] += proc_n_local_columns[i];
3358 
3359  // select hyperslab in the file
3360  status = H5Sselect_hyperslab(
3361  dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
3362  AssertThrow(status >= 0, ExcIO());
3363 
3364  // create a memory dataspace independently
3365  hid_t memspace = H5Screate_simple(2, count, nullptr);
3366 
3367  // read data independently
3368  status =
3369  H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
3370  AssertThrow(status >= 0, ExcIO());
3371 
3372  // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
3373  hid_t state_enum_id, property_enum_id;
3374  internal::create_HDF5_state_enum_id(state_enum_id);
3375  internal::create_HDF5_property_enum_id(property_enum_id);
3376 
3377  // open the datasets for the state and property enum in the file
3378  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3379  hid_t datatype_state = H5Dget_type(dataset_state_id);
3380  H5T_class_t t_class_state = H5Tget_class(datatype_state);
3381  AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3382 
3383  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3384  hid_t datatype_property = H5Dget_type(dataset_property_id);
3385  H5T_class_t t_class_property = H5Tget_class(datatype_property);
3386  AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3387 
3388  // get dataspace handles
3389  hid_t dataspace_state = H5Dget_space(dataset_state_id);
3390  hid_t dataspace_property = H5Dget_space(dataset_property_id);
3391  // get number of dimensions
3392  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3393  AssertThrow(ndims_state == 1, ExcIO());
3394  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3395  AssertThrow(ndims_property == 1, ExcIO());
3396  // get every dimension
3397  hsize_t dims_state[1];
3398  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3399  AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3400  hsize_t dims_property[1];
3401  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3402  AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3403 
3404  // read data
3405  status = H5Dread(
3406  dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
3407  AssertThrow(status >= 0, ExcIO());
3408 
3409  status = H5Dread(dataset_property_id,
3410  property_enum_id,
3411  H5S_ALL,
3412  H5S_ALL,
3413  H5P_DEFAULT,
3414  &tmp.property);
3415  AssertThrow(status >= 0, ExcIO());
3416 
3417  // close/release sources
3418  status = H5Sclose(memspace);
3419  AssertThrow(status >= 0, ExcIO());
3420  status = H5Dclose(dataset_id);
3421  AssertThrow(status >= 0, ExcIO());
3422  status = H5Dclose(dataset_state_id);
3423  AssertThrow(status >= 0, ExcIO());
3424  status = H5Dclose(dataset_property_id);
3425  AssertThrow(status >= 0, ExcIO());
3426  status = H5Sclose(dataspace_id);
3427  AssertThrow(status >= 0, ExcIO());
3428  status = H5Sclose(dataspace_state);
3429  AssertThrow(status >= 0, ExcIO());
3430  status = H5Sclose(dataspace_property);
3431  AssertThrow(status >= 0, ExcIO());
3432  // status = H5Tclose(datatype);
3433  // AssertThrow(status >= 0, ExcIO());
3434  status = H5Tclose(state_enum_id);
3435  AssertThrow(status >= 0, ExcIO());
3436  status = H5Tclose(property_enum_id);
3437  AssertThrow(status >= 0, ExcIO());
3438  status = H5Fclose(file_id);
3439  AssertThrow(status >= 0, ExcIO());
3440 
3441  // copying the distributed matrices
3442  tmp.copy_to(*this);
3443 
3444 # endif // H5_HAVE_PARALLEL
3445 # endif // DEAL_II_WITH_HDF5
3446 }
3447 
3448 
3449 
3450 namespace internal
3451 {
3452  namespace
3453  {
3454  template <typename NumberType>
3455  void
3456  scale_columns(ScaLAPACKMatrix<NumberType> & matrix,
3457  const ArrayView<const NumberType> &factors)
3458  {
3459  Assert(matrix.n() == factors.size(),
3460  ExcDimensionMismatch(matrix.n(), factors.size()));
3461 
3462  for (unsigned int i = 0; i < matrix.local_n(); ++i)
3463  {
3464  const NumberType s = factors[matrix.global_column(i)];
3465 
3466  for (unsigned int j = 0; j < matrix.local_m(); ++j)
3467  matrix.local_el(j, i) *= s;
3468  }
3469  }
3470 
3471  template <typename NumberType>
3472  void
3473  scale_rows(ScaLAPACKMatrix<NumberType> & matrix,
3474  const ArrayView<const NumberType> &factors)
3475  {
3476  Assert(matrix.m() == factors.size(),
3477  ExcDimensionMismatch(matrix.m(), factors.size()));
3478 
3479  for (unsigned int i = 0; i < matrix.local_m(); ++i)
3480  {
3481  const NumberType s = factors[matrix.global_row(i)];
3482 
3483  for (unsigned int j = 0; j < matrix.local_n(); ++j)
3484  matrix.local_el(i, j) *= s;
3485  }
3486  }
3487 
3488  } // namespace
3489 } // namespace internal
3490 
3491 
3492 
3493 template <typename NumberType>
3494 template <class InputVector>
3495 void
3496 ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
3497 {
3498  if (this->grid->mpi_process_is_active)
3499  internal::scale_columns(*this, make_array_view(factors));
3500 }
3501 
3502 
3503 
3504 template <typename NumberType>
3505 template <class InputVector>
3506 void
3507 ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
3508 {
3509  if (this->grid->mpi_process_is_active)
3510  internal::scale_rows(*this, make_array_view(factors));
3511 }
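Illustrative sketch: rescaling the columns of a distributed matrix with one factor per global column, in the same way pseudoinverse() above rescales rows with the inverted singular values. Any container accepted by make_array_view() (for instance std::vector, as assumed here) can be used as InputVector.

  #include <deal.II/lac/scalapack.h>

  #include <vector>

  void scale_columns_sketch(dealii::ScaLAPACKMatrix<double> &A)
  {
    // one factor per global column of A
    std::vector<double> factors(A.n(), 2.0);
    A.scale_columns(factors); // multiplies column j of A by factors[j]
  }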
3512 
3513 
3514 
3515 // instantiations
3516 # include "scalapack.inst"
3517 
3518 
3519 DEAL_II_NAMESPACE_CLOSE
3520 
3521 #endif // DEAL_II_WITH_SCALAPACK
ArrayView< typename std::remove_reference< typename std::iterator_traits< Iterator >::reference >::type, MemorySpaceType > make_array_view(const Iterator begin, const Iterator end)
Definition: array_view.h:699
std::size_t size() const
Definition: array_view.h:576
DerivativeForm< 1, spacedim, dim, Number > transpose(const DerivativeForm< 1, dim, spacedim, Number > &DF)
size_type m() const
size_type n() const
int descriptor[9]
Definition: scalapack.h:935
std::vector< NumberType > compute_SVD(ScaLAPACKMatrix< NumberType > *U=nullptr, ScaLAPACKMatrix< NumberType > *VT=nullptr)
Definition: scalapack.cc:2014
std::vector< NumberType > eigenpairs_symmetric_by_value(const std::pair< NumberType, NumberType > &value_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1453
NumberType frobenius_norm() const
Definition: scalapack.cc:2403
unsigned int pseudoinverse(const NumberType ratio)
Definition: scalapack.cc:2226
std::vector< NumberType > eigenpairs_symmetric_by_value_MRRR(const std::pair< NumberType, NumberType > &value_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1786
void copy_from(const LAPACKFullMatrix< NumberType > &matrix, const unsigned int rank)
Definition: scalapack.cc:363
void save_parallel(const std::string &filename, const std::pair< unsigned int, unsigned int > &chunk_size) const
Definition: scalapack.cc:2807
void least_squares(ScaLAPACKMatrix< NumberType > &B, const bool transpose=false)
Definition: scalapack.cc:2134
ScaLAPACKMatrix< NumberType > & operator=(const FullMatrix< NumberType > &)
Definition: scalapack.cc:332
void Tadd(const NumberType b, const ScaLAPACKMatrix< NumberType > &B)
Definition: scalapack.cc:1057
void mTmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1212
void add(const ScaLAPACKMatrix< NumberType > &B, const NumberType a=0., const NumberType b=1., const bool transpose_B=false)
Definition: scalapack.cc:991
LAPACKSupport::State get_state() const
Definition: scalapack.cc:323
LAPACKSupport::Property get_property() const
Definition: scalapack.cc:314
int column_block_size
Definition: scalapack.h:920
void Tmmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1198
std::vector< NumberType > eigenpairs_symmetric_MRRR(const bool compute_eigenvectors, const std::pair< unsigned int, unsigned int > &index_limits=std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int), const std::pair< NumberType, NumberType > &value_limits=std::make_pair(std::numeric_limits< NumberType >::quiet_NaN(), std::numeric_limits< NumberType >::quiet_NaN()))
Definition: scalapack.cc:1804
void scale_rows(const InputVector &factors)
Definition: scalapack.cc:3507
std::shared_ptr< const Utilities::MPI::ProcessGrid > grid
Definition: scalapack.h:900
ScaLAPACKMatrix(const size_type n_rows, const size_type n_columns, const std::shared_ptr< const Utilities::MPI::ProcessGrid > &process_grid, const size_type row_block_size=32, const size_type column_block_size=32, const LAPACKSupport::Property property=LAPACKSupport::Property::general)
Definition: scalapack.cc:81
void load(const std::string &filename)
Definition: scalapack.cc:3043
const int submatrix_column
Definition: scalapack.h:981
const int submatrix_row
Definition: scalapack.h:975
void mmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1184
std::vector< NumberType > eigenpairs_symmetric(const bool compute_eigenvectors, const std::pair< unsigned int, unsigned int > &index_limits=std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int), const std::pair< NumberType, NumberType > &value_limits=std::make_pair(std::numeric_limits< NumberType >::quiet_NaN(), std::numeric_limits< NumberType >::quiet_NaN()))
Definition: scalapack.cc:1473
void save_serial(const std::string &filename, const std::pair< unsigned int, unsigned int > &chunk_size) const
Definition: scalapack.cc:2650
NumberType norm_general(const char type) const
Definition: scalapack.cc:2417
void save(const std::string &filename, const std::pair< unsigned int, unsigned int > &chunk_size=std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int)) const
Definition: scalapack.cc:2608
void load_parallel(const std::string &filename)
Definition: scalapack.cc:3242
NumberType l1_norm() const
Definition: scalapack.cc:2375
void compute_lu_factorization()
Definition: scalapack.cc:1273
NumberType norm_symmetric(const char type) const
Definition: scalapack.cc:2476
void mult(const NumberType b, const ScaLAPACKMatrix< NumberType > &B, const NumberType c, ScaLAPACKMatrix< NumberType > &C, const bool transpose_A=false, const bool transpose_B=false) const
Definition: scalapack.cc:1067
LAPACKSupport::Property property
Definition: scalapack.h:893
void set_property(const LAPACKSupport::Property property)
Definition: scalapack.cc:304
void reinit(const size_type n_rows, const size_type n_columns, const std::shared_ptr< const Utilities::MPI::ProcessGrid > &process_grid, const size_type row_block_size=32, const size_type column_block_size=32, const LAPACKSupport::Property property=LAPACKSupport::Property::general)
Definition: scalapack.cc:217
void load_serial(const std::string &filename)
Definition: scalapack.cc:3064
std::vector< NumberType > eigenpairs_symmetric_by_index_MRRR(const std::pair< unsigned int, unsigned int > &index_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1763
NumberType reciprocal_condition_number(const NumberType a_norm) const
Definition: scalapack.cc:2313
LAPACKSupport::State state
Definition: scalapack.h:887
void TmTmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1226
std::vector< NumberType > eigenpairs_symmetric_by_index(const std::pair< unsigned int, unsigned int > &index_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1430
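A hedged fragment computing all eigenpairs of a symmetric matrix through the index interface (variable names are assumptions, continuing the earlier sketches):

  A.set_property(LAPACKSupport::Property::symmetric);
  const std::vector<double> eigenvalues =
    A.eigenpairs_symmetric_by_index(
      std::make_pair(0u, static_cast<unsigned int>(A.n() - 1)),
      /*compute_eigenvectors=*/true);
  // eigenvalues are returned in ascending order; with compute_eigenvectors==true
  // the columns of A are overwritten by the corresponding eigenvectors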
unsigned int global_column(const unsigned int loc_column) const
Definition: scalapack.cc:515
void copy_to(FullMatrix< NumberType > &matrix) const
Definition: scalapack.cc:667
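A hedged fragment gathering the distributed matrix into a serial FullMatrix (requires <deal.II/lac/full_matrix.h>; the FullMatrix is assumed to be sized to the global dimensions beforehand):

  FullMatrix<double> full(A.m(), A.n());
  A.copy_to(full);   // full now holds a serial copy of the distributed content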
unsigned int global_row(const unsigned int loc_row) const
Definition: scalapack.cc:498
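The local-to-global index maps are typically used to fill the locally owned part of the matrix; a hedged sketch (the 1/(1+i+j) entries are only an example of a symmetric matrix):

  for (unsigned int i = 0; i < A.local_m(); ++i)
    {
      const unsigned int glob_i = A.global_row(i);
      for (unsigned int j = 0; j < A.local_n(); ++j)
        {
          const unsigned int glob_j = A.global_column(j);
          A.local_el(i, j) = 1. / (1. + glob_i + glob_j);  // Hilbert-like entry
        }
    }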
void compute_cholesky_factorization()
Definition: scalapack.cc:1240
void copy_transposed(const ScaLAPACKMatrix< NumberType > &B)
Definition: scalapack.cc:981
NumberType linfty_norm() const
Definition: scalapack.cc:2389
void scale_columns(const InputVector &factors)
Definition: scalapack.cc:3496
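A hedged fragment scaling every column of A by a per-column factor (a std::vector is one possible InputVector; its length has to equal the number of columns):

  std::vector<double> factors(A.n(), 2.0);  // one factor per column
  A.scale_columns(factors);                 // column j is multiplied by factors[j]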
std::array< std::pair< Number, Tensor< 1, dim, Number > >, std::integral_constant< int, dim >::value > eigenvectors(const SymmetricTensor< 2, dim, Number > &T, const SymmetricTensorEigenvectorMethod method=SymmetricTensorEigenvectorMethod::ql_implicit_shifts)
AlignedVector< T > values
Definition: table.h:796
typename AlignedVector< T >::size_type size_type
Definition: table.h:449
void reinit(const size_type size1, const size_type size2, const bool omit_default_initialization=false)
#define DEAL_II_NAMESPACE_OPEN
Definition: config.h:457
#define DEAL_II_NAMESPACE_CLOSE
Definition: config.h:458
const unsigned int v1
Definition: grid_tools.cc:1016
static ::ExceptionBase & ExcInternalError()
static ::ExceptionBase & ExcNeedsHDF5()
static ::ExceptionBase & ExcErrorCode(std::string arg1, types::blas_int arg2)
static ::ExceptionBase & ExcDimensionMismatch(std::size_t arg1, std::size_t arg2)
#define Assert(cond, exc)
Definition: exceptions.h:1501
static ::ExceptionBase & ExcNotImplemented()
#define AssertIsFinite(number)
Definition: exceptions.h:1786
#define AssertThrowMPI(error_code)
Definition: exceptions.h:1818
#define AssertIndexRange(index, range)
Definition: exceptions.h:1760
static ::ExceptionBase & ExcIndexRange(std::size_t arg1, std::size_t arg2, std::size_t arg3)
static ::ExceptionBase & ExcIO()
static ::ExceptionBase & ExcMessage(std::string arg1)
#define AssertThrow(cond, exc)
Definition: exceptions.h:1611
constexpr int chunk_size
Definition: cuda_size.h:35
constexpr int block_size
Definition: cuda_size.h:29
Expression ceil(const Expression &x)
@ cholesky
Contents is a Cholesky decomposition.
@ lu
Contents is an LU decomposition.
@ matrix
Contents is actually a matrix.
@ unusable
Contents is something useless.
@ inverse_matrix
Contents is the inverse of a matrix.
@ svd
Matrix contains singular value decomposition.
@ inverse_svd
Matrix is the inverse of a singular value decomposition.
@ eigenvalues
Eigenvalue vector is filled.
static const char U
@ symmetric
Matrix is symmetric.
@ hessenberg
Matrix is in upper Hessenberg form.
@ diagonal
Matrix is diagonal.
@ upper_triangular
Matrix is upper triangular.
@ lower_triangular
Matrix is lower triangular.
@ general
No special properties.
SymmetricTensor< 2, dim, Number > C(const Tensor< 2, dim, Number > &F)
SymmetricTensor< 2, dim, Number > b(const Tensor< 2, dim, Number > &F)
VectorType::value_type * begin(VectorType &V)
VectorType::value_type * end(VectorType &V)
@ scalapack_copy_from
ScaLAPACKMatrix<NumberType>::copy_from.
Definition: mpi_tags.h:121
@ scalapack_copy_to2
ScaLAPACKMatrix<NumberType>::copy_to.
Definition: mpi_tags.h:119
@ scalapack_copy_to
ScaLAPACKMatrix<NumberType>::copy_to.
Definition: mpi_tags.h:117
void free_communicator(MPI_Comm &mpi_communicator)
Definition: mpi.cc:200
unsigned int this_mpi_process(const MPI_Comm &mpi_communicator)
Definition: mpi.cc:157
T min(const T &t, const MPI_Comm &mpi_communicator)
T sum(const T &t, const MPI_Comm &mpi_communicator)
const MPI_Datatype mpi_type_id_for_type
Definition: mpi.h:1708
unsigned int n_mpi_processes(const MPI_Comm &mpi_communicator)
Definition: mpi.cc:146
T max(const T &t, const MPI_Comm &mpi_communicator)
void reinit(MatrixBlock< MatrixType > &v, const BlockSparsityPattern &p)
Definition: matrix_block.h:618
static const unsigned int invalid_unsigned_int
Definition: types.h:206
hid_t hdf5_type_id(const number *)
Definition: scalapack.cc:40