Reference documentation for deal.II version 9.2.0
scalapack.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2017 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/lac/scalapack.h>
18 
19 #ifdef DEAL_II_WITH_SCALAPACK
20 
21 # include <deal.II/base/array_view.h>
22 # include <deal.II/base/mpi.h>
23 # include <deal.II/base/mpi.templates.h>
24 # include <deal.II/base/std_cxx14/memory.h>
25 
26 # include <deal.II/lac/scalapack.templates.h>
27 
28 # ifdef DEAL_II_WITH_HDF5
29 # include <hdf5.h>
30 # endif
31 
32 DEAL_II_NAMESPACE_OPEN
33 
34 # ifdef DEAL_II_WITH_HDF5
35 
36 template <typename number>
37 inline hid_t
38 hdf5_type_id(const number *)
39 {
40  Assert(false, dealii::ExcNotImplemented());
41  // don't know what to put here; it does not matter
42  return -1;
43 }
44 
45 inline hid_t
46 hdf5_type_id(const double *)
47 {
48  return H5T_NATIVE_DOUBLE;
49 }
50 
51 inline hid_t
52 hdf5_type_id(const float *)
53 {
54  return H5T_NATIVE_FLOAT;
55 }
56 
57 inline hid_t
58 hdf5_type_id(const int *)
59 {
60  return H5T_NATIVE_INT;
61 }
62 
63 inline hid_t
64 hdf5_type_id(const unsigned int *)
65 {
66  return H5T_NATIVE_UINT;
67 }
68 
69 inline hid_t
70 hdf5_type_id(const char *)
71 {
72  return H5T_NATIVE_CHAR;
73 }
74 # endif // DEAL_II_WITH_HDF5
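
The overloads above map a C++ value type to the matching HDF5 native type id purely through overload resolution on the (unused) pointer argument. A minimal usage sketch, not part of scalapack.cc and assuming an HDF5-enabled build:

  // The pointer's value is irrelevant; only its static type selects the overload.
  std::vector<double> local_values(100);
  const hid_t t = hdf5_type_id(local_values.data()); // yields H5T_NATIVE_DOUBLE
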
75 
76 
77 
78 template <typename NumberType>
80  const size_type n_rows_,
81  const size_type n_columns_,
82  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
83  const size_type row_block_size_,
84  const size_type column_block_size_,
85  const LAPACKSupport::Property property_)
86  : uplo('L')
87  , // for non-symmetric matrices this is not needed
88  first_process_row(0)
89  , first_process_column(0)
90  , submatrix_row(1)
91  , submatrix_column(1)
92 {
93  reinit(n_rows_,
94  n_columns_,
95  process_grid,
96  row_block_size_,
97  column_block_size_,
98  property_);
99 }
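
This constructor only forwards its arguments to reinit(). A hedged usage sketch (the names n, block, grid and A are made up for illustration; assumes an MPI-parallel program built with ScaLAPACK support):

  const unsigned int n = 1000, block = 32;
  // 2D process grid sized for an n x n matrix distributed in block x block blocks
  const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    MPI_COMM_WORLD, n, n, block, block);
  ScaLAPACKMatrix<double> A(n, n, grid, block, block,
                            LAPACKSupport::Property::symmetric);
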
100 
101 
102 
103 template <typename NumberType>
104 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
105  const size_type size,
106  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
107  const size_type block_size,
108  const LAPACKSupport::Property property)
109  : ScaLAPACKMatrix<NumberType>(size,
110  size,
111  process_grid,
112  block_size,
113  block_size,
114  property)
115 {}
116 
117 
118 
119 template <typename NumberType>
120 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
121  const std::string & filename,
122  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
123  const size_type row_block_size,
124  const size_type column_block_size)
125  : uplo('L')
126  , // for non-symmetric matrices this is not needed
127  first_process_row(0)
128  , first_process_column(0)
129  , submatrix_row(1)
130  , submatrix_column(1)
131 {
132 # ifndef DEAL_II_WITH_HDF5
133  (void)filename;
134  (void)process_grid;
135  (void)row_block_size;
136  (void)column_block_size;
137  Assert(
138  false,
139  ExcMessage(
140  "This function is only available when deal.II is configured with HDF5"));
141 # else
142 
143  const unsigned int this_mpi_process(
144  Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));
145 
146  // Before reading the content from disk the root process determines the
147  // dimensions of the matrix. Subsequently, memory is allocated by a call to
148  // reinit() and the matrix is loaded by a call to load().
149  if (this_mpi_process == 0)
150  {
151  herr_t status = 0;
152 
153  // open file in read-only mode
154  hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
155  AssertThrow(file >= 0, ExcIO());
156 
157  // get data set in file
158  hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);
159  AssertThrow(dataset >= 0, ExcIO());
160 
161  // determine file space
162  hid_t filespace = H5Dget_space(dataset);
163 
164  // get number of dimensions in data set
165  int rank = H5Sget_simple_extent_ndims(filespace);
166  AssertThrow(rank == 2, ExcIO());
167  hsize_t dims[2];
168  status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
169  AssertThrow(status >= 0, ExcIO());
170 
171  // due to ScaLAPACK's column-major memory layout the dimensions are
172  // swapped
173  n_rows = dims[1];
174  n_columns = dims[0];
175 
176  // close/release resources
177  status = H5Sclose(filespace);
178  AssertThrow(status >= 0, ExcIO());
179  status = H5Dclose(dataset);
180  AssertThrow(status >= 0, ExcIO());
181  status = H5Fclose(file);
182  AssertThrow(status >= 0, ExcIO());
183  }
184  int ierr = MPI_Bcast(&n_rows,
185  1,
186  Utilities::MPI::internal::mpi_type_id(&n_rows),
187  0 /*from root*/,
188  process_grid->mpi_communicator);
189  AssertThrowMPI(ierr);
190 
191  ierr = MPI_Bcast(&n_columns,
192  1,
193  Utilities::MPI::internal::mpi_type_id(&n_columns),
194  0 /*from root*/,
195  process_grid->mpi_communicator);
196  AssertThrowMPI(ierr);
197 
198  // the property will be overwritten by the subsequent call to load()
199  reinit(n_rows,
200  n_columns,
201  process_grid,
202  row_block_size,
203  column_block_size,
204  LAPACKSupport::Property::symmetric);
205 
206  load(filename.c_str());
207 
208 # endif // DEAL_II_WITH_HDF5
209 }
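
The file-based constructor lets the root process read the matrix dimensions from the HDF5 data set "/matrix", broadcasts them, allocates storage via reinit() and finally calls load(). A sketch under the assumption that deal.II was configured with HDF5 and that "matrix.h5" (a made-up file name) was written earlier by save():

  const auto grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(MPI_COMM_WORLD, 1, 1);
  // restore the matrix with 32 x 32 blocks on a 1 x 1 process grid
  ScaLAPACKMatrix<double> A("matrix.h5", grid, 32, 32);
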
210 
211 
212 
213 template <typename NumberType>
214 void
215 ScaLAPACKMatrix<NumberType>::reinit(
216  const size_type n_rows_,
217  const size_type n_columns_,
218  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
219  const size_type row_block_size_,
220  const size_type column_block_size_,
221  const LAPACKSupport::Property property_)
222 {
223  Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
224  Assert(column_block_size_ > 0,
225  ExcMessage("Column block size has to be positive."));
226  Assert(
227  row_block_size_ <= n_rows_,
228  ExcMessage(
229  "Row block size can not be greater than the number of rows of the matrix"));
230  Assert(
231  column_block_size_ <= n_columns_,
232  ExcMessage(
233  "Column block size can not be greater than the number of columns of the matrix"));
234 
235  state = LAPACKSupport::State::matrix;
236  property = property_;
237  grid = process_grid;
238  n_rows = n_rows_;
239  n_columns = n_columns_;
240  row_block_size = row_block_size_;
241  column_block_size = column_block_size_;
242 
243  if (grid->mpi_process_is_active)
244  {
245  // Get local sizes:
246  n_local_rows = numroc_(&n_rows,
247  &row_block_size,
248  &(grid->this_process_row),
249  &first_process_row,
250  &(grid->n_process_rows));
251  n_local_columns = numroc_(&n_columns,
252  &column_block_size,
253  &(grid->this_process_column),
254  &first_process_column,
255  &(grid->n_process_columns));
256 
257  // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different
258  // between processes
259  int lda = std::max(1, n_local_rows);
260 
261  int info = 0;
262  descinit_(descriptor,
263  &n_rows,
264  &n_columns,
265  &row_block_size,
266  &column_block_size,
267  &first_process_row,
268  &first_process_column,
269  &(grid->blacs_context),
270  &lda,
271  &info);
272  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
273 
274  this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
275  }
276  else
277  {
278  // set process-local variables to something telling:
279  n_local_rows = -1;
280  n_local_columns = -1;
281  std::fill(std::begin(descriptor), std::end(descriptor), -1);
282  }
283 }
284 
285 
286 
287 template <typename NumberType>
288 void
289 ScaLAPACKMatrix<NumberType>::reinit(
290  const size_type size,
291  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
292  const size_type block_size,
293  const LAPACKSupport::Property property)
294 {
295  reinit(size, size, process_grid, block_size, block_size, property);
296 }
297 
298 
299 
300 template <typename NumberType>
301 void
302 ScaLAPACKMatrix<NumberType>::set_property(
303  const LAPACKSupport::Property property_)
304 {
305  property = property_;
306 }
307 
308 
309 
310 template <typename NumberType>
311 LAPACKSupport::Property
312 ScaLAPACKMatrix<NumberType>::get_property() const
313 {
314  return property;
315 }
316 
317 
318 
319 template <typename NumberType>
320 LAPACKSupport::State
321 ScaLAPACKMatrix<NumberType>::get_state() const
322 {
323  return state;
324 }
325 
326 
327 
328 template <typename NumberType>
329 ScaLAPACKMatrix<NumberType> &
330 ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
331 {
332  // FIXME: another way to copy is to use pdgeadd_ PBLAS routine.
333  // This routine computes the sum of two matrices B := a*A + b*B.
334  // Matrices can have different distribution, in particular matrix A can
335  // be owned by only one process, so we can set a=1 and b=0 to copy
336  // non-distributed matrix A into distributed matrix B.
337  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
338  Assert(n_columns == int(matrix.n()),
339  ExcDimensionMismatch(n_columns, matrix.n()));
340 
341  if (grid->mpi_process_is_active)
342  {
343  for (int i = 0; i < n_local_rows; ++i)
344  {
345  const int glob_i = global_row(i);
346  for (int j = 0; j < n_local_columns; ++j)
347  {
348  const int glob_j = global_column(j);
349  local_el(i, j) = matrix(glob_i, glob_j);
350  }
351  }
352  }
353  state = LAPACKSupport::matrix;
354  return *this;
355 }
356 
357 
358 
359 template <typename NumberType>
360 void
361 ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
362  const unsigned int rank)
363 {
364  if (n_rows * n_columns == 0)
365  return;
366 
367  const unsigned int this_mpi_process(
368  Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
369 
370 # ifdef DEBUG
371  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
372  ExcMessage("All processes have to call routine with identical rank"));
373  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
374  ExcMessage("All processes have to call routine with identical rank"));
375 # endif
376 
377  // root process has to be active in the grid of A
378  if (this_mpi_process == rank)
379  {
380  Assert(grid->mpi_process_is_active, ExcInternalError());
381  Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
382  Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
383  }
384  // Create 1x1 grid for matrix B.
385  // The underlying grid for matrix B only contains the process #rank.
386  // This grid will be used to copy the serial matrix B to the distributed
387  // matrix using the ScaLAPACK routine pgemr2d.
388  MPI_Group group_A;
389  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
390  const int n = 1;
391  const std::vector<int> ranks(n, rank);
392  MPI_Group group_B;
393  MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
394  MPI_Comm communicator_B;
395 
397  Utilities::MPI::create_group(this->grid->mpi_communicator,
398  group_B,
399  mpi_tag,
400  &communicator_B);
401  int n_proc_rows_B = 1, n_proc_cols_B = 1;
402  int this_process_row_B = -1, this_process_column_B = -1;
403  int blacs_context_B = -1;
404  if (MPI_COMM_NULL != communicator_B)
405  {
406  // Initialize Cblas context from the provided communicator
407  blacs_context_B = Csys2blacs_handle(communicator_B);
408  const char *order = "Col";
409  Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
410  Cblacs_gridinfo(blacs_context_B,
411  &n_proc_rows_B,
412  &n_proc_cols_B,
413  &this_process_row_B,
414  &this_process_column_B);
415  Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
416  // the active process of grid B has to be process #rank of the
417  // communicator attached to A
419  }
420  const bool mpi_process_is_active_B =
421  (this_process_row_B >= 0 && this_process_column_B >= 0);
422 
423  // create descriptor for matrix B
424  std::vector<int> descriptor_B(9, -1);
425  const int first_process_row_B = 0, first_process_col_B = 0;
426 
427  if (mpi_process_is_active_B)
428  {
429  // Get local sizes:
430  int n_local_rows_B = numroc_(&n_rows,
431  &n_rows,
432  &this_process_row_B,
433  &first_process_row_B,
434  &n_proc_rows_B);
435  int n_local_cols_B = numroc_(&n_columns,
436  &n_columns,
437  &this_process_column_B,
438  &first_process_col_B,
439  &n_proc_cols_B);
440  Assert(n_local_rows_B == n_rows, ExcInternalError());
441  Assert(n_local_cols_B == n_columns, ExcInternalError());
442  (void)n_local_cols_B;
443 
444  int lda = std::max(1, n_local_rows_B);
445  int info = 0;
446  descinit_(descriptor_B.data(),
447  &n_rows,
448  &n_columns,
449  &n_rows,
450  &n_columns,
451  &first_process_row_B,
452  &first_process_col_B,
453  &blacs_context_B,
454  &lda,
455  &info);
456  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
457  }
458  if (this->grid->mpi_process_is_active)
459  {
460  const int ii = 1;
461  NumberType *loc_vals_A =
462  this->values.size() > 0 ? this->values.data() : nullptr;
463  const NumberType *loc_vals_B =
464  mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
465 
466  // pgemr2d has to be called only for processes active on grid attached to
467  // matrix A
468  pgemr2d(&n_rows,
469  &n_columns,
470  loc_vals_B,
471  &ii,
472  &ii,
473  descriptor_B.data(),
474  loc_vals_A,
475  &ii,
476  &ii,
477  this->descriptor,
478  &(this->grid->blacs_context));
479  }
480  if (mpi_process_is_active_B)
481  Cblacs_gridexit(blacs_context_B);
482 
483  MPI_Group_free(&group_A);
484  MPI_Group_free(&group_B);
485  if (MPI_COMM_NULL != communicator_B)
486  MPI_Comm_free(&communicator_B);
487 
488  state = LAPACKSupport::matrix;
489 }
490 
491 
492 
493 template <typename NumberType>
494 unsigned int
495 ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
496 {
497  Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
498  ExcIndexRange(loc_row, 0, n_local_rows));
499  const int i = loc_row + 1;
500  return indxl2g_(&i,
501  &row_block_size,
502  &(grid->this_process_row),
503  &first_process_row,
504  &(grid->n_process_rows)) -
505  1;
506 }
507 
508 
509 
510 template <typename NumberType>
511 unsigned int
512 ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
513 {
514  Assert(n_local_columns >= 0 &&
515  loc_column < static_cast<unsigned int>(n_local_columns),
516  ExcIndexRange(loc_column, 0, n_local_columns));
517  const int j = loc_column + 1;
518  return indxl2g_(&j,
519  &column_block_size,
520  &(grid->this_process_column),
521  &first_process_column,
522  &(grid->n_process_columns)) -
523  1;
524 }
525 
526 
527 
528 template <typename NumberType>
529 void
530 ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
531  const unsigned int rank) const
532 {
533  if (n_rows * n_columns == 0)
534  return;
535 
536  const unsigned int this_mpi_process(
537  Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
538 
539 # ifdef DEBUG
540  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
541  ExcMessage("All processes have to call routine with identical rank"));
542  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
543  ExcMessage("All processes have to call routine with identical rank"));
544 # endif
545 
546  if (this_mpi_process == rank)
547  {
548  // the process which gets the serial copy has to be in the process grid
549  Assert(this->grid->is_process_active(), ExcInternalError());
550  Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
551  Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
552  }
553 
554  // Create 1x1 grid for matrix B.
555  // The underlying grid for matrix B only contains the process #rank.
556  // This grid will be used to copy the distributed matrix to the serial
557  // matrix B using the ScaLAPACK routine pgemr2d.
558  MPI_Group group_A;
559  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
560  const int n = 1;
561  const std::vector<int> ranks(n, rank);
562  MPI_Group group_B;
563  MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
564  MPI_Comm communicator_B;
565 
567  Utilities::MPI::create_group(this->grid->mpi_communicator,
568  group_B,
569  mpi_tag,
570  &communicator_B);
571  int n_proc_rows_B = 1, n_proc_cols_B = 1;
572  int this_process_row_B = -1, this_process_column_B = -1;
573  int blacs_context_B = -1;
574  if (MPI_COMM_NULL != communicator_B)
575  {
576  // Initialize Cblas context from the provided communicator
577  blacs_context_B = Csys2blacs_handle(communicator_B);
578  const char *order = "Col";
579  Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
580  Cblacs_gridinfo(blacs_context_B,
581  &n_proc_rows_B,
582  &n_proc_cols_B,
583  &this_process_row_B,
584  &this_process_column_B);
585  Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
586  // the active process of grid B has to be process #rank of the
587  // communicator attached to A
589  }
590  const bool mpi_process_is_active_B =
591  (this_process_row_B >= 0 && this_process_column_B >= 0);
592 
593  // create descriptor for matrix B
594  std::vector<int> descriptor_B(9, -1);
595  const int first_process_row_B = 0, first_process_col_B = 0;
596 
597  if (mpi_process_is_active_B)
598  {
599  // Get local sizes:
600  int n_local_rows_B = numroc_(&n_rows,
601  &n_rows,
602  &this_process_row_B,
603  &first_process_row_B,
604  &n_proc_rows_B);
605  int n_local_cols_B = numroc_(&n_columns,
606  &n_columns,
607  &this_process_column_B,
608  &first_process_col_B,
609  &n_proc_cols_B);
610  Assert(n_local_rows_B == n_rows, ExcInternalError());
611  Assert(n_local_cols_B == n_columns, ExcInternalError());
612  (void)n_local_cols_B;
613 
614  int lda = std::max(1, n_local_rows_B);
615  int info = 0;
616  // fill descriptor for matrix B
617  descinit_(descriptor_B.data(),
618  &n_rows,
619  &n_columns,
620  &n_rows,
621  &n_columns,
622  &first_process_row_B,
623  &first_process_col_B,
624  &blacs_context_B,
625  &lda,
626  &info);
627  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
628  }
629  // pgemr2d has to be called only for processes active on grid attached to
630  // matrix A
631  if (this->grid->mpi_process_is_active)
632  {
633  const int ii = 1;
634  const NumberType *loc_vals_A =
635  this->values.size() > 0 ? this->values.data() : nullptr;
636  NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
637 
638  pgemr2d(&n_rows,
639  &n_columns,
640  loc_vals_A,
641  &ii,
642  &ii,
643  this->descriptor,
644  loc_vals_B,
645  &ii,
646  &ii,
647  descriptor_B.data(),
648  &(this->grid->blacs_context));
649  }
650  if (mpi_process_is_active_B)
651  Cblacs_gridexit(blacs_context_B);
652 
653  MPI_Group_free(&group_A);
654  MPI_Group_free(&group_B);
655  if (MPI_COMM_NULL != communicator_B)
656  MPI_Comm_free(&communicator_B);
657 }
658 
659 
660 
661 template <typename NumberType>
662 void
663 ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
664 {
665  // FIXME: use PDGEMR2D for copying?
666  // PDGEMR2D copies a submatrix of A on a submatrix of B.
667  // A and B can have different distributions
668  // see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
669  Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
670  Assert(n_columns == int(matrix.n()),
671  ExcDimensionMismatch(n_columns, matrix.n()));
672 
673  matrix = 0.;
674  if (grid->mpi_process_is_active)
675  {
676  for (int i = 0; i < n_local_rows; ++i)
677  {
678  const int glob_i = global_row(i);
679  for (int j = 0; j < n_local_columns; ++j)
680  {
681  const int glob_j = global_column(j);
682  matrix(glob_i, glob_j) = local_el(i, j);
683  }
684  }
685  }
686  Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
687 
688  // we could move the following lines under the main loop above,
689  // but they would be dependent on glob_i and glob_j, which
690  // won't make it much prettier
691  if (state == LAPACKSupport::cholesky)
692  {
693  if (property == LAPACKSupport::lower_triangular)
694  for (unsigned int i = 0; i < matrix.n(); ++i)
695  for (unsigned int j = i + 1; j < matrix.m(); ++j)
696  matrix(i, j) = 0.;
697  else if (property == LAPACKSupport::upper_triangular)
698  for (unsigned int i = 0; i < matrix.n(); ++i)
699  for (unsigned int j = 0; j < i; ++j)
700  matrix(i, j) = 0.;
701  }
702  else if (property == LAPACKSupport::symmetric &&
703  state == LAPACKSupport::inverse_matrix)
704  {
705  if (uplo == 'L')
706  for (unsigned int i = 0; i < matrix.n(); ++i)
707  for (unsigned int j = i + 1; j < matrix.m(); ++j)
708  matrix(i, j) = matrix(j, i);
709  else if (uplo == 'U')
710  for (unsigned int i = 0; i < matrix.n(); ++i)
711  for (unsigned int j = 0; j < i; ++j)
712  matrix(i, j) = matrix(j, i);
713  }
714 }
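
Together with operator=(const FullMatrix&) above, this copy_to() allows a simple round trip between a serial matrix replicated on every process and the block-cyclic distribution. A hedged sketch (A, serial and copy are illustrative names; A is assumed to be a 1000 x 1000 ScaLAPACKMatrix<double>):

  FullMatrix<double> serial(1000, 1000);
  // ... fill 'serial' with identical content on every MPI process ...
  A = serial;                      // scatter into the block-cyclic layout
  FullMatrix<double> copy(1000, 1000);
  A.copy_to(copy);                 // gather the distributed entries on all processes
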
715 
716 
717 
718 template <typename NumberType>
719 void
720 ScaLAPACKMatrix<NumberType>::copy_to(
721  ScaLAPACKMatrix<NumberType> & B,
722  const std::pair<unsigned int, unsigned int> &offset_A,
723  const std::pair<unsigned int, unsigned int> &offset_B,
724  const std::pair<unsigned int, unsigned int> &submatrix_size) const
725 {
726  // submatrix is empty
727  if (submatrix_size.first == 0 || submatrix_size.second == 0)
728  return;
729 
730  // range checking for matrix A
731  AssertIndexRange(offset_A.first, n_rows - submatrix_size.first + 1);
732  AssertIndexRange(offset_A.second, n_columns - submatrix_size.second + 1);
733 
734  // range checking for matrix B
735  AssertIndexRange(offset_B.first, B.n_rows - submatrix_size.first + 1);
736  AssertIndexRange(offset_B.second, B.n_columns - submatrix_size.second + 1);
737 
738  // Currently, copying of matrices will only be supported if A and B share the
739  // same MPI communicator
740  int ierr, comparison;
741  ierr = MPI_Comm_compare(grid->mpi_communicator,
742  B.grid->mpi_communicator,
743  &comparison);
744  AssertThrowMPI(ierr);
745  Assert(comparison == MPI_IDENT,
746  ExcMessage("Matrix A and B must have a common MPI Communicator"));
747 
748  /*
749  * The routine pgemr2d requires a BLACS context resembling at least the union
750  * of process grids described by the BLACS contexts held by the ProcessGrids
751  * of matrix A and B. As A and B share the same MPI communicator, there is no
752  * need to create a union MPI communicator to initialise the BLACS context
753  */
754  int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
755  const char *order = "Col";
756  int union_n_process_rows =
757  Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
758  int union_n_process_columns = 1;
759  Cblacs_gridinit(&union_blacs_context,
760  order,
761  union_n_process_rows,
762  union_n_process_columns);
763 
764  int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
765  Cblacs_gridinfo(this->grid->blacs_context,
766  &n_grid_rows_A,
767  &n_grid_columns_A,
768  &my_row_A,
769  &my_column_A);
770 
771  // check whether process is in the BLACS context of matrix A
772  const bool in_context_A =
773  (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
774  (my_column_A >= 0 && my_column_A < n_grid_columns_A);
775 
776  int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
777  Cblacs_gridinfo(B.grid->blacs_context,
778  &n_grid_rows_B,
779  &n_grid_columns_B,
780  &my_row_B,
781  &my_column_B);
782 
783  // check whether process is in the BLACS context of matrix B
784  const bool in_context_B =
785  (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
786  (my_column_B >= 0 && my_column_B < n_grid_columns_B);
787 
788  const int n_rows_submatrix = submatrix_size.first;
789  const int n_columns_submatrix = submatrix_size.second;
790 
791  // due to Fortran indexing one has to be added
792  int ia = offset_A.first + 1, ja = offset_A.second + 1;
793  int ib = offset_B.first + 1, jb = offset_B.second + 1;
794 
795  std::array<int, 9> desc_A, desc_B;
796 
797  const NumberType *loc_vals_A = nullptr;
798  NumberType * loc_vals_B = nullptr;
799 
800  // Note: the function pgemr2d has to be called for all processes in the
801  // union BLACS context. If the calling process is not part of the BLACS
802  // context of A, desc_A[1] has to be -1 and all other parameters do not
803  // have to be set. If the calling process is not part of the BLACS context
804  // of B, desc_B[1] has to be -1 and all other parameters do not have to be set.
805  if (in_context_A)
806  {
807  if (this->values.size() != 0)
808  loc_vals_A = this->values.data();
809 
810  for (unsigned int i = 0; i < desc_A.size(); ++i)
811  desc_A[i] = this->descriptor[i];
812  }
813  else
814  desc_A[1] = -1;
815 
816  if (in_context_B)
817  {
818  if (B.values.size() != 0)
819  loc_vals_B = B.values.data();
820 
821  for (unsigned int i = 0; i < desc_B.size(); ++i)
822  desc_B[i] = B.descriptor[i];
823  }
824  else
825  desc_B[1] = -1;
826 
827  pgemr2d(&n_rows_submatrix,
828  &n_columns_submatrix,
829  loc_vals_A,
830  &ia,
831  &ja,
832  desc_A.data(),
833  loc_vals_B,
834  &ib,
835  &jb,
836  desc_B.data(),
837  &union_blacs_context);
838 
839  B.state = LAPACKSupport::matrix;
840 
841  // releasing the union BLACS context
842  Cblacs_gridexit(union_blacs_context);
843 }
844 
845 
846 
847 template <typename NumberType>
848 void
849 ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
850 {
851  Assert(n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
852  Assert(n_columns == dest.n_columns,
853  ExcDimensionMismatch(n_columns, dest.n_columns));
854 
855  if (this->grid->mpi_process_is_active)
856  AssertThrow(
857  this->descriptor[0] == 1,
858  ExcMessage(
859  "Copying of ScaLAPACK matrices only implemented for dense matrices"));
860  if (dest.grid->mpi_process_is_active)
861  AssertThrow(
862  dest.descriptor[0] == 1,
863  ExcMessage(
864  "Copying of ScaLAPACK matrices only implemented for dense matrices"));
865 
866  /*
867  * In case of different process grids or block-cyclic distributions,
868  * inter-process communication is necessary. If the distributed matrices
869  * have the same process grid and block sizes, local copying is
870  * sufficient.
871  */
872  if ((this->grid != dest.grid) || (row_block_size != dest.row_block_size) ||
873  (column_block_size != dest.column_block_size))
874  {
875  /*
876  * get the MPI communicator, which is the union of the source and
877  * destination MPI communicator
878  */
879  int ierr = 0;
880  MPI_Group group_source, group_dest, group_union;
881  ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
882  AssertThrowMPI(ierr);
883  ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
884  AssertThrowMPI(ierr);
885  ierr = MPI_Group_union(group_source, group_dest, &group_union);
886  AssertThrowMPI(ierr);
887  MPI_Comm mpi_communicator_union;
888 
889  // to create a communicator representing the union of the source
890  // and destination MPI communicator we need a communicator that
891  // is guaranteed to contain all desired processes -- i.e.,
892  // MPI_COMM_WORLD. on the other hand, as documented in the MPI
893  // standard, MPI_Comm_create_group is not collective on all
894  // processes in the first argument, but instead is collective on
895  // only those processes listed in the group. in other words,
896  // there is really no harm in passing MPI_COMM_WORLD as the
897  // first argument, even if the program we are currently running
898  // and that is calling this function only works on a subset of
899  // processes. the same holds for the wrapper/fallback we are using here.
900 
902  ierr = Utilities::MPI::create_group(MPI_COMM_WORLD,
903  group_union,
904  mpi_tag,
905  &mpi_communicator_union);
906  AssertThrowMPI(ierr);
907 
908  /*
909  * The routine pgemr2d requires a BLACS context resembling at least the
910  * union of process grids described by the BLACS contexts of matrix A and
911  * B
912  */
913  int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
914  const char *order = "Col";
915  int union_n_process_rows =
916  Utilities::MPI::n_mpi_processes(mpi_communicator_union);
917  int union_n_process_columns = 1;
918  Cblacs_gridinit(&union_blacs_context,
919  order,
920  union_n_process_rows,
921  union_n_process_columns);
922 
923  const NumberType *loc_vals_source = nullptr;
924  NumberType * loc_vals_dest = nullptr;
925 
926  if (this->grid->mpi_process_is_active && (this->values.size() > 0))
927  {
928  AssertThrow(this->values.size() > 0,
929  dealii::ExcMessage(
930  "source: process is active but local matrix empty"));
931  loc_vals_source = this->values.data();
932  }
933  if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
934  {
935  AssertThrow(
936  dest.values.size() > 0,
937  dealii::ExcMessage(
938  "destination: process is active but local matrix empty"));
939  loc_vals_dest = dest.values.data();
940  }
941  pgemr2d(&n_rows,
942  &n_columns,
943  loc_vals_source,
944  &submatrix_row,
945  &submatrix_column,
946  descriptor,
947  loc_vals_dest,
948  &dest.submatrix_row,
949  &dest.submatrix_column,
950  dest.descriptor,
951  &union_blacs_context);
952 
953  Cblacs_gridexit(union_blacs_context);
954 
955  if (mpi_communicator_union != MPI_COMM_NULL)
956  {
957  ierr = MPI_Comm_free(&mpi_communicator_union);
958  AssertThrowMPI(ierr);
959  }
960  ierr = MPI_Group_free(&group_source);
961  AssertThrowMPI(ierr);
962  ierr = MPI_Group_free(&group_dest);
963  AssertThrowMPI(ierr);
964  ierr = MPI_Group_free(&group_union);
965  AssertThrowMPI(ierr);
966  }
967  else
968  // process is active in the process grid
969  if (this->grid->mpi_process_is_active)
970  dest.values = this->values;
971 
972  dest.state = state;
973  dest.property = property;
974 }
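
This overload redistributes the matrix content; communication only happens if source and destination differ in process grid or block sizes, otherwise the local arrays are copied directly. Sketch (continuing the made-up A and grid from the earlier sketches):

  // same grid, but coarser 64 x 64 blocks: triggers the pgemr2d-based path
  ScaLAPACKMatrix<double> B(1000, 1000, grid, 64, 64);
  A.copy_to(B);
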
975 
976 
977 
978 template <typename NumberType>
979 void
980 ScaLAPACKMatrix<NumberType>::copy_transposed(
981  const ScaLAPACKMatrix<NumberType> &B)
982 {
983  add(B, 0, 1, true);
984 }
985 
986 
987 
988 template <typename NumberType>
989 void
990 ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
991  const NumberType alpha,
992  const NumberType beta,
993  const bool transpose_B)
994 {
995  if (transpose_B)
996  {
997  Assert(n_rows == B.n_columns, ExcDimensionMismatch(n_rows, B.n_columns));
998  Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
999  Assert(column_block_size == B.row_block_size,
1000  ExcDimensionMismatch(column_block_size, B.row_block_size));
1001  Assert(row_block_size == B.column_block_size,
1002  ExcDimensionMismatch(row_block_size, B.column_block_size));
1003  }
1004  else
1005  {
1006  Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
1007  Assert(n_columns == B.n_columns,
1008  ExcDimensionMismatch(n_columns, B.n_columns));
1009  Assert(column_block_size == B.column_block_size,
1010  ExcDimensionMismatch(column_block_size, B.column_block_size));
1011  Assert(row_block_size == B.row_block_size,
1012  ExcDimensionMismatch(row_block_size, B.row_block_size));
1013  }
1014  Assert(this->grid == B.grid,
1015  ExcMessage("The matrices A and B need to have the same process grid"));
1016 
1017  if (this->grid->mpi_process_is_active)
1018  {
1019  char trans_b = transpose_B ? 'T' : 'N';
1020  NumberType *A_loc =
1021  (this->values.size() > 0) ? this->values.data() : nullptr;
1022  const NumberType *B_loc =
1023  (B.values.size() > 0) ? B.values.data() : nullptr;
1024 
1025  pgeadd(&trans_b,
1026  &n_rows,
1027  &n_columns,
1028  &beta,
1029  B_loc,
1030  &B.submatrix_row,
1031  &B.submatrix_column,
1032  B.descriptor,
1033  &alpha,
1034  A_loc,
1035  &submatrix_row,
1036  &submatrix_column,
1037  descriptor);
1038  }
1039  state = LAPACKSupport::matrix;
1040 }
1041 
1042 
1043 
1044 template <typename NumberType>
1045 void
1046 ScaLAPACKMatrix<NumberType>::add(const NumberType a,
1047  const ScaLAPACKMatrix<NumberType> &B)
1048 {
1049  add(B, 1, a, false);
1050 }
1051 
1052 
1053 
1054 template <typename NumberType>
1055 void
1056 ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
1057  const ScaLAPACKMatrix<NumberType> &B)
1058 {
1059  add(B, 1, a, true);
1060 }
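
add(B, a, b, transpose_B) wraps the PBLAS routine pgeadd and computes A = a*A + b*op(B); the convenience overloads add(a, B) and Tadd(a, B) above correspond to A += a*B and A += a*B^T. A hedged sketch (A and B are distributed matrices with identical process grid and block sizes):

  A.add(B, 1., 2., false); // A = A + 2*B
  A.add(0.5, B);           // shorthand: A += 0.5*B
  A.Tadd(0.5, B);          // A += 0.5*B^T
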
1061 
1062 
1063 
1064 template <typename NumberType>
1065 void
1066 ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
1067  const ScaLAPACKMatrix<NumberType> &B,
1068  const NumberType c,
1069  ScaLAPACKMatrix<NumberType> & C,
1070  const bool transpose_A,
1071  const bool transpose_B) const
1072 {
1073  Assert(this->grid == B.grid,
1074  ExcMessage("The matrices A and B need to have the same process grid"));
1075  Assert(C.grid == B.grid,
1076  ExcMessage("The matrices B and C need to have the same process grid"));
1077 
1078  // see for further info:
1079  // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgemm.htm
1080  if (!transpose_A && !transpose_B)
1081  {
1082  Assert(this->n_columns == B.n_rows,
1083  ExcDimensionMismatch(this->n_columns, B.n_rows));
1084  Assert(this->n_rows == C.n_rows,
1085  ExcDimensionMismatch(this->n_rows, C.n_rows));
1086  Assert(B.n_columns == C.n_columns,
1087  ExcDimensionMismatch(B.n_columns, C.n_columns));
1088  Assert(this->row_block_size == C.row_block_size,
1089  ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1090  Assert(this->column_block_size == B.row_block_size,
1091  ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1092  Assert(B.column_block_size == C.column_block_size,
1093  ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1094  }
1095  else if (transpose_A && !transpose_B)
1096  {
1097  Assert(this->n_rows == B.n_rows,
1098  ExcDimensionMismatch(this->n_rows, B.n_rows));
1099  Assert(this->n_columns == C.n_rows,
1100  ExcDimensionMismatch(this->n_columns, C.n_rows));
1101  Assert(B.n_columns == C.n_columns,
1102  ExcDimensionMismatch(B.n_columns, C.n_columns));
1103  Assert(this->column_block_size == C.row_block_size,
1104  ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1105  Assert(this->row_block_size == B.row_block_size,
1106  ExcDimensionMismatch(this->row_block_size, B.row_block_size));
1107  Assert(B.column_block_size == C.column_block_size,
1108  ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1109  }
1110  else if (!transpose_A && transpose_B)
1111  {
1112  Assert(this->n_columns == B.n_columns,
1113  ExcDimensionMismatch(this->n_columns, B.n_columns));
1114  Assert(this->n_rows == C.n_rows,
1115  ExcDimensionMismatch(this->n_rows, C.n_rows));
1116  Assert(B.n_rows == C.n_columns,
1117  ExcDimensionMismatch(B.n_rows, C.n_columns));
1118  Assert(this->row_block_size == C.row_block_size,
1119  ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1120  Assert(this->column_block_size == B.column_block_size,
1121  ExcDimensionMismatch(this->column_block_size,
1122  B.column_block_size));
1123  Assert(B.row_block_size == C.column_block_size,
1124  ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1125  }
1126  else // if (transpose_A && transpose_B)
1127  {
1128  Assert(this->n_rows == B.n_columns,
1129  ExcDimensionMismatch(this->n_rows, B.n_columns));
1130  Assert(this->n_columns == C.n_rows,
1131  ExcDimensionMismatch(this->n_columns, C.n_rows));
1132  Assert(B.n_rows == C.n_columns,
1133  ExcDimensionMismatch(B.n_rows, C.n_columns));
1134  Assert(this->column_block_size == C.row_block_size,
1135  ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1136  Assert(this->row_block_size == B.column_block_size,
1137  ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1138  Assert(B.row_block_size == C.column_block_size,
1139  ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1140  }
1141 
1142  if (this->grid->mpi_process_is_active)
1143  {
1144  char trans_a = transpose_A ? 'T' : 'N';
1145  char trans_b = transpose_B ? 'T' : 'N';
1146 
1147  const NumberType *A_loc =
1148  (this->values.size() > 0) ? this->values.data() : nullptr;
1149  const NumberType *B_loc =
1150  (B.values.size() > 0) ? B.values.data() : nullptr;
1151  NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
1152  int m = C.n_rows;
1153  int n = C.n_columns;
1154  int k = transpose_A ? this->n_rows : this->n_columns;
1155 
1156  pgemm(&trans_a,
1157  &trans_b,
1158  &m,
1159  &n,
1160  &k,
1161  &b,
1162  A_loc,
1163  &(this->submatrix_row),
1164  &(this->submatrix_column),
1165  this->descriptor,
1166  B_loc,
1167  &B.submatrix_row,
1168  &B.submatrix_column,
1169  B.descriptor,
1170  &c,
1171  C_loc,
1172  &C.submatrix_row,
1173  &C.submatrix_column,
1174  C.descriptor);
1175  }
1176  C.state = LAPACKSupport::matrix;
1177 }
1178 
1179 
1180 
1181 template <typename NumberType>
1182 void
1183 ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> & C,
1184  const ScaLAPACKMatrix<NumberType> &B,
1185  const bool adding) const
1186 {
1187  if (adding)
1188  mult(1., B, 1., C, false, false);
1189  else
1190  mult(1., B, 0, C, false, false);
1191 }
1192 
1193 
1194 
1195 template <typename NumberType>
1196 void
1197 ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> & C,
1198  const ScaLAPACKMatrix<NumberType> &B,
1199  const bool adding) const
1200 {
1201  if (adding)
1202  mult(1., B, 1., C, true, false);
1203  else
1204  mult(1., B, 0, C, true, false);
1205 }
1206 
1207 
1208 
1209 template <typename NumberType>
1210 void
1211 ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> & C,
1212  const ScaLAPACKMatrix<NumberType> &B,
1213  const bool adding) const
1214 {
1215  if (adding)
1216  mult(1., B, 1., C, false, true);
1217  else
1218  mult(1., B, 0, C, false, true);
1219 }
1220 
1221 
1222 
1223 template <typename NumberType>
1224 void
1225 ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> & C,
1226  const ScaLAPACKMatrix<NumberType> &B,
1227  const bool adding) const
1228 {
1229  if (adding)
1230  mult(1., B, 1., C, true, true);
1231  else
1232  mult(1., B, 0, C, true, true);
1233 }
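
mult() is the general pgemm wrapper computing C = b*op(A)*op(B) + c*C, and mmult(), Tmmult(), mTmult() and TmTmult() fix the two transposition flags. A hedged sketch (A, B, C are distributed matrices with conforming dimensions and block sizes on one process grid):

  A.mmult(C, B);         // C = A * B
  A.Tmmult(C, B);        // C = A^T * B
  A.mult(1., B, 1., C);  // general form: C = A*B + C
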
1234 
1235 
1236 
1237 template <typename NumberType>
1238 void
1239 ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
1240 {
1241  Assert(
1242  n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
1243  ExcMessage(
1244  "Cholesky factorization can be applied to symmetric matrices only."));
1245  Assert(state == LAPACKSupport::matrix,
1246  ExcMessage(
1247  "Matrix has to be in Matrix state before calling this function."));
1248 
1249  if (grid->mpi_process_is_active)
1250  {
1251  int info = 0;
1252  NumberType *A_loc = this->values.data();
1253  // pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
1254  ppotrf(&uplo,
1255  &n_columns,
1256  A_loc,
1257  &submatrix_row,
1258  &submatrix_column,
1259  descriptor,
1260  &info);
1261  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
1262  }
1263  state = LAPACKSupport::cholesky;
1264  property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
1265  LAPACKSupport::upper_triangular);
1266 }
1267 
1268 
1269 
1270 template <typename NumberType>
1271 void
1272 ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
1273 {
1274  Assert(state == LAPACKSupport::matrix,
1275  ExcMessage(
1276  "Matrix has to be in Matrix state before calling this function."));
1277 
1278  if (grid->mpi_process_is_active)
1279  {
1280  int info = 0;
1281  NumberType *A_loc = this->values.data();
1282 
1283  const int iarow = indxg2p_(&submatrix_row,
1284  &row_block_size,
1285  &(grid->this_process_row),
1286  &first_process_row,
1287  &(grid->n_process_rows));
1288  const int mp = numroc_(&n_rows,
1289  &row_block_size,
1290  &(grid->this_process_row),
1291  &iarow,
1292  &(grid->n_process_rows));
1293  ipiv.resize(mp + row_block_size);
1294 
1295  pgetrf(&n_rows,
1296  &n_columns,
1297  A_loc,
1298  &submatrix_row,
1299  &submatrix_column,
1300  descriptor,
1301  ipiv.data(),
1302  &info);
1303  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
1304  }
1305  state = LAPACKSupport::State::lu;
1306  property = LAPACKSupport::Property::general;
1307 }
1308 
1309 
1310 
1311 template <typename NumberType>
1312 void
1313 ScaLAPACKMatrix<NumberType>::invert()
1314 {
1315  // Check whether matrix is symmetric and save flag.
1316  // If a Cholesky factorization has been applied previously,
1317  // the original matrix was symmetric.
1318  const bool is_symmetric = (property == LAPACKSupport::symmetric ||
1319  state == LAPACKSupport::State::cholesky);
1320 
1321  // Check whether matrix is triangular and is in an unfactorized state.
1322  const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
1323  property == LAPACKSupport::lower_triangular) &&
1324  (state == LAPACKSupport::State::matrix ||
1325  state == LAPACKSupport::State::inverse_matrix);
1326 
1327  if (is_triangular)
1328  {
1329  if (grid->mpi_process_is_active)
1330  {
1331  const char uploTriangular =
1332  property == LAPACKSupport::upper_triangular ? 'U' : 'L';
1333  const char diag = 'N';
1334  int info = 0;
1335  NumberType *A_loc = this->values.data();
1336  ptrtri(&uploTriangular,
1337  &diag,
1338  &n_columns,
1339  A_loc,
1340  &submatrix_row,
1341  &submatrix_column,
1342  descriptor,
1343  &info);
1344  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
1345  // The inversion is stored in the same part as the triangular matrix,
1346  // so we don't need to re-set the property here.
1347  }
1348  }
1349  else
1350  {
1351  // Matrix is neither in Cholesky nor LU state.
1352  // Compute the required factorizations based on the property of the
1353  // matrix.
1354  if (!(state == LAPACKSupport::State::lu ||
1355  state == LAPACKSupport::State::cholesky))
1356  {
1357  if (is_symmetric)
1358  compute_cholesky_factorization();
1359  else
1360  compute_lu_factorization();
1361  }
1362  if (grid->mpi_process_is_active)
1363  {
1364  int info = 0;
1365  NumberType *A_loc = this->values.data();
1366 
1367  if (is_symmetric)
1368  {
1369  ppotri(&uplo,
1370  &n_columns,
1371  A_loc,
1372  &submatrix_row,
1373  &submatrix_column,
1374  descriptor,
1375  &info);
1376  AssertThrow(info == 0,
1377  LAPACKSupport::ExcErrorCode("ppotri", info));
1378  property = LAPACKSupport::Property::symmetric;
1379  }
1380  else
1381  {
1382  int lwork = -1, liwork = -1;
1383  work.resize(1);
1384  iwork.resize(1);
1385 
1386  pgetri(&n_columns,
1387  A_loc,
1388  &submatrix_row,
1389  &submatrix_column,
1390  descriptor,
1391  ipiv.data(),
1392  work.data(),
1393  &lwork,
1394  iwork.data(),
1395  &liwork,
1396  &info);
1397 
1398  AssertThrow(info == 0,
1399  LAPACKSupport::ExcErrorCode("pgetri", info));
1400  lwork = static_cast<int>(work[0]);
1401  liwork = iwork[0];
1402  work.resize(lwork);
1403  iwork.resize(liwork);
1404 
1405  pgetri(&n_columns,
1406  A_loc,
1407  &submatrix_row,
1408  &submatrix_column,
1409  descriptor,
1410  ipiv.data(),
1411  work.data(),
1412  &lwork,
1413  iwork.data(),
1414  &liwork,
1415  &info);
1416 
1417  AssertThrow(info == 0,
1418  LAPACKSupport::ExcErrorCode("pgetri", info));
1419  }
1420  }
1421  }
1422  state = LAPACKSupport::State::inverse_matrix;
1423 }
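
invert() picks its route from the stored property and state: ptrtri for triangular matrices, Cholesky factorization plus ppotri for symmetric (positive definite) matrices, and LU factorization plus pgetri otherwise; the factorization is computed on the fly if it has not been done yet. Sketch for the symmetric positive definite case (A as in the earlier made-up examples):

  A.set_property(LAPACKSupport::Property::symmetric);
  A.invert(); // internally: ppotrf followed by ppotri, in place
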
1424 
1425 
1426 
1427 template <typename NumberType>
1428 std::vector<NumberType>
1429 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
1430  const std::pair<unsigned int, unsigned int> &index_limits,
1431  const bool compute_eigenvectors)
1432 {
1433  // check validity of index limits
1434  AssertIndexRange(index_limits.first, n_rows);
1435  AssertIndexRange(index_limits.second, n_rows);
1436 
1437  std::pair<unsigned int, unsigned int> idx =
1438  std::make_pair(std::min(index_limits.first, index_limits.second),
1439  std::max(index_limits.first, index_limits.second));
1440 
1441  // compute all eigenvalues/eigenvectors
1442  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1443  return eigenpairs_symmetric(compute_eigenvectors);
1444  else
1445  return eigenpairs_symmetric(compute_eigenvectors, idx);
1446 }
1447 
1448 
1449 
1450 template <typename NumberType>
1451 std::vector<NumberType>
1452 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
1453  const std::pair<NumberType, NumberType> &value_limits,
1454  const bool compute_eigenvectors)
1455 {
1456  Assert(!std::isnan(value_limits.first),
1457  ExcMessage("value_limits.first is NaN"));
1458  Assert(!std::isnan(value_limits.second),
1459  ExcMessage("value_limits.second is NaN"));
1460 
1461  std::pair<unsigned int, unsigned int> indices =
1462  std::make_pair(numbers::invalid_unsigned_int,
1463  numbers::invalid_unsigned_int);
1464 
1465  return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
1466 }
1467 
1468 
1469 
1470 template <typename NumberType>
1471 std::vector<NumberType>
1472 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
1473  const bool compute_eigenvectors,
1474  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1475  const std::pair<NumberType, NumberType> & eigenvalue_limits)
1476 {
1477  Assert(state == LAPACKSupport::matrix,
1478  ExcMessage(
1479  "Matrix has to be in Matrix state before calling this function."));
1480  Assert(property == LAPACKSupport::symmetric,
1481  ExcMessage("Matrix has to be symmetric for this operation."));
1482 
1483  std::lock_guard<std::mutex> lock(mutex);
1484 
1485  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1486  std::isnan(eigenvalue_limits.second)) ?
1487  false :
1488  true;
1489  const bool use_indices =
1490  ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1491  (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1492  false :
1493  true;
1494 
1495  Assert(
1496  !(use_values && use_indices),
1497  ExcMessage(
1498  "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1499 
1500  // if computation of eigenvectors is not required use a sufficiently small
1501  // distributed matrix
1502  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1503  compute_eigenvectors ?
1504  std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1505  grid,
1506  row_block_size) :
1507  std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(
1508  grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1509 
1510  eigenvectors->property = property;
1511  // number of eigenvalues to be returned from psyevx; upon successful exit ev
1512  // contains the m selected eigenvalues in ascending order; set to all
1513  // eigenvalues in case we will be using psyev.
1514  int m = n_rows;
1515  std::vector<NumberType> ev(n_rows);
1516 
1517  if (grid->mpi_process_is_active)
1518  {
1519  int info = 0;
1520  /*
1521  * for jobz==N only eigenvalues are computed, for jobz='V' also the
1522  * eigenvectors of the matrix are computed
1523  */
1524  char jobz = compute_eigenvectors ? 'V' : 'N';
1525  char range = 'A';
1526  // default value is to compute all eigenvalues and optionally eigenvectors
1527  bool all_eigenpairs = true;
1528  NumberType vl = NumberType(), vu = NumberType();
1529  int il = 1, iu = 1;
1530  // number of eigenvectors to be returned;
1531  // upon successful exit the first m=nz columns contain the selected
1532  // eigenvectors (only if jobz=='V')
1533  int nz = 0;
1534  NumberType abstol = NumberType();
1535 
1536  // orfac decides which eigenvectors should be reorthogonalized
1537  // see
1538  // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1539  // for an explanation. To keep things simple, no reorthogonalization will
1540  // be done, which is achieved by setting orfac to 0.
1541  NumberType orfac = 0;
1542  // contains the indices of eigenvectors that failed to converge
1543  std::vector<int> ifail;
1544  // This array contains indices of eigenvectors corresponding to
1545  // a cluster of eigenvalues that could not be reorthogonalized
1546  // due to insufficient workspace
1547  // see
1548  // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1549  // for explanation
1550  std::vector<int> iclustr;
1551  // This array contains the gap between eigenvalues whose
1552  // eigenvectors could not be reorthogonalized.
1553  // see
1554  // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1555  // for explanation
1556  std::vector<NumberType> gap(n_local_rows * n_local_columns);
1557 
1558  // index range for eigenvalues is not specified
1559  if (!use_indices)
1560  {
1561  // interval for eigenvalues is not specified and consequently all
1562  // eigenvalues/eigenpairs will be computed
1563  if (!use_values)
1564  {
1565  range = 'A';
1566  all_eigenpairs = true;
1567  }
1568  else
1569  {
1570  range = 'V';
1571  all_eigenpairs = false;
1572  vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1573  vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1574  }
1575  }
1576  else
1577  {
1578  range = 'I';
1579  all_eigenpairs = false;
1580  // as Fortran starts counting/indexing from 1 unlike C/C++, where it
1581  // starts from 0
1582  il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1583  iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1584  }
1585  NumberType *A_loc = this->values.data();
1586  /*
1587  * by setting lwork to -1 a workspace query for optimal length of work is
1588  * performed
1589  */
1590  int lwork = -1;
1591  int liwork = -1;
1592  NumberType *eigenvectors_loc =
1593  (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1594  work.resize(1);
1595  iwork.resize(1);
1596 
1597  if (all_eigenpairs)
1598  {
1599  psyev(&jobz,
1600  &uplo,
1601  &n_rows,
1602  A_loc,
1603  &submatrix_row,
1604  &submatrix_column,
1605  descriptor,
1606  ev.data(),
1607  eigenvectors_loc,
1608  &eigenvectors->submatrix_row,
1609  &eigenvectors->submatrix_column,
1610  eigenvectors->descriptor,
1611  work.data(),
1612  &lwork,
1613  &info);
1614  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1615  }
1616  else
1617  {
1618  char cmach = compute_eigenvectors ? 'U' : 'S';
1619  plamch(&(this->grid->blacs_context), &cmach, abstol);
1620  abstol *= 2;
1621  ifail.resize(n_rows);
1622  iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
1623  gap.resize(grid->n_process_rows * grid->n_process_columns);
1624 
1625  psyevx(&jobz,
1626  &range,
1627  &uplo,
1628  &n_rows,
1629  A_loc,
1630  &submatrix_row,
1631  &submatrix_column,
1632  descriptor,
1633  &vl,
1634  &vu,
1635  &il,
1636  &iu,
1637  &abstol,
1638  &m,
1639  &nz,
1640  ev.data(),
1641  &orfac,
1642  eigenvectors_loc,
1643  &eigenvectors->submatrix_row,
1644  &eigenvectors->submatrix_column,
1645  eigenvectors->descriptor,
1646  work.data(),
1647  &lwork,
1648  iwork.data(),
1649  &liwork,
1650  ifail.data(),
1651  iclustr.data(),
1652  gap.data(),
1653  &info);
1654  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1655  }
1656  lwork = static_cast<int>(work[0]);
1657  work.resize(lwork);
1658 
1659  if (all_eigenpairs)
1660  {
1661  psyev(&jobz,
1662  &uplo,
1663  &n_rows,
1664  A_loc,
1665  &submatrix_row,
1666  &submatrix_column,
1667  descriptor,
1668  ev.data(),
1669  eigenvectors_loc,
1670  &eigenvectors->submatrix_row,
1671  &eigenvectors->submatrix_column,
1672  eigenvectors->descriptor,
1673  work.data(),
1674  &lwork,
1675  &info);
1676 
1677  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1678  }
1679  else
1680  {
1681  liwork = iwork[0];
1682  AssertThrow(liwork > 0, ExcInternalError());
1683  iwork.resize(liwork);
1684 
1685  psyevx(&jobz,
1686  &range,
1687  &uplo,
1688  &n_rows,
1689  A_loc,
1690  &submatrix_row,
1691  &submatrix_column,
1692  descriptor,
1693  &vl,
1694  &vu,
1695  &il,
1696  &iu,
1697  &abstol,
1698  &m,
1699  &nz,
1700  ev.data(),
1701  &orfac,
1702  eigenvectors_loc,
1703  &eigenvectors->submatrix_row,
1704  &eigenvectors->submatrix_column,
1705  eigenvectors->descriptor,
1706  work.data(),
1707  &lwork,
1708  iwork.data(),
1709  &liwork,
1710  ifail.data(),
1711  iclustr.data(),
1712  gap.data(),
1713  &info);
1714 
1715  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1716  }
1717  // if eigenvectors are queried copy eigenvectors to original matrix
1718  // as the temporary matrix eigenvectors has identical dimensions and
1719  // block-cyclic distribution we simply swap the local array
1720  if (compute_eigenvectors)
1721  this->values.swap(eigenvectors->values);
1722 
1723  // adapt the size of ev to fit m upon return
1724  while (ev.size() > static_cast<size_type>(m))
1725  ev.pop_back();
1726  }
1727  /*
1728  * send number of computed eigenvalues to inactive processes
1729  */
1730  grid->send_to_inactive(&m, 1);
1731 
1732  /*
1733  * inactive processes have to resize array of eigenvalues
1734  */
1735  if (!grid->mpi_process_is_active)
1736  ev.resize(m);
1737  /*
1738  * send the eigenvalues to processors not being part of the process grid
1739  */
1740  grid->send_to_inactive(ev.data(), ev.size());
1741 
1742  /*
1743  * if only eigenvalues are queried, the content of the matrix will be
1744  * destroyed; if the eigenpairs are queried, matrix A on exit stores the
1745  * eigenvectors in its columns
1746  */
1747  if (compute_eigenvectors)
1748  {
1749  property = LAPACKSupport::Property::general;
1750  state = LAPACKSupport::eigenvalues;
1751  }
1752  else
1753  state = LAPACKSupport::unusable;
1754 
1755  return ev;
1756 }
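
This routine is reached through eigenpairs_symmetric_by_index() and eigenpairs_symmetric_by_value() above and dispatches to psyev (all eigenpairs) or psyevx (a subset). Sketch (A is a distributed matrix whose property has been set to symmetric; on return its columns hold the requested eigenvectors):

  A.set_property(LAPACKSupport::Property::symmetric);
  const std::vector<double> eigenvalues =
    A.eigenpairs_symmetric_by_index(std::make_pair(0U, 9U),
                                    /*compute_eigenvectors=*/true);
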
1757 
1758 
1759 
1760 template <typename NumberType>
1761 std::vector<NumberType>
1762 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
1763  const std::pair<unsigned int, unsigned int> &index_limits,
1764  const bool compute_eigenvectors)
1765 {
1766  // Check validity of index limits.
1767  AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
1768  AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));
1769 
1770  const std::pair<unsigned int, unsigned int> idx =
1771  std::make_pair(std::min(index_limits.first, index_limits.second),
1772  std::max(index_limits.first, index_limits.second));
1773 
1774  // Compute all eigenvalues/eigenvectors.
1775  if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1776  return eigenpairs_symmetric_MRRR(compute_eigenvectors);
1777  else
1778  return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
1779 }
1780 
1781 
1782 
1783 template <typename NumberType>
1784 std::vector<NumberType>
1785 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
1786  const std::pair<NumberType, NumberType> &value_limits,
1787  const bool compute_eigenvectors)
1788 {
1789  AssertIsFinite(value_limits.first);
1790  AssertIsFinite(value_limits.second);
1791 
1792  const std::pair<unsigned int, unsigned int> indices =
1793  std::make_pair(numbers::invalid_unsigned_int,
1794  numbers::invalid_unsigned_int);
1795 
1796  return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
1797 }
1798 
1799 
1800 
1801 template <typename NumberType>
1802 std::vector<NumberType>
1803 ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
1804  const bool compute_eigenvectors,
1805  const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1806  const std::pair<NumberType, NumberType> & eigenvalue_limits)
1807 {
1808  Assert(state == LAPACKSupport::matrix,
1809  ExcMessage(
1810  "Matrix has to be in Matrix state before calling this function."));
1811  Assert(property == LAPACKSupport::symmetric,
1812  ExcMessage("Matrix has to be symmetric for this operation."));
1813 
1814  std::lock_guard<std::mutex> lock(mutex);
1815 
1816  const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1817  std::isnan(eigenvalue_limits.second)) ?
1818  false :
1819  true;
1820  const bool use_indices =
1821  ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1822  (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1823  false :
1824  true;
1825 
1826  Assert(
1827  !(use_values && use_indices),
1828  ExcMessage(
1829  "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1830 
1831  // If computation of eigenvectors is not required, use a sufficiently small
1832  // distributed matrix.
1833  std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1834  compute_eigenvectors ?
1835  std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1836  grid,
1837  row_block_size) :
1838  std_cxx14::make_unique<ScaLAPACKMatrix<NumberType>>(
1839  grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1840 
1841  eigenvectors->property = property;
1842  // Number of eigenvalues to be returned from psyevr; upon successful exit ev
1843  // contains the m selected eigenvalues in ascending order.
1844  int m = n_rows;
1845  std::vector<NumberType> ev(n_rows);
1846 
1847  // Number of eigenvectors to be returned;
1848  // Upon successful exit the first m=nz columns contain the selected
1849  // eigenvectors (only if jobz=='V').
1850  int nz = 0;
1851 
1852  if (grid->mpi_process_is_active)
1853  {
1854  int info = 0;
1855  /*
1856  * For jobz==N only eigenvalues are computed, for jobz='V' also the
1857  * eigenvectors of the matrix are computed.
1858  */
1859  char jobz = compute_eigenvectors ? 'V' : 'N';
1860  // Default value is to compute all eigenvalues and optionally
1861  // eigenvectors.
1862  char range = 'A';
1863  NumberType vl = NumberType(), vu = NumberType();
1864  int il = 1, iu = 1;
1865 
1866  // Index range for eigenvalues is not specified.
1867  if (!use_indices)
1868  {
1869  // Interval for eigenvalues is not specified and consequently all
1870  // eigenvalues/eigenpairs will be computed.
1871  if (!use_values)
1872  {
1873  range = 'A';
1874  }
1875  else
1876  {
1877  range = 'V';
1878  vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1879  vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1880  }
1881  }
1882  else
1883  {
1884  range = 'I';
1885  // As Fortran starts counting/indexing from 1 unlike C/C++, where it
1886  // starts from 0.
1887  il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1888  iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1889  }
1890  NumberType *A_loc = this->values.data();
1891 
1892  /*
1893  * By setting lwork to -1 a workspace query for optimal length of work is
1894  * performed.
1895  */
1896  int lwork = -1;
1897  int liwork = -1;
1898  NumberType *eigenvectors_loc =
1899  (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1900  work.resize(1);
1901  iwork.resize(1);
1902 
1903  psyevr(&jobz,
1904  &range,
1905  &uplo,
1906  &n_rows,
1907  A_loc,
1908  &submatrix_row,
1909  &submatrix_column,
1910  descriptor,
1911  &vl,
1912  &vu,
1913  &il,
1914  &iu,
1915  &m,
1916  &nz,
1917  ev.data(),
1918  eigenvectors_loc,
1919  &eigenvectors->submatrix_row,
1920  &eigenvectors->submatrix_column,
1921  eigenvectors->descriptor,
1922  work.data(),
1923  &lwork,
1924  iwork.data(),
1925  &liwork,
1926  &info);
1927 
1928  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1929 
1930  lwork = static_cast<int>(work[0]);
1931  work.resize(lwork);
1932  liwork = iwork[0];
1933  iwork.resize(liwork);
1934 
1935  psyevr(&jobz,
1936  &range,
1937  &uplo,
1938  &n_rows,
1939  A_loc,
1940  &submatrix_row,
1941  &submatrix_column,
1942  descriptor,
1943  &vl,
1944  &vu,
1945  &il,
1946  &iu,
1947  &m,
1948  &nz,
1949  ev.data(),
1950  eigenvectors_loc,
1951  &eigenvectors->submatrix_row,
1952  &eigenvectors->submatrix_column,
1953  eigenvectors->descriptor,
1954  work.data(),
1955  &lwork,
1956  iwork.data(),
1957  &liwork,
1958  &info);
1959 
1960  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1961 
1962  if (compute_eigenvectors)
1963  AssertThrow(
1964  m == nz,
1965  ExcMessage(
1966  "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
1967 
1968  // If eigenvectors are queried, copy eigenvectors to original matrix.
1969  // As the temporary matrix eigenvectors has identical dimensions and
1970  // block-cyclic distribution we simply swap the local array.
1971  if (compute_eigenvectors)
1972  this->values.swap(eigenvectors->values);
1973 
1974  // Adapt the size of ev to fit m upon return.
1975  while (ev.size() > static_cast<size_type>(m))
1976  ev.pop_back();
1977  }
1978  /*
1979  * Send number of computed eigenvalues to inactive processes.
1980  */
1981  grid->send_to_inactive(&m, 1);
1982 
1983  /*
1984  * Inactive processes have to resize array of eigenvalues.
1985  */
1986  if (!grid->mpi_process_is_active)
1987  ev.resize(m);
1988  /*
1989  * Send the eigenvalues to processors not being part of the process grid.
1990  */
1991  grid->send_to_inactive(ev.data(), ev.size());
1992 
1993  /*
1994  * If only eigenvalues are queried, the content of the matrix will be
1995  * destroyed. If the eigenpairs are queried, matrix A on exit stores the
1996  * eigenvectors in the columns.
1997  */
1998  if (compute_eigenvectors)
1999  {
2000  property = LAPACKSupport::Property::general;
2001  state = LAPACKSupport::eigenvalues;
2002  }
2003  else
2004  state = LAPACKSupport::unusable;
2005 
2006  return ev;
2007 }
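// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of scalapack.cc): a minimal, hedged
// example of calling the MRRR eigensolver defined above. The communicator,
// the 2x2 grid (assumes at least four MPI ranks), the matrix size and the
// block size are illustrative choices, not requirements.
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

void example_eigenpairs_mrrr(const MPI_Comm comm)
{
  using namespace dealii;
  const auto grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(comm, 2, 2);
  // 100 x 100 symmetric matrix distributed in 32 x 32 blocks
  ScaLAPACKMatrix<double> A(100, grid, 32, LAPACKSupport::Property::symmetric);
  // fill the locally owned entries with a symmetric test matrix
  for (unsigned int i = 0; i < A.local_m(); ++i)
    for (unsigned int j = 0; j < A.local_n(); ++j)
      A.local_el(i, j) = 1. / (1. + A.global_row(i) + A.global_column(j));
  // all eigenvalues in ascending order; A is overwritten by the eigenvectors
  const std::vector<double> ev =
    A.eigenpairs_symmetric_MRRR(/*compute_eigenvectors=*/true);
  (void)ev;
}
// ---------------------------------------------------------------------------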
2008 
2009 
2010 
2011 template <typename NumberType>
2012 std::vector<NumberType>
2013 ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
2014  ScaLAPACKMatrix<NumberType> *VT)
2015 {
2016  Assert(state == LAPACKSupport::matrix,
2017  ExcMessage(
2018  "Matrix has to be in Matrix state before calling this function."));
2019  Assert(row_block_size == column_block_size,
2020  ExcDimensionMismatch(row_block_size, column_block_size));
2021 
2022  const bool left_singluar_vectors = (U != nullptr) ? true : false;
2023  const bool right_singluar_vectors = (VT != nullptr) ? true : false;
2024 
2025  if (left_singluar_vectors)
2026  {
2027  Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
2028  Assert(U->n_rows == U->n_columns,
2029  ExcDimensionMismatch(U->n_rows, U->n_columns));
2030  Assert(row_block_size == U->row_block_size,
2031  ExcDimensionMismatch(row_block_size, U->row_block_size));
2032  Assert(column_block_size == U->column_block_size,
2033  ExcDimensionMismatch(column_block_size, U->column_block_size));
2034  Assert(grid->blacs_context == U->grid->blacs_context,
2035  ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
2036  }
2037  if (right_singluar_vectors)
2038  {
2039  Assert(n_columns == VT->n_rows,
2040  ExcDimensionMismatch(n_columns, VT->n_rows));
2041  Assert(VT->n_rows == VT->n_columns,
2042  ExcDimensionMismatch(VT->n_rows, VT->n_columns));
2043  Assert(row_block_size == VT->row_block_size,
2044  ExcDimensionMismatch(row_block_size, VT->row_block_size));
2045  Assert(column_block_size == VT->column_block_size,
2046  ExcDimensionMismatch(column_block_size, VT->column_block_size));
2047  Assert(grid->blacs_context == VT->grid->blacs_context,
2048  ExcDimensionMismatch(grid->blacs_context,
2049  VT->grid->blacs_context));
2050  }
2051  std::lock_guard<std::mutex> lock(mutex);
2052 
2053  std::vector<NumberType> sv(std::min(n_rows, n_columns));
2054 
2055  if (grid->mpi_process_is_active)
2056  {
2057  char jobu = left_singluar_vectors ? 'V' : 'N';
2058  char jobvt = right_singluar_vectors ? 'V' : 'N';
2059  NumberType *A_loc = this->values.data();
2060  NumberType *U_loc = left_singluar_vectors ? U->values.data() : nullptr;
2061  NumberType *VT_loc = right_singluar_vectors ? VT->values.data() : nullptr;
2062  int info = 0;
2063  /*
2064  * by setting lwork to -1 a workspace query for optimal length of work is
2065  * performed
2066  */
2067  int lwork = -1;
2068  work.resize(1);
2069 
2070  pgesvd(&jobu,
2071  &jobvt,
2072  &n_rows,
2073  &n_columns,
2074  A_loc,
2075  &submatrix_row,
2076  &submatrix_column,
2077  descriptor,
2078  &*sv.begin(),
2079  U_loc,
2080  &U->submatrix_row,
2081  &U->submatrix_column,
2082  U->descriptor,
2083  VT_loc,
2084  &VT->submatrix_row,
2085  &VT->submatrix_column,
2086  VT->descriptor,
2087  work.data(),
2088  &lwork,
2089  &info);
2090  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2091 
2092  lwork = static_cast<int>(work[0]);
2093  work.resize(lwork);
2094 
2095  pgesvd(&jobu,
2096  &jobvt,
2097  &n_rows,
2098  &n_columns,
2099  A_loc,
2100  &submatrix_row,
2101  &submatrix_column,
2102  descriptor,
2103  &*sv.begin(),
2104  U_loc,
2105  &U->submatrix_row,
2106  &U->submatrix_column,
2107  U->descriptor,
2108  VT_loc,
2109  &VT->submatrix_row,
2110  &VT->submatrix_column,
2111  VT->descriptor,
2112  work.data(),
2113  &lwork,
2114  &info);
2115  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2116  }
2117 
2118  /*
2119  * send the singular values to processors not being part of the process grid
2120  */
2121  grid->send_to_inactive(sv.data(), sv.size());
2122 
2123  property = LAPACKSupport::Property::general;
2124  state = LAPACKSupport::unusable;
2125 
2126  return sv;
2127 }
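// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of scalapack.cc): computing a full SVD
// A = U * Sigma * VT with the routine above. U must be n_rows x n_rows and
// VT n_columns x n_columns, on the same grid and with the same square block
// size as A. All names and sizes below are illustrative.
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

void example_svd(const MPI_Comm comm)
{
  using namespace dealii;
  const auto grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(comm, 2, 2);
  const unsigned int m = 200, n = 120, nb = 32;
  ScaLAPACKMatrix<double> A(m, n, grid, nb, nb);
  ScaLAPACKMatrix<double> U(m, m, grid, nb, nb);
  ScaLAPACKMatrix<double> VT(n, n, grid, nb, nb);
  // ... fill the locally owned part of A ...
  // singular values are returned in descending order; A is overwritten
  const std::vector<double> sv = A.compute_SVD(&U, &VT);
  (void)sv;
}
// ---------------------------------------------------------------------------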
2128 
2129 
2130 
2131 template <typename NumberType>
2132 void
2133 ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
2134  const bool transpose)
2135 {
2136  Assert(grid == B.grid,
2137  ExcMessage("The matrices A and B need to have the same process grid"));
2138  Assert(state == LAPACKSupport::matrix,
2139  ExcMessage(
2140  "Matrix has to be in Matrix state before calling this function."));
2141  Assert(B.state == LAPACKSupport::matrix,
2142  ExcMessage(
2143  "Matrix B has to be in Matrix state before calling this function."));
2144 
2145  if (transpose)
2146  {
2147  Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
2148  }
2149  else
2150  {
2151  Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
2152  }
2153 
2154  // see
2155  // https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgels.htm
2156  Assert(row_block_size == column_block_size,
2157  ExcMessage(
2158  "Use identical block sizes for rows and columns of matrix A"));
2159  Assert(B.row_block_size == B.column_block_size,
2160  ExcMessage(
2161  "Use identical block sizes for rows and columns of matrix B"));
2162  Assert(row_block_size == B.row_block_size,
2163  ExcMessage(
2164  "Use identical block-cyclic distribution for matrices A and B"));
2165 
2166  std::lock_guard<std::mutex> lock(mutex);
2167 
2168  if (grid->mpi_process_is_active)
2169  {
2170  char trans = transpose ? 'T' : 'N';
2171  NumberType *A_loc = this->values.data();
2172  NumberType *B_loc = B.values.data();
2173  int info = 0;
2174  /*
2175  * by setting lwork to -1 a workspace query for optimal length of work is
2176  * performed
2177  */
2178  int lwork = -1;
2179  work.resize(1);
2180 
2181  pgels(&trans,
2182  &n_rows,
2183  &n_columns,
2184  &B.n_columns,
2185  A_loc,
2186  &submatrix_row,
2187  &submatrix_column,
2188  descriptor,
2189  B_loc,
2190  &B.submatrix_row,
2191  &B.submatrix_column,
2192  B.descriptor,
2193  work.data(),
2194  &lwork,
2195  &info);
2196  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2197 
2198  lwork = static_cast<int>(work[0]);
2199  work.resize(lwork);
2200 
2201  pgels(&trans,
2202  &n_rows,
2203  &n_columns,
2204  &B.n_columns,
2205  A_loc,
2206  &submatrix_row,
2207  &submatrix_column,
2208  descriptor,
2209  B_loc,
2210  &B.submatrix_row,
2211  &B.submatrix_column,
2212  B.descriptor,
2213  work.data(),
2214  &lwork,
2215  &info);
2216  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2217  }
2218  state = LAPACKSupport::unusable;
2219 }
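// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of scalapack.cc): solving an
// overdetermined system A x = b in the least-squares sense with the routine
// above. A and B must live on the same grid and use identical square block
// sizes; on exit the first n_columns rows of B hold the solution. Names and
// sizes are illustrative.
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

void example_least_squares(const MPI_Comm comm)
{
  using namespace dealii;
  const auto grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(comm, 2, 2);
  const unsigned int m = 300, n = 100, n_rhs = 4, nb = 32;
  ScaLAPACKMatrix<double> A(m, n, grid, nb, nb);
  ScaLAPACKMatrix<double> B(m, n_rhs, grid, nb, nb);
  // ... fill A (assumed to have full rank) and the right-hand sides in B ...
  A.least_squares(B); // A is overwritten by its factorization
}
// ---------------------------------------------------------------------------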
2220 
2221 
2222 
2223 template <typename NumberType>
2224 unsigned int
2225 ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
2226 {
2227  Assert(state == LAPACKSupport::matrix,
2228  ExcMessage(
2229  "Matrix has to be in Matrix state before calling this function."));
2230  Assert(row_block_size == column_block_size,
2231  ExcMessage(
2232  "Use identical block sizes for rows and columns of matrix A"));
2233  Assert(
2234  ratio > 0. && ratio < 1.,
2235  ExcMessage(
2236  "input parameter ratio has to be larger than zero and smaller than 1"));
2237 
2238  ScaLAPACKMatrix<NumberType> U(n_rows,
2239  n_rows,
2240  grid,
2241  row_block_size,
2242  row_block_size,
2243  LAPACKSupport::Property::general);
2244  ScaLAPACKMatrix<NumberType> VT(n_columns,
2245  n_columns,
2246  grid,
2247  row_block_size,
2248  row_block_size,
2249  LAPACKSupport::Property::general);
2250  std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
2251  AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
2252  ExcMessage("Matrix has rank 0"));
2253 
2254  // Get the number of singular values fulfilling sv[i] > sv[0] * ratio.
2255  // Obviously, the 0-th element already satisfies sv[0] > sv[0] * ratio.
2256  // The singular values in sv are ordered by descending value, so we break
2257  // out of the loop if a singular value is smaller than sv[0] * ratio.
2258  unsigned int n_sv = 1;
2259  std::vector<NumberType> inv_sigma;
2260  inv_sigma.push_back(1 / sv[0]);
2261 
2262  for (unsigned int i = 1; i < sv.size(); ++i)
2263  if (sv[i] > sv[0] * ratio)
2264  {
2265  ++n_sv;
2266  inv_sigma.push_back(1 / sv[i]);
2267  }
2268  else
2269  break;
2270 
2271  // For the matrix multiplication we use only the columns of U and rows of VT
2272  // which are associated with singular values larger than the limit. That saves
2273  // computational time for matrices with rank significantly smaller than
2274  // min(n_rows,n_columns)
2275  ScaLAPACKMatrix<NumberType> U_R(n_rows,
2276  n_sv,
2277  grid,
2278  row_block_size,
2279  row_block_size,
2280  LAPACKSupport::Property::general);
2281  ScaLAPACKMatrix<NumberType> VT_R(n_sv,
2282  n_columns,
2283  grid,
2284  row_block_size,
2285  row_block_size,
2286  LAPACKSupport::Property::general);
2287  U.copy_to(U_R,
2288  std::make_pair(0, 0),
2289  std::make_pair(0, 0),
2290  std::make_pair(n_rows, n_sv));
2291  VT.copy_to(VT_R,
2292  std::make_pair(0, 0),
2293  std::make_pair(0, 0),
2294  std::make_pair(n_sv, n_columns));
2295 
2296  VT_R.scale_rows(inv_sigma);
2297  this->reinit(n_columns,
2298  n_rows,
2299  this->grid,
2300  row_block_size,
2301  column_block_size,
2302  LAPACKSupport::Property::general);
2303  VT_R.mult(1, U_R, 0, *this, true, true);
2305  return n_sv;
2306 }
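// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of scalapack.cc): computing the
// Moore-Penrose pseudoinverse via the SVD-based routine above. Singular
// values below ratio * sigma_max are treated as zero; the return value is
// the detected rank and the matrix is replaced by its n_columns x n_rows
// pseudoinverse. Names, sizes and the ratio are illustrative.
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

void example_pseudoinverse(const MPI_Comm comm)
{
  using namespace dealii;
  const auto grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(comm, 2, 2);
  ScaLAPACKMatrix<double> A(200, 120, grid, 32, 32);
  // fill the locally owned entries with a nonzero test matrix
  for (unsigned int i = 0; i < A.local_m(); ++i)
    for (unsigned int j = 0; j < A.local_n(); ++j)
      A.local_el(i, j) = (A.global_row(i) == A.global_column(j)) ? 1. : 0.;
  const unsigned int rank = A.pseudoinverse(1.e-8);
  (void)rank;
}
// ---------------------------------------------------------------------------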
2307 
2308 
2309 
2310 template <typename NumberType>
2311 NumberType
2312 ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
2313  const NumberType a_norm) const
2314 {
2315  Assert(state == LAPACKSupport::cholesky,
2316  ExcMessage(
2317  "Matrix has to be in Cholesky state before calling this function."));
2318  std::lock_guard<std::mutex> lock(mutex);
2319  NumberType rcond = 0.;
2320 
2321  if (grid->mpi_process_is_active)
2322  {
2323  int liwork = n_local_rows;
2324  iwork.resize(liwork);
2325 
2326  int info = 0;
2327  const NumberType *A_loc = this->values.data();
2328 
2329  // by setting lwork to -1 a workspace query for optimal length of work is
2330  // performed
2331  int lwork = -1;
2332  work.resize(1);
2333  ppocon(&uplo,
2334  &n_columns,
2335  A_loc,
2336  &submatrix_row,
2337  &submatrix_column,
2338  descriptor,
2339  &a_norm,
2340  &rcond,
2341  work.data(),
2342  &lwork,
2343  iwork.data(),
2344  &liwork,
2345  &info);
2346  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2347  lwork = static_cast<int>(std::ceil(work[0]));
2348  work.resize(lwork);
2349 
2350  // now the actual run:
2351  ppocon(&uplo,
2352  &n_columns,
2353  A_loc,
2354  &submatrix_row,
2355  &submatrix_column,
2356  descriptor,
2357  &a_norm,
2358  &rcond,
2359  work.data(),
2360  &lwork,
2361  iwork.data(),
2362  &liwork,
2363  &info);
2364  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2365  }
2366  grid->send_to_inactive(&rcond);
2367  return rcond;
2368 }
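// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of scalapack.cc): estimating the
// condition number of a symmetric positive definite matrix with the routine
// above. The l1 norm has to be taken before the Cholesky factorization
// overwrites the matrix. Names and sizes are illustrative.
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

void example_condition_number(const MPI_Comm comm)
{
  using namespace dealii;
  const auto grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(comm, 2, 2);
  ScaLAPACKMatrix<double> A(100, grid, 32, LAPACKSupport::Property::symmetric);
  // fill A with a diagonally dominant (hence s.p.d.) test matrix
  for (unsigned int i = 0; i < A.local_m(); ++i)
    for (unsigned int j = 0; j < A.local_n(); ++j)
      A.local_el(i, j) = (A.global_row(i) == A.global_column(j)) ? 4. : 0.01;
  const double a_norm = A.l1_norm();
  A.compute_cholesky_factorization();
  const double rcond = A.reciprocal_condition_number(a_norm);
  // 1. / rcond estimates the l1 condition number of A
  (void)rcond;
}
// ---------------------------------------------------------------------------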
2369 
2370 
2371 
2372 template <typename NumberType>
2373 NumberType
2374 ScaLAPACKMatrix<NumberType>::l1_norm() const
2375 {
2376  const char type('O');
2377 
2378  if (property == LAPACKSupport::symmetric)
2379  return norm_symmetric(type);
2380  else
2381  return norm_general(type);
2382 }
2383 
2384 
2385 
2386 template <typename NumberType>
2387 NumberType
2388 ScaLAPACKMatrix<NumberType>::linfty_norm() const
2389 {
2390  const char type('I');
2391 
2392  if (property == LAPACKSupport::symmetric)
2393  return norm_symmetric(type);
2394  else
2395  return norm_general(type);
2396 }
2397 
2398 
2399 
2400 template <typename NumberType>
2401 NumberType
2402 ScaLAPACKMatrix<NumberType>::frobenius_norm() const
2403 {
2404  const char type('F');
2405 
2406  if (property == LAPACKSupport::symmetric)
2407  return norm_symmetric(type);
2408  else
2409  return norm_general(type);
2410 }
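// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of scalapack.cc): the three public norms
// above dispatch to norm_symmetric() or norm_general() depending on the
// stored property. The matrix is assumed to be initialized and in matrix
// state.
#include <deal.II/lac/scalapack.h>

void example_norms(const dealii::ScaLAPACKMatrix<double> &A)
{
  const double n1 = A.l1_norm();        // maximum column sum, type 'O'
  const double ni = A.linfty_norm();    // maximum row sum, type 'I'
  const double nf = A.frobenius_norm(); // type 'F'
  (void)n1;
  (void)ni;
  (void)nf;
}
// ---------------------------------------------------------------------------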
2411 
2412 
2413 
2414 template <typename NumberType>
2415 NumberType
2416 ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
2417 {
2418  Assert(state == LAPACKSupport::matrix ||
2419  state == LAPACKSupport::inverse_matrix,
2420  ExcMessage("norms can be called in matrix state only."));
2421  std::lock_guard<std::mutex> lock(mutex);
2422  NumberType res = 0.;
2423 
2424  if (grid->mpi_process_is_active)
2425  {
2426  const int iarow = indxg2p_(&submatrix_row,
2427  &row_block_size,
2428  &(grid->this_process_row),
2429  &first_process_row,
2430  &(grid->n_process_rows));
2431  const int iacol = indxg2p_(&submatrix_column,
2432  &column_block_size,
2433  &(grid->this_process_column),
2434  &first_process_column,
2435  &(grid->n_process_columns));
2436  const int mp0 = numroc_(&n_rows,
2437  &row_block_size,
2438  &(grid->this_process_row),
2439  &iarow,
2440  &(grid->n_process_rows));
2441  const int nq0 = numroc_(&n_columns,
2442  &column_block_size,
2443  &(grid->this_process_column),
2444  &iacol,
2445  &(grid->n_process_columns));
2446 
2447  // type='M': compute largest absolute value
2448  // type='F' || type='E': compute Frobenius norm
2449  // type='O' || type='1': compute one norm; type='I': compute infinity norm
2450  int lwork = 0; // for type == 'M' || type == 'F' || type == 'E'
2451  if (type == 'O' || type == '1')
2452  lwork = nq0;
2453  else if (type == 'I')
2454  lwork = mp0;
2455 
2456  work.resize(lwork);
2457  const NumberType *A_loc = this->values.begin();
2458  res = plange(&type,
2459  &n_rows,
2460  &n_columns,
2461  A_loc,
2462  &submatrix_row,
2463  &submatrix_column,
2464  descriptor,
2465  work.data());
2466  }
2467  grid->send_to_inactive(&res);
2468  return res;
2469 }
2470 
2471 
2472 
2473 template <typename NumberType>
2474 NumberType
2475 ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
2476 {
2477  Assert(state == LAPACKSupport::matrix ||
2478  state == LAPACKSupport::inverse_matrix,
2479  ExcMessage("norms can be called in matrix state only."));
2480  Assert(property == LAPACKSupport::symmetric,
2481  ExcMessage("Matrix has to be symmetric for this operation."));
2482  std::lock_guard<std::mutex> lock(mutex);
2483  NumberType res = 0.;
2484 
2485  if (grid->mpi_process_is_active)
2486  {
2487  // int IROFFA = MOD( IA-1, MB_A )
2488  // int ICOFFA = MOD( JA-1, NB_A )
2489  const int lcm =
2490  ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
2491  const int v2 = lcm / (grid->n_process_rows);
2492 
2493  const int IAROW = indxg2p_(&submatrix_row,
2494  &row_block_size,
2495  &(grid->this_process_row),
2496  &first_process_row,
2497  &(grid->n_process_rows));
2498  const int IACOL = indxg2p_(&submatrix_column,
2499  &column_block_size,
2500  &(grid->this_process_column),
2501  &first_process_column,
2502  &(grid->n_process_columns));
2503  const int Np0 = numroc_(&n_columns /*+IROFFA*/,
2504  &row_block_size,
2505  &(grid->this_process_row),
2506  &IAROW,
2507  &(grid->n_process_rows));
2508  const int Nq0 = numroc_(&n_columns /*+ICOFFA*/,
2509  &column_block_size,
2510  &(grid->this_process_column),
2511  &IACOL,
2512  &(grid->n_process_columns));
2513 
2514  const int v1 = iceil_(&Np0, &row_block_size);
2515  const int ldw = (n_local_rows == n_local_columns) ?
2516  0 :
2517  row_block_size * iceil_(&v1, &v2);
2518 
2519  const int lwork =
2520  (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
2521  work.resize(lwork);
2522  const NumberType *A_loc = this->values.begin();
2523  res = plansy(&type,
2524  &uplo,
2525  &n_columns,
2526  A_loc,
2527  &submatrix_row,
2528  &submatrix_column,
2529  descriptor,
2530  work.data());
2531  }
2532  grid->send_to_inactive(&res);
2533  return res;
2534 }
2535 
2536 
2537 
2538 # ifdef DEAL_II_WITH_HDF5
2539 namespace internal
2540 {
2541  namespace
2542  {
2543  void
2544  create_HDF5_state_enum_id(hid_t &state_enum_id)
2545  {
2546  // create HDF5 enum type for LAPACKSupport::State
2547  LAPACKSupport::State val;
2548  state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));
2549  val = LAPACKSupport::State::cholesky;
2550  herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
2551  AssertThrow(status >= 0, ExcInternalError());
2552  val = LAPACKSupport::State::eigenvalues;
2553  status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
2554  AssertThrow(status >= 0, ExcInternalError());
2555  val = LAPACKSupport::State::inverse_matrix;
2556  status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
2557  AssertThrow(status >= 0, ExcInternalError());
2558  val = LAPACKSupport::State::inverse_svd;
2559  status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
2560  AssertThrow(status >= 0, ExcInternalError());
2561  val = LAPACKSupport::State::lu;
2562  status = H5Tenum_insert(state_enum_id, "lu", &val);
2563  AssertThrow(status >= 0, ExcInternalError());
2564  val = LAPACKSupport::State::matrix;
2565  status = H5Tenum_insert(state_enum_id, "matrix", &val);
2566  AssertThrow(status >= 0, ExcInternalError());
2567  val = LAPACKSupport::State::svd;
2568  status = H5Tenum_insert(state_enum_id, "svd", &val);
2569  AssertThrow(status >= 0, ExcInternalError());
2570  val = LAPACKSupport::State::unusable;
2571  status = H5Tenum_insert(state_enum_id, "unusable", &val);
2572  AssertThrow(status >= 0, ExcInternalError());
2573  }
2574 
2575  void
2576  create_HDF5_property_enum_id(hid_t &property_enum_id)
2577  {
2578  // create HDF5 enum type for LAPACKSupport::Property
2579  property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));
2580  LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
2581  herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
2582  AssertThrow(status >= 0, ExcInternalError());
2583  prop = LAPACKSupport::Property::general;
2584  status = H5Tenum_insert(property_enum_id, "general", &prop);
2585  AssertThrow(status >= 0, ExcInternalError());
2586  prop = LAPACKSupport::Property::hessenberg;
2587  status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
2588  AssertThrow(status >= 0, ExcInternalError());
2589  prop = LAPACKSupport::Property::lower_triangular;
2590  status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
2591  AssertThrow(status >= 0, ExcInternalError());
2592  prop = LAPACKSupport::Property::symmetric;
2593  status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
2594  AssertThrow(status >= 0, ExcInternalError());
2595  prop = LAPACKSupport::Property::upper_triangular;
2596  status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
2597  AssertThrow(status >= 0, ExcInternalError());
2598  }
2599  } // namespace
2600 } // namespace internal
2601 # endif
2602 
2603 
2604 
2605 template <typename NumberType>
2606 void
2607 ScaLAPACKMatrix<NumberType>::save(
2608  const std::string & filename,
2609  const std::pair<unsigned int, unsigned int> &chunk_size) const
2610 {
2611 # ifndef DEAL_II_WITH_HDF5
2612  (void)filename;
2613  (void)chunk_size;
2614  AssertThrow(false, ExcMessage("HDF5 support is disabled."));
2615 # else
2616 
2617  std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
2618 
2619  if (chunks_size_.first == numbers::invalid_unsigned_int ||
2620  chunks_size_.second == numbers::invalid_unsigned_int)
2621  {
2622  // default: store the matrix in chunks of columns
2623  chunks_size_.first = n_rows;
2624  chunks_size_.second = 1;
2625  }
2626  Assert(chunks_size_.first > 0,
2627  ExcMessage("The row chunk size must be larger than 0."));
2628  AssertIndexRange(chunks_size_.first, n_rows + 1);
2629  Assert(chunks_size_.second > 0,
2630  ExcMessage("The column chunk size must be larger than 0."));
2631  AssertIndexRange(chunks_size_.second, n_columns + 1);
2632 
2633 # ifdef H5_HAVE_PARALLEL
2634  // implementation for configurations equipped with a parallel file system
2635  save_parallel(filename, chunks_size_);
2636 
2637 # else
2638  // implementation for configurations with no parallel file system
2639  save_serial(filename, chunks_size_);
2640 
2641 # endif
2642 # endif
2643 }
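// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of scalapack.cc): writing a matrix to an
// HDF5 file, assuming deal.II was configured with DEAL_II_WITH_HDF5. The
// file name is illustrative; omitting the chunk size stores the matrix in
// chunks of single columns.
#include <deal.II/lac/scalapack.h>

#include <utility>

void example_save(const dealii::ScaLAPACKMatrix<double> &A)
{
  A.save("matrix.h5"); // default chunking: one column per chunk
  // explicit 64 x 64 chunks would require at least 64 rows and columns:
  // A.save("matrix.h5", std::make_pair(64u, 64u));
}
// ---------------------------------------------------------------------------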
2644 
2645 
2646 
2647 template <typename NumberType>
2648 void
2649 ScaLAPACKMatrix<NumberType>::save_serial(
2650  const std::string & filename,
2651  const std::pair<unsigned int, unsigned int> &chunk_size) const
2652 {
2653 # ifndef DEAL_II_WITH_HDF5
2654  (void)filename;
2655  (void)chunk_size;
2656  Assert(false, ExcInternalError());
2657 # else
2658 
2659  /*
2660  * The content of the distributed matrix is copied to a matrix using a 1x1
2661  * process grid. Therefore, one process has all the data and can write it to a
2662  * file.
2663  *
2664  * Create a 1x1 column grid which will be used to initialize
2665  * an effectively serial ScaLAPACK matrix to gather the contents from the
2666  * current object
2667  */
2668  const auto column_grid =
2669  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2670  1,
2671  1);
2672 
2673  const int MB = n_rows, NB = n_columns;
2674  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2675  copy_to(tmp);
2676 
2677  // the 1x1 grid has only one process and this one writes
2678  // the content of the matrix to the HDF5 file
2679  if (tmp.grid->mpi_process_is_active)
2680  {
2681  herr_t status;
2682 
2683  // create a new file using default properties
2684  hid_t file_id =
2685  H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
2686 
2687  // modify dataset creation properties, i.e. enable chunking
2688  hsize_t chunk_dims[2];
2689  // reverse the order of rows and columns as ScaLAPACK uses column-major
2690  // ordering
2691  chunk_dims[0] = chunk_size.second;
2692  chunk_dims[1] = chunk_size.first;
2693  hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
2694  status = H5Pset_chunk(data_property, 2, chunk_dims);
2695  AssertThrow(status >= 0, ExcIO());
2696 
2697  // create the data space for the dataset
2698  hsize_t dims[2];
2699  // change order of rows and columns as ScaLAPACKMatrix uses column major
2700  // ordering
2701  dims[0] = n_columns;
2702  dims[1] = n_rows;
2703  hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
2704 
2705  // create the dataset within the file using chunk creation properties
2706  hid_t type_id = hdf5_type_id(tmp.values.data());
2707  hid_t dataset_id = H5Dcreate2(file_id,
2708  "/matrix",
2709  type_id,
2710  dataspace_id,
2711  H5P_DEFAULT,
2712  data_property,
2713  H5P_DEFAULT);
2714 
2715  // write the dataset
2716  status = H5Dwrite(
2717  dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
2718  AssertThrow(status >= 0, ExcIO());
2719 
2720  // create HDF5 enum type for LAPACKSupport::State and
2721  // LAPACKSupport::Property
2722  hid_t state_enum_id, property_enum_id;
2723  internal::create_HDF5_state_enum_id(state_enum_id);
2724  internal::create_HDF5_property_enum_id(property_enum_id);
2725 
2726  // create the data space for the state enum
2727  hsize_t dims_state[1];
2728  dims_state[0] = 1;
2729  hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2730  // create the dataset for the state enum
2731  hid_t state_enum_dataset = H5Dcreate2(file_id,
2732  "/state",
2733  state_enum_id,
2734  state_enum_dataspace,
2735  H5P_DEFAULT,
2736  H5P_DEFAULT,
2737  H5P_DEFAULT);
2738  // write the dataset for the state enum
2739  status = H5Dwrite(state_enum_dataset,
2740  state_enum_id,
2741  H5S_ALL,
2742  H5S_ALL,
2743  H5P_DEFAULT,
2744  &state);
2745  AssertThrow(status >= 0, ExcIO());
2746 
2747  // create the data space for the property enum
2748  hsize_t dims_property[1];
2749  dims_property[0] = 1;
2750  hid_t property_enum_dataspace =
2751  H5Screate_simple(1, dims_property, nullptr);
2752  // create the dataset for the property enum
2753  hid_t property_enum_dataset = H5Dcreate2(file_id,
2754  "/property",
2755  property_enum_id,
2756  property_enum_dataspace,
2757  H5P_DEFAULT,
2758  H5P_DEFAULT,
2759  H5P_DEFAULT);
2760  // write the dataset for the property enum
2761  status = H5Dwrite(property_enum_dataset,
2762  property_enum_id,
2763  H5S_ALL,
2764  H5S_ALL,
2765  H5P_DEFAULT,
2766  &property);
2767  AssertThrow(status >= 0, ExcIO());
2768 
2769  // end access to the datasets and release resources used by them
2770  status = H5Dclose(dataset_id);
2771  AssertThrow(status >= 0, ExcIO());
2772  status = H5Dclose(state_enum_dataset);
2773  AssertThrow(status >= 0, ExcIO());
2774  status = H5Dclose(property_enum_dataset);
2775  AssertThrow(status >= 0, ExcIO());
2776 
2777  // terminate access to the data spaces
2778  status = H5Sclose(dataspace_id);
2779  AssertThrow(status >= 0, ExcIO());
2780  status = H5Sclose(state_enum_dataspace);
2781  AssertThrow(status >= 0, ExcIO());
2782  status = H5Sclose(property_enum_dataspace);
2783  AssertThrow(status >= 0, ExcIO());
2784 
2785  // release enum data types
2786  status = H5Tclose(state_enum_id);
2787  AssertThrow(status >= 0, ExcIO());
2788  status = H5Tclose(property_enum_id);
2789  AssertThrow(status >= 0, ExcIO());
2790 
2791  // release the creation property
2792  status = H5Pclose(data_property);
2793  AssertThrow(status >= 0, ExcIO());
2794 
2795  // close the file.
2796  status = H5Fclose(file_id);
2797  AssertThrow(status >= 0, ExcIO());
2798  }
2799 # endif
2800 }
2801 
2802 
2803 
2804 template <typename NumberType>
2805 void
2806 ScaLAPACKMatrix<NumberType>::save_parallel(
2807  const std::string & filename,
2808  const std::pair<unsigned int, unsigned int> &chunk_size) const
2809 {
2810 # ifndef DEAL_II_WITH_HDF5
2811  (void)filename;
2812  (void)chunk_size;
2813  Assert(false, ExcInternalError());
2814 # else
2815 
2816  const unsigned int n_mpi_processes(
2817  Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
2818  MPI_Info info = MPI_INFO_NULL;
2819  /*
2820  * The content of the distributed matrix is copied to a matrix using a
2821  * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
2822  * of the matrix, which they can write to the file
2823  *
2824  * Create a 1xn_processes column grid
2825  */
2826  const auto column_grid =
2827  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2828  1,
2829  n_mpi_processes);
2830 
2831  const int MB = n_rows;
2832  /*
2833  * If the ratio n_columns/n_mpi_processes is smaller than the column block
2834  * size of the original matrix, the redistribution and saving of the matrix
2835  * requires a significant amount of MPI communication. Therefore, it is better
2836  * to set a minimum value for the block size NB, so that only
2837  * ceil(n_columns/NB) processes are actively involved in saving the matrix.
2838  * Example: A 2*10^9 x 400 matrix is distributed on a 80 x 5 process grid
2839  * using block size 32. Instead of distributing the matrix on a 1 x 400
2840  * process grid with a row block size of 2*10^9 and a column block size of 1,
2841  * the minimum value for NB yields that only ceil(400/32)=13 processes will be
2842  * writing the matrix to disk.
2843  */
2844  const int NB = std::max(static_cast<int>(std::ceil(
2845  static_cast<double>(n_columns) / n_mpi_processes)),
2846  column_block_size);
2847 
2848  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2849  copy_to(tmp);
2850 
2851  // get pointer to data held by the process
2852  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
2853 
2854  herr_t status;
2855  // dataset dimensions
2856  hsize_t dims[2];
2857 
2858  // set up file access property list with parallel I/O access
2859  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
2860  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2861  AssertThrow(status >= 0, ExcIO());
2862 
2863  // create a new file collectively and release property list identifier
2864  hid_t file_id =
2865  H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
2866  status = H5Pclose(plist_id);
2867  AssertThrow(status >= 0, ExcIO());
2868 
2869  // As ScaLAPACK, and therefore the class ScaLAPACKMatrix, uses column-major
2870  // ordering but HDF5 row-major ordering, we have to reverse entries related to
2871  // columns and rows in the following. Create the dataspace for the dataset.
2872  dims[0] = tmp.n_columns;
2873  dims[1] = tmp.n_rows;
2874 
2875  hid_t filespace = H5Screate_simple(2, dims, nullptr);
2876 
2877  // create the chunked dataset with default properties and close filespace
2878  hsize_t chunk_dims[2];
2879  // reverse the order of rows and columns as ScaLAPACK uses column-major ordering
2880  chunk_dims[0] = chunk_size.second;
2881  chunk_dims[1] = chunk_size.first;
2882  plist_id = H5Pcreate(H5P_DATASET_CREATE);
2883  H5Pset_chunk(plist_id, 2, chunk_dims);
2884  hid_t type_id = hdf5_type_id(data);
2885  hid_t dset_id = H5Dcreate2(
2886  file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
2887 
2888  status = H5Sclose(filespace);
2889  AssertThrow(status >= 0, ExcIO());
2890 
2891  status = H5Pclose(plist_id);
2892  AssertThrow(status >= 0, ExcIO());
2893 
2894  // gather the number of local rows and columns from all processes
2895  std::vector<int> proc_n_local_rows(n_mpi_processes),
2896  proc_n_local_columns(n_mpi_processes);
2897  int ierr = MPI_Allgather(&tmp.n_local_rows,
2898  1,
2899  MPI_INT,
2900  proc_n_local_rows.data(),
2901  1,
2902  MPI_INT,
2903  tmp.grid->mpi_communicator);
2904  AssertThrowMPI(ierr);
2905  ierr = MPI_Allgather(&tmp.n_local_columns,
2906  1,
2907  MPI_INT,
2908  proc_n_local_columns.data(),
2909  1,
2910  MPI_INT,
2911  tmp.grid->mpi_communicator);
2912  AssertThrowMPI(ierr);
2913 
2914  const unsigned int my_rank(
2915  Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
2916 
2917  // hyperslab selection parameters
2918  // each process defines a dataset in memory and writes it to the hyperslab in
2919  // the file
2920  hsize_t count[2];
2921  count[0] = tmp.n_local_columns;
2922  count[1] = tmp.n_rows;
2923  hid_t memspace = H5Screate_simple(2, count, nullptr);
2924 
2925  hsize_t offset[2] = {0};
2926  for (unsigned int i = 0; i < my_rank; ++i)
2927  offset[0] += proc_n_local_columns[i];
2928 
2929  // select hyperslab in the file.
2930  filespace = H5Dget_space(dset_id);
2931  status = H5Sselect_hyperslab(
2932  filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2933  AssertThrow(status >= 0, ExcIO());
2934 
2935  // create property list for independent dataset write
2936  plist_id = H5Pcreate(H5P_DATASET_XFER);
2937  status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
2938  AssertThrow(status >= 0, ExcIO());
2939 
2940  // process with no data will not participate in writing to the file
2941  if (tmp.values.size() > 0)
2942  {
2943  status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
2944  AssertThrow(status >= 0, ExcIO());
2945  }
2946  // close/release sources
2947  status = H5Dclose(dset_id);
2948  AssertThrow(status >= 0, ExcIO());
2949  status = H5Sclose(filespace);
2950  AssertThrow(status >= 0, ExcIO());
2951  status = H5Sclose(memspace);
2952  AssertThrow(status >= 0, ExcIO());
2953  status = H5Pclose(plist_id);
2954  AssertThrow(status >= 0, ExcIO());
2955  status = H5Fclose(file_id);
2956  AssertThrow(status >= 0, ExcIO());
2957 
2958  // before writing the state and property to file wait for
2959  // all processes to finish writing the matrix content to the file
2960  ierr = MPI_Barrier(tmp.grid->mpi_communicator);
2961  AssertThrowMPI(ierr);
2962 
2963  // only root process will write state and property to the file
2964  if (tmp.grid->this_mpi_process == 0)
2965  {
2966  // open file using default properties
2967  hid_t file_id_reopen =
2968  H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
2969 
2970  // create HDF5 enum type for LAPACKSupport::State and
2971  // LAPACKSupport::Property
2972  hid_t state_enum_id, property_enum_id;
2973  internal::create_HDF5_state_enum_id(state_enum_id);
2974  internal::create_HDF5_property_enum_id(property_enum_id);
2975 
2976  // create the data space for the state enum
2977  hsize_t dims_state[1];
2978  dims_state[0] = 1;
2979  hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2980  // create the dataset for the state enum
2981  hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
2982  "/state",
2983  state_enum_id,
2984  state_enum_dataspace,
2985  H5P_DEFAULT,
2986  H5P_DEFAULT,
2987  H5P_DEFAULT);
2988  // write the dataset for the state enum
2989  status = H5Dwrite(state_enum_dataset,
2990  state_enum_id,
2991  H5S_ALL,
2992  H5S_ALL,
2993  H5P_DEFAULT,
2994  &state);
2995  AssertThrow(status >= 0, ExcIO());
2996 
2997  // create the data space for the property enum
2998  hsize_t dims_property[1];
2999  dims_property[0] = 1;
3000  hid_t property_enum_dataspace =
3001  H5Screate_simple(1, dims_property, nullptr);
3002  // create the dataset for the property enum
3003  hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
3004  "/property",
3005  property_enum_id,
3006  property_enum_dataspace,
3007  H5P_DEFAULT,
3008  H5P_DEFAULT,
3009  H5P_DEFAULT);
3010  // write the dataset for the property enum
3011  status = H5Dwrite(property_enum_dataset,
3012  property_enum_id,
3013  H5S_ALL,
3014  H5S_ALL,
3015  H5P_DEFAULT,
3016  &property);
3017  AssertThrow(status >= 0, ExcIO());
3018 
3019  status = H5Dclose(state_enum_dataset);
3020  AssertThrow(status >= 0, ExcIO());
3021  status = H5Dclose(property_enum_dataset);
3022  AssertThrow(status >= 0, ExcIO());
3023  status = H5Sclose(state_enum_dataspace);
3024  AssertThrow(status >= 0, ExcIO());
3025  status = H5Sclose(property_enum_dataspace);
3026  AssertThrow(status >= 0, ExcIO());
3027  status = H5Tclose(state_enum_id);
3028  AssertThrow(status >= 0, ExcIO());
3029  status = H5Tclose(property_enum_id);
3030  AssertThrow(status >= 0, ExcIO());
3031  status = H5Fclose(file_id_reopen);
3032  AssertThrow(status >= 0, ExcIO());
3033  }
3034 
3035 # endif
3036 }
3037 
3038 
3039 
3040 template <typename NumberType>
3041 void
3042 ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
3043 {
3044 # ifndef DEAL_II_WITH_HDF5
3045  (void)filename;
3046  AssertThrow(false, ExcMessage("HDF5 support is disabled."));
3047 # else
3048 # ifdef H5_HAVE_PARALLEL
3049  // implementation for configurations equipped with a parallel file system
3050  load_parallel(filename);
3051 
3052 # else
3053  // implementation for configurations with no parallel file system
3054  load_serial(filename);
3055 # endif
3056 # endif
3057 }
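// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of scalapack.cc): reading a matrix back
// from an HDF5 file written by save(). The global size of the target matrix
// must match the archive; the grid layout and block sizes may differ from
// the ones used when saving. The 200 x 100 size below is illustrative.
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

void example_load(const MPI_Comm comm)
{
  using namespace dealii;
  const auto grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(comm, 2, 2);
  ScaLAPACKMatrix<double> B(200, 100, grid, 32, 32);
  B.load("matrix.h5"); // restores entries, state and property from the file
}
// ---------------------------------------------------------------------------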
3058 
3059 
3060 
3061 template <typename NumberType>
3062 void
3063 ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
3064 {
3065 # ifndef DEAL_II_WITH_HDF5
3066  (void)filename;
3067  Assert(false, ExcInternalError());
3068 # else
3069 
3070  /*
3071  * The content of the matrix is read from the file into a matrix using a 1x1
3072  * process grid. Therefore, one process reads all the data and subsequently
3073  * distributes it to the other processes.
3074  */
3075  // create a 1x1 process grid
3076  const auto one_grid =
3077  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3078  1,
3079  1);
3080 
3081  const int MB = n_rows, NB = n_columns;
3082  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);
3083 
3084  int state_int = -1;
3085  int property_int = -1;
3086 
3087  // the 1x1 grid has only one process and this one reads
3088  // the content of the matrix from the HDF5 file
3089  if (tmp.grid->mpi_process_is_active)
3090  {
3091  herr_t status;
3092 
3093  // open the file in read-only mode
3094  hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
3095 
3096  // open the dataset in the file
3097  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3098 
3099  // check the datatype of the data in the file
3100  // datatype of source and destination must have the same class
3101  // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3102  // Selection
3103  hid_t datatype = H5Dget_type(dataset_id);
3104  H5T_class_t t_class_in = H5Tget_class(datatype);
3105  H5T_class_t t_class = H5Tget_class(hdf5_type_id(tmp.values.data()));
3106  AssertThrow(
3107  t_class_in == t_class,
3108  ExcMessage(
3109  "The data type of the matrix to be read does not match the archive"));
3110 
3111  // get dataspace handle
3112  hid_t dataspace_id = H5Dget_space(dataset_id);
3113  // get number of dimensions
3114  const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3115  AssertThrow(ndims == 2, ExcIO());
3116  // get every dimension
3117  hsize_t dims[2];
3118  H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3119  AssertThrow(
3120  static_cast<int>(dims[0]) == n_columns,
3121  ExcMessage(
3122  "The number of columns of the matrix does not match the content of the archive"));
3123  AssertThrow(
3124  static_cast<int>(dims[1]) == n_rows,
3125  ExcMessage(
3126  "The number of rows of the matrix does not match the content of the archive"));
3127 
3128  // read data
3129  status = H5Dread(dataset_id,
3130  hdf5_type_id(tmp.values.data()),
3131  H5S_ALL,
3132  H5S_ALL,
3133  H5P_DEFAULT,
3134  tmp.values.data());
3135  AssertThrow(status >= 0, ExcIO());
3136 
3137  // create HDF5 enum type for LAPACKSupport::State and
3138  // LAPACKSupport::Property
3139  hid_t state_enum_id, property_enum_id;
3140  internal::create_HDF5_state_enum_id(state_enum_id);
3141  internal::create_HDF5_property_enum_id(property_enum_id);
3142 
3143  // open the datasets for the state and property enum in the file
3144  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3145  hid_t datatype_state = H5Dget_type(dataset_state_id);
3146  H5T_class_t t_class_state = H5Tget_class(datatype_state);
3147  AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3148 
3149  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3150  hid_t datatype_property = H5Dget_type(dataset_property_id);
3151  H5T_class_t t_class_property = H5Tget_class(datatype_property);
3152  AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3153 
3154  // get dataspace handles
3155  hid_t dataspace_state = H5Dget_space(dataset_state_id);
3156  hid_t dataspace_property = H5Dget_space(dataset_property_id);
3157  // get number of dimensions
3158  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3159  AssertThrow(ndims_state == 1, ExcIO());
3160  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3161  AssertThrow(ndims_property == 1, ExcIO());
3162  // get every dimension
3163  hsize_t dims_state[1];
3164  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3165  AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3166  hsize_t dims_property[1];
3167  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3168  AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3169 
3170  // read data
3171  status = H5Dread(dataset_state_id,
3172  state_enum_id,
3173  H5S_ALL,
3174  H5S_ALL,
3175  H5P_DEFAULT,
3176  &tmp.state);
3177  AssertThrow(status >= 0, ExcIO());
3178  // To send the state from the root process to the other processes
3179  // the state enum is cast to an integer, which will be broadcast and
3180  // subsequently cast back to the enum type
3181  state_int = static_cast<int>(tmp.state);
3182 
3183  status = H5Dread(dataset_property_id,
3184  property_enum_id,
3185  H5S_ALL,
3186  H5S_ALL,
3187  H5P_DEFAULT,
3188  &tmp.property);
3189  AssertThrow(status >= 0, ExcIO());
3190  // To send the property from the root process to the other processes
3191  // the property enum is cast to an integer, which will be broadcast and
3192  // subsequently cast back to the enum type
3193  property_int = static_cast<int>(tmp.property);
3194 
3195  // terminate access to the data spaces
3196  status = H5Sclose(dataspace_id);
3197  AssertThrow(status >= 0, ExcIO());
3198  status = H5Sclose(dataspace_state);
3199  AssertThrow(status >= 0, ExcIO());
3200  status = H5Sclose(dataspace_property);
3201  AssertThrow(status >= 0, ExcIO());
3202 
3203  // release data type handles
3204  status = H5Tclose(datatype);
3205  AssertThrow(status >= 0, ExcIO());
3206  status = H5Tclose(state_enum_id);
3207  AssertThrow(status >= 0, ExcIO());
3208  status = H5Tclose(property_enum_id);
3209  AssertThrow(status >= 0, ExcIO());
3210 
3211  // end access to the data sets and release resources used by them
3212  status = H5Dclose(dataset_state_id);
3213  AssertThrow(status >= 0, ExcIO());
3214  status = H5Dclose(dataset_id);
3215  AssertThrow(status >= 0, ExcIO());
3216  status = H5Dclose(dataset_property_id);
3217  AssertThrow(status >= 0, ExcIO());
3218 
3219  // close the file.
3220  status = H5Fclose(file_id);
3221  AssertThrow(status >= 0, ExcIO());
3222  }
3223  // so far only the root process has the correct state integer --> broadcasting
3224  tmp.grid->send_to_inactive(&state_int, 1);
3225  // so far only the root process has the correct property integer -->
3226  // broadcasting
3227  tmp.grid->send_to_inactive(&property_int, 1);
3228 
3229  tmp.state = static_cast<LAPACKSupport::State>(state_int);
3230  tmp.property = static_cast<LAPACKSupport::Property>(property_int);
3231 
3232  tmp.copy_to(*this);
3233 
3234 # endif // DEAL_II_WITH_HDF5
3235 }
3236 
3237 
3238 
3239 template <typename NumberType>
3240 void
3241 ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
3242 {
3243 # ifndef DEAL_II_WITH_HDF5
3244  (void)filename;
3245  Assert(false, ExcInternalError());
3246 # else
3247 # ifndef H5_HAVE_PARALLEL
3248  Assert(false, ExcInternalError());
3249 # else
3250 
3251  const unsigned int n_mpi_processes(
3252  Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
3253  MPI_Info info = MPI_INFO_NULL;
3254  /*
3255  * The content of the distributed matrix is read into a matrix using a
3256  * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
3257  * of the matrix, which they can read from the file
3258  */
3259  // create a 1xP column grid with P being the number of MPI processes
3260  const auto column_grid =
3261  std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3262  1,
3263  n_mpi_processes);
3264 
3265  const int MB = n_rows;
3266  // for the choice of NB see explanation in save_parallel()
3267  const int NB = std::max(static_cast<int>(std::ceil(
3268  static_cast<double>(n_columns) / n_mpi_processes)),
3269  column_block_size);
3270 
3271  ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
3272 
3273  // get pointer to data held by the process
3274  NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
3275 
3276  herr_t status;
3277 
3278  // set up file access property list with parallel I/O access
3279  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
3280  status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
3281  AssertThrow(status >= 0, ExcIO());
3282 
3283  // open file collectively in read-only mode and release property list
3284  // identifier
3285  hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
3286  status = H5Pclose(plist_id);
3287  AssertThrow(status >= 0, ExcIO());
3288 
3289  // open the dataset in the file collectively
3290  hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3291 
3292  // check the datatype of the dataset in the file
3293  // if the classes of type of the dataset and the matrix do not match abort
3294  // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3295  // Selection
3296  hid_t datatype = hdf5_type_id(data);
3297  hid_t datatype_inp = H5Dget_type(dataset_id);
3298  H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
3299  H5T_class_t t_class = H5Tget_class(datatype);
3300  AssertThrow(
3301  t_class_inp == t_class,
3302  ExcMessage(
3303  "The data type of the matrix to be read does not match the archive"));
3304 
3305  // get the dimensions of the matrix stored in the file
3306  // get dataspace handle
3307  hid_t dataspace_id = H5Dget_space(dataset_id);
3308  // get number of dimensions
3309  const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3310  AssertThrow(ndims == 2, ExcIO());
3311  // get every dimension
3312  hsize_t dims[2];
3313  status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3314  AssertThrow(status >= 0, ExcIO());
3315  AssertThrow(
3316  static_cast<int>(dims[0]) == n_columns,
3317  ExcMessage(
3318  "The number of columns of the matrix does not match the content of the archive"));
3319  AssertThrow(
3320  static_cast<int>(dims[1]) == n_rows,
3321  ExcMessage(
3322  "The number of rows of the matrix does not match the content of the archive"));
3323 
3324  // gather the number of local rows and columns from all processes
3325  std::vector<int> proc_n_local_rows(n_mpi_processes),
3326  proc_n_local_columns(n_mpi_processes);
3327  int ierr = MPI_Allgather(&tmp.n_local_rows,
3328  1,
3329  MPI_INT,
3330  proc_n_local_rows.data(),
3331  1,
3332  MPI_INT,
3333  tmp.grid->mpi_communicator);
3334  AssertThrowMPI(ierr);
3335  ierr = MPI_Allgather(&tmp.n_local_columns,
3336  1,
3337  MPI_INT,
3338  proc_n_local_columns.data(),
3339  1,
3340  MPI_INT,
3341  tmp.grid->mpi_communicator);
3342  AssertThrowMPI(ierr);
3343 
3344  const unsigned int my_rank(
3345  Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
3346 
3347  // hyperslab selection parameters
3348  // each process defines a dataset in memory and reads it from the hyperslab
3349  // in the file
3350  hsize_t count[2];
3351  count[0] = tmp.n_local_columns;
3352  count[1] = tmp.n_local_rows;
3353 
3354  hsize_t offset[2] = {0};
3355  for (unsigned int i = 0; i < my_rank; ++i)
3356  offset[0] += proc_n_local_columns[i];
3357 
3358  // select hyperslab in the file
3359  status = H5Sselect_hyperslab(
3360  dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
3361  AssertThrow(status >= 0, ExcIO());
3362 
3363  // create a memory dataspace independently
3364  hid_t memspace = H5Screate_simple(2, count, nullptr);
3365 
3366  // read data independently
3367  status =
3368  H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
3369  AssertThrow(status >= 0, ExcIO());
3370 
3371  // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
3372  hid_t state_enum_id, property_enum_id;
3373  internal::create_HDF5_state_enum_id(state_enum_id);
3374  internal::create_HDF5_property_enum_id(property_enum_id);
3375 
3376  // open the datasets for the state and property enum in the file
3377  hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3378  hid_t datatype_state = H5Dget_type(dataset_state_id);
3379  H5T_class_t t_class_state = H5Tget_class(datatype_state);
3380  AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3381 
3382  hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3383  hid_t datatype_property = H5Dget_type(dataset_property_id);
3384  H5T_class_t t_class_property = H5Tget_class(datatype_property);
3385  AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3386 
3387  // get dataspace handles
3388  hid_t dataspace_state = H5Dget_space(dataset_state_id);
3389  hid_t dataspace_property = H5Dget_space(dataset_property_id);
3390  // get number of dimensions
3391  const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3392  AssertThrow(ndims_state == 1, ExcIO());
3393  const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3394  AssertThrow(ndims_property == 1, ExcIO());
3395  // get every dimension
3396  hsize_t dims_state[1];
3397  H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3398  AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3399  hsize_t dims_property[1];
3400  H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3401  AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3402 
3403  // read data
3404  status = H5Dread(
3405  dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
3406  AssertThrow(status >= 0, ExcIO());
3407 
3408  status = H5Dread(dataset_property_id,
3409  property_enum_id,
3410  H5S_ALL,
3411  H5S_ALL,
3412  H5P_DEFAULT,
3413  &tmp.property);
3414  AssertThrow(status >= 0, ExcIO());
3415 
3416  // close/release sources
3417  status = H5Sclose(memspace);
3418  AssertThrow(status >= 0, ExcIO());
3419  status = H5Dclose(dataset_id);
3420  AssertThrow(status >= 0, ExcIO());
3421  status = H5Dclose(dataset_state_id);
3422  AssertThrow(status >= 0, ExcIO());
3423  status = H5Dclose(dataset_property_id);
3424  AssertThrow(status >= 0, ExcIO());
3425  status = H5Sclose(dataspace_id);
3426  AssertThrow(status >= 0, ExcIO());
3427  status = H5Sclose(dataspace_state);
3428  AssertThrow(status >= 0, ExcIO());
3429  status = H5Sclose(dataspace_property);
3430  AssertThrow(status >= 0, ExcIO());
3431  // status = H5Tclose(datatype);
3432  // AssertThrow(status >= 0, ExcIO());
3433  status = H5Tclose(state_enum_id);
3434  AssertThrow(status >= 0, ExcIO());
3435  status = H5Tclose(property_enum_id);
3436  AssertThrow(status >= 0, ExcIO());
3437  status = H5Fclose(file_id);
3438  AssertThrow(status >= 0, ExcIO());
3439 
3440  // copying the distributed matrices
3441  tmp.copy_to(*this);
3442 
3443 # endif // H5_HAVE_PARALLEL
3444 # endif // DEAL_II_WITH_HDF5
3445 }
3446 
3447 
3448 
3449 namespace internal
3450 {
3451  namespace
3452  {
3453  template <typename NumberType>
3454  void
3455  scale_columns(ScaLAPACKMatrix<NumberType> & matrix,
3456  const ArrayView<const NumberType> &factors)
3457  {
3458  Assert(matrix.n() == factors.size(),
3459  ExcDimensionMismatch(matrix.n(), factors.size()));
3460 
3461  for (unsigned int i = 0; i < matrix.local_n(); ++i)
3462  {
3463  const NumberType s = factors[matrix.global_column(i)];
3464 
3465  for (unsigned int j = 0; j < matrix.local_m(); ++j)
3466  matrix.local_el(j, i) *= s;
3467  }
3468  }
3469 
3470  template <typename NumberType>
3471  void
3472  scale_rows(ScaLAPACKMatrix<NumberType> & matrix,
3473  const ArrayView<const NumberType> &factors)
3474  {
3475  Assert(matrix.m() == factors.size(),
3476  ExcDimensionMismatch(matrix.m(), factors.size()));
3477 
3478  for (unsigned int i = 0; i < matrix.local_m(); ++i)
3479  {
3480  const NumberType s = factors[matrix.global_row(i)];
3481 
3482  for (unsigned int j = 0; j < matrix.local_n(); ++j)
3483  matrix.local_el(i, j) *= s;
3484  }
3485  }
3486 
3487  } // namespace
3488 } // namespace internal
3489 
3490 
3491 
3492 template <typename NumberType>
3493 template <class InputVector>
3494 void
3495 ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
3496 {
3497  if (this->grid->mpi_process_is_active)
3498  internal::scale_columns(*this, make_array_view(factors));
3499 }
3500 
3501 
3502 
3503 template <typename NumberType>
3504 template <class InputVector>
3505 void
3506 ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
3507 {
3508  if (this->grid->mpi_process_is_active)
3509  internal::scale_rows(*this, make_array_view(factors));
3510 }
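// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of scalapack.cc): row and column scaling
// with a plain std::vector of factors, mirroring the internal use of
// scale_rows() in pseudoinverse(). The matrix is assumed to be initialized;
// the factor values are illustrative.
#include <deal.II/lac/scalapack.h>

#include <vector>

void example_scaling(dealii::ScaLAPACKMatrix<double> &A)
{
  std::vector<double> row_factors(A.m(), 2.0); // scale every row by 2
  std::vector<double> col_factors(A.n(), 0.5); // scale every column by 1/2
  A.scale_rows(row_factors);
  A.scale_columns(col_factors);
}
// ---------------------------------------------------------------------------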
3511 
3512 
3513 
3514 // instantiations
3515 # include "scalapack.inst"
3516 
3517 
3518 DEAL_II_NAMESPACE_CLOSE
3519 
3520 #endif // DEAL_II_WITH_SCALAPACK
array_view.h
LAPACKSupport::svd
@ svd
Matrix contains singular value decomposition,.
Definition: lapack_support.h:70
scalapack.h
LAPACKSupport::diagonal
@ diagonal
Matrix is diagonal.
Definition: lapack_support.h:121
ScaLAPACKMatrix::reciprocal_condition_number
NumberType reciprocal_condition_number(const NumberType a_norm) const
Definition: scalapack.cc:2312
ScaLAPACKMatrix::eigenpairs_symmetric_by_index
std::vector< NumberType > eigenpairs_symmetric_by_index(const std::pair< unsigned int, unsigned int > &index_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1429
ScaLAPACKMatrix::compute_lu_factorization
void compute_lu_factorization()
Definition: scalapack.cc:1272
ScaLAPACKMatrix::descriptor
int descriptor[9]
Definition: scalapack.h:937
ScaLAPACKMatrix::scale_rows
void scale_rows(const InputVector &factors)
Definition: scalapack.cc:3506
ScaLAPACKMatrix::l1_norm
NumberType l1_norm() const
Definition: scalapack.cc:2374
ScaLAPACKMatrix::compute_cholesky_factorization
void compute_cholesky_factorization()
Definition: scalapack.cc:1239
StandardExceptions::ExcIO
static ::ExceptionBase & ExcIO()
ScaLAPACKMatrix::operator=
ScaLAPACKMatrix< NumberType > & operator=(const FullMatrix< NumberType > &)
Definition: scalapack.cc:330
CUDAWrappers::block_size
constexpr int block_size
Definition: cuda_size.h:29
ScaLAPACKMatrix::grid
std::shared_ptr< const Utilities::MPI::ProcessGrid > grid
Definition: scalapack.h:902
ScaLAPACKMatrix::state
LAPACKSupport::State state
Definition: scalapack.h:889
LAPACKSupport::unusable
@ unusable
Contents is something useless.
Definition: lapack_support.h:74
LAPACKSupport::lower_triangular
@ lower_triangular
Matrix is lower triangular.
Definition: lapack_support.h:119
LAPACKSupport::inverse_svd
@ inverse_svd
Matrix is the inverse of a singular value decomposition.
Definition: lapack_support.h:72
ScaLAPACKMatrix::copy_from
void copy_from(const LAPACKFullMatrix< NumberType > &matrix, const unsigned int rank)
Definition: scalapack.cc:361
ScaLAPACKMatrix::eigenpairs_symmetric_by_value
std::vector< NumberType > eigenpairs_symmetric_by_value(const std::pair< NumberType, NumberType > &value_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1452
StandardExceptions::ExcNotImplemented
static ::ExceptionBase & ExcNotImplemented()
ScaLAPACKMatrix::norm_general
NumberType norm_general(const char type) const
Definition: scalapack.cc:2416
ScaLAPACKMatrix::ScaLAPACKMatrix
ScaLAPACKMatrix(const size_type n_rows, const size_type n_columns, const std::shared_ptr< const Utilities::MPI::ProcessGrid > &process_grid, const size_type row_block_size=32, const size_type column_block_size=32, const LAPACKSupport::Property property=LAPACKSupport::Property::general)
Definition: scalapack.cc:79
ArrayView
Definition: array_view.h:77
ScaLAPACKMatrix::column_block_size
int column_block_size
Definition: scalapack.h:922
ScaLAPACKMatrix::save_parallel
void save_parallel(const std::string &filename, const std::pair< unsigned int, unsigned int > &chunk_size) const
Definition: scalapack.cc:2806
ScaLAPACKMatrix::mult
void mult(const NumberType b, const ScaLAPACKMatrix< NumberType > &B, const NumberType c, ScaLAPACKMatrix< NumberType > &C, const bool transpose_A=false, const bool transpose_B=false) const
Definition: scalapack.cc:1066
hdf5_type_id
hid_t hdf5_type_id(const number *)
Definition: scalapack.cc:38
ScaLAPACKMatrix::submatrix_row
const int submatrix_row
Definition: scalapack.h:977
LAPACKSupport::U
static const char U
Definition: lapack_support.h:167
SymmetricTensor::eigenvectors
std::array< std::pair< Number, Tensor< 1, dim, Number > >, std::integral_constant< int, dim >::value > eigenvectors(const SymmetricTensor< 2, dim, Number > &T, const SymmetricTensorEigenvectorMethod method=SymmetricTensorEigenvectorMethod::ql_implicit_shifts)
AlignedVector::size
size_type size() const
ScaLAPACKMatrix::save_serial
void save_serial(const std::string &filename, const std::pair< unsigned int, unsigned int > &chunk_size) const
Definition: scalapack.cc:2649
ScaLAPACKMatrix::copy_to
void copy_to(FullMatrix< NumberType > &matrix) const
Definition: scalapack.cc:663
AssertIndexRange
#define AssertIndexRange(index, range)
Definition: exceptions.h:1649
ScaLAPACKMatrix::row_block_size
int row_block_size
Definition: scalapack.h:917
Utilities::MPI::internal::Tags::scalapack_copy_to2
@ scalapack_copy_to2
ScaLAPACKMatrix<NumberType>::copy_to.
Definition: mpi_tags.h:117
Utilities::MPI::ProcessGrid::mpi_communicator
MPI_Comm mpi_communicator
Definition: process_grid.h:162
Physics::Elasticity::Kinematics::C
SymmetricTensor< 2, dim, Number > C(const Tensor< 2, dim, Number > &F)
MPI_Comm
ArrayView::size
std::size_t size() const
Definition: array_view.h:484
ScaLAPACKMatrix::reinit
void reinit(const size_type n_rows, const size_type n_columns, const std::shared_ptr< const Utilities::MPI::ProcessGrid > &process_grid, const size_type row_block_size=32, const size_type column_block_size=32, const LAPACKSupport::Property property=LAPACKSupport::Property::general)
Definition: scalapack.cc:215
LAPACKFullMatrix
Definition: lapack_full_matrix.h:60
LAPACKFullMatrix::m
size_type m() const
Definition: lapack_full_matrix.h:1023
ScaLAPACKMatrix
Definition: scalapack.h:77
LAPACKSupport::cholesky
@ cholesky
Contents is a Cholesky decomposition.
Definition: lapack_support.h:66
ScaLAPACKMatrix::norm_symmetric
NumberType norm_symmetric(const char type) const
Definition: scalapack.cc:2475
LAPACKFullMatrix::n
size_type n() const
Definition: lapack_full_matrix.h:1030
ScaLAPACKMatrix::eigenpairs_symmetric_by_value_MRRR
std::vector< NumberType > eigenpairs_symmetric_by_value_MRRR(const std::pair< NumberType, NumberType > &value_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1785
ScaLAPACKMatrix::TmTmult
void TmTmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1225
ScaLAPACKMatrix::copy_transposed
void copy_transposed(const ScaLAPACKMatrix< NumberType > &B)
Definition: scalapack.cc:980
ScaLAPACKMatrix::least_squares
void least_squares(ScaLAPACKMatrix< NumberType > &B, const bool transpose=false)
Definition: scalapack.cc:2133
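
A hedged sketch of the least-squares solver: for transpose=false and an m-by-n matrix A with m >= n, the solution of min ||B - A*X|| overwrites the leading rows of B, and A is replaced by its QR factorization. The helper name and variables are illustrative.

#include <deal.II/lac/scalapack.h>

// Illustrative helper: overdetermined solve A*X = B in the least-squares sense.
void solve_least_squares(dealii::ScaLAPACKMatrix<double> &A,
                         dealii::ScaLAPACKMatrix<double> &B)
{
  A.least_squares(B, /*transpose=*/false);
}
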
LAPACKSupport::symmetric
@ symmetric
Matrix is symmetric.
Definition: lapack_support.h:115
ScaLAPACKMatrix::save
void save(const std::string &filename, const std::pair< unsigned int, unsigned int > &chunk_size=std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int)) const
Definition: scalapack.cc:2607
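
A sketch of checkpointing through save()/load(): it requires a deal.II build configured with HDF5, the file name and chunk size are illustrative, and the matrix passed to load() is assumed to have the same global size as the stored one.

#include <deal.II/lac/scalapack.h>

#include <utility>

// Illustrative helpers: write A to an HDF5 file in 64x64 chunks, then read
// it back into B (same global dimensions as A).
void write_checkpoint(const dealii::ScaLAPACKMatrix<double> &A)
{
  A.save("matrix.h5", std::make_pair(64u, 64u));
}

void read_checkpoint(dealii::ScaLAPACKMatrix<double> &B)
{
  B.load("matrix.h5");
}
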
ScaLAPACKMatrix::set_property
void set_property(const LAPACKSupport::Property property)
Definition: scalapack.cc:302
ScaLAPACKMatrix::mmult
void mmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1183
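
The mmult/Tmmult/mTmult/TmTmult convenience functions wrap mult() with b=1 and c=0 (or c=1 when adding=true). A sketch, assuming square matrices of identical size on one grid so that every variant is dimensionally valid:

#include <deal.II/lac/scalapack.h>

// Illustrative helper: the four product variants C = op(A)*op(B).
void products(const dealii::ScaLAPACKMatrix<double> &A,
              const dealii::ScaLAPACKMatrix<double> &B,
              dealii::ScaLAPACKMatrix<double>       &C)
{
  A.mmult(C, B);   // C = A   * B
  A.Tmmult(C, B);  // C = A^T * B
  A.mTmult(C, B);  // C = A   * B^T
  A.TmTmult(C, B); // C = A^T * B^T
}
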
ScaLAPACKMatrix::property
LAPACKSupport::Property property
Definition: scalapack.h:895
StandardExceptions::ExcIndexRange
static ::ExceptionBase & ExcIndexRange(int arg1, int arg2, int arg3)
AssertIsFinite
#define AssertIsFinite(number)
Definition: exceptions.h:1681
StandardExceptions::ExcMessage
static ::ExceptionBase & ExcMessage(std::string arg1)
ScaLAPACKMatrix::n_columns
int n_columns
Definition: scalapack.h:912
LAPACKSupport::hessenberg
@ hessenberg
Matrix is in upper Hessenberg form.
Definition: lapack_support.h:123
TrilinosWrappers::internal::begin
VectorType::value_type * begin(VectorType &V)
Definition: trilinos_sparse_matrix.cc:51
LAPACKSupport::eigenvalues
@ eigenvalues
Eigenvalue vector is filled.
Definition: lapack_support.h:68
LAPACKSupport::State
State
Definition: lapack_support.h:57
internal::reinit
void reinit(MatrixBlock< MatrixType > &v, const BlockSparsityPattern &p)
Definition: matrix_block.h:621
mpi.h
ScaLAPACKMatrix::global_row
unsigned int global_row(const unsigned int loc_row) const
Definition: scalapack.cc:495
DEAL_II_NAMESPACE_OPEN
#define DEAL_II_NAMESPACE_OPEN
Definition: config.h:358
Physics::Elasticity::Kinematics::b
SymmetricTensor< 2, dim, Number > b(const Tensor< 2, dim, Number > &F)
ScaLAPACKMatrix::load_serial
void load_serial(const std::string &filename)
Definition: scalapack.cc:3063
LAPACKSupport::matrix
@ matrix
Contents is actually a matrix.
Definition: lapack_support.h:60
ScaLAPACKMatrix::mTmult
void mTmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1211
Utilities::MPI::internal::Tags::scalapack_copy_from
@ scalapack_copy_from
ScaLAPACKMatrix<NumberType>::copy_from.
Definition: mpi_tags.h:119
ScaLAPACKMatrix::global_column
unsigned int global_column(const unsigned int loc_column) const
Definition: scalapack.cc:512
Utilities::MPI::create_group
int create_group(const MPI_Comm &comm, const MPI_Group &group, const int tag, MPI_Comm *new_comm)
Definition: mpi.cc:160
LAPACKSupport::general
@ general
No special properties.
Definition: lapack_support.h:113
TrilinosWrappers::internal::end
VectorType::value_type * end(VectorType &V)
Definition: trilinos_sparse_matrix.cc:65
Utilities::MPI::sum
T sum(const T &t, const MPI_Comm &mpi_communicator)
ScaLAPACKMatrix::eigenpairs_symmetric_MRRR
std::vector< NumberType > eigenpairs_symmetric_MRRR(const bool compute_eigenvectors, const std::pair< unsigned int, unsigned int > &index_limits=std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int), const std::pair< NumberType, NumberType > &value_limits=std::make_pair(std::numeric_limits< NumberType >::quiet_NaN(), std::numeric_limits< NumberType >::quiet_NaN()))
Definition: scalapack.cc:1803
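
A sketch of computing the full spectrum with the MRRR driver; it assumes the matrix has been marked symmetric and uses equal row and column block sizes, and its contents are overwritten (by the eigenvectors when compute_eigenvectors=true). The helper name is illustrative.

#include <deal.II/lac/lapack_support.h>
#include <deal.II/lac/scalapack.h>

#include <vector>

// Illustrative helper: all eigenvalues and eigenvectors of a symmetric
// matrix; the eigenvectors replace the matrix contents.
std::vector<double> full_spectrum(dealii::ScaLAPACKMatrix<double> &A)
{
  A.set_property(dealii::LAPACKSupport::Property::symmetric);
  return A.eigenpairs_symmetric_MRRR(/*compute_eigenvectors=*/true);
}
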
AssertThrowMPI
#define AssertThrowMPI(error_code)
Definition: exceptions.h:1707
LAPACKSupport::ExcErrorCode
static ::ExceptionBase & ExcErrorCode(std::string arg1, types::blas_int arg2)
AlignedVector::data
pointer data()
ArrayView::make_array_view
ArrayView< typename std::remove_reference< typename std::iterator_traits< Iterator >::reference >::type, MemorySpaceType > make_array_view(const Iterator begin, const Iterator end)
Definition: array_view.h:607
LAPACKSupport::inverse_matrix
@ inverse_matrix
Contents is the inverse of a matrix.
Definition: lapack_support.h:62
Differentiation::SD::ceil
Expression ceil(const Expression &x)
Definition: symengine_math.cc:301
ScaLAPACKMatrix::get_state
LAPACKSupport::State get_state() const
Definition: scalapack.cc:321
ScaLAPACKMatrix::scale_columns
void scale_columns(const InputVector &factors)
Definition: scalapack.cc:3495
ScaLAPACKMatrix::load
void load(const std::string &filename)
Definition: scalapack.cc:3042
StandardExceptions::ExcInternalError
static ::ExceptionBase & ExcInternalError()
ScaLAPACKMatrix::invert
void invert()
Definition: scalapack.cc:1313
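
A brief sketch of in-place inversion: a general matrix is inverted via an LU decomposition, while a matrix marked symmetric is inverted through a Cholesky factorization and is therefore presumed positive definite. Names below are illustrative.

#include <deal.II/lac/lapack_support.h>
#include <deal.II/lac/scalapack.h>

// Illustrative helper: invert a symmetric positive definite matrix in place
// (the symmetric property selects the Cholesky-based code path).
void invert_spd(dealii::ScaLAPACKMatrix<double> &A)
{
  A.set_property(dealii::LAPACKSupport::Property::symmetric);
  A.invert();
}
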
ScaLAPACKMatrix::Tmmult
void Tmmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1197
ScaLAPACKMatrix::add
void add(const ScaLAPACKMatrix< NumberType > &B, const NumberType a=0., const NumberType b=1., const bool transpose_B=false)
Definition: scalapack.cc:990
Assert
#define Assert(cond, exc)
Definition: exceptions.h:1419
Utilities::MPI::this_mpi_process
unsigned int this_mpi_process(const MPI_Comm &mpi_communicator)
Definition: mpi.cc:128
TransposeTable::reinit
void reinit(const size_type size1, const size_type size2, const bool omit_default_initialization=false)
DEAL_II_MPI_CONST_CAST
#define DEAL_II_MPI_CONST_CAST(expr)
Definition: mpi.h:76
CUDAWrappers::chunk_size
constexpr int chunk_size
Definition: cuda_size.h:35
ScaLAPACKMatrix::pseudoinverse
unsigned int pseudoinverse(const NumberType ratio)
Definition: scalapack.cc:2225
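
A sketch of the pseudoinverse: singular values below ratio times the largest one are treated as zero, the matrix is overwritten by its Moore-Penrose pseudoinverse, and the return value is the resulting effective rank. It assumes equal row and column block sizes; the threshold and helper name below are illustrative.

#include <deal.II/lac/scalapack.h>

// Illustrative helper: replace A by its pseudoinverse and report the rank.
unsigned int truncate_and_invert(dealii::ScaLAPACKMatrix<double> &A)
{
  return A.pseudoinverse(/*ratio=*/1e-10);
}
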
Utilities::MPI::n_mpi_processes
unsigned int n_mpi_processes(const MPI_Comm &mpi_communicator)
Definition: mpi.cc:117
numbers::invalid_unsigned_int
static const unsigned int invalid_unsigned_int
Definition: types.h:191
Utilities::MPI::internal::Tags::scalapack_copy_to
@ scalapack_copy_to
ScaLAPACKMatrix<NumberType>::copy_to.
Definition: mpi_tags.h:115
ScaLAPACKMatrix::eigenpairs_symmetric
std::vector< NumberType > eigenpairs_symmetric(const bool compute_eigenvectors, const std::pair< unsigned int, unsigned int > &index_limits=std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int), const std::pair< NumberType, NumberType > &value_limits=std::make_pair(std::numeric_limits< NumberType >::quiet_NaN(), std::numeric_limits< NumberType >::quiet_NaN()))
Definition: scalapack.cc:1472
Utilities::MPI::min
T min(const T &t, const MPI_Comm &mpi_communicator)
StandardExceptions::ExcDimensionMismatch
static ::ExceptionBase & ExcDimensionMismatch(std::size_t arg1, std::size_t arg2)
ScaLAPACKMatrix::Tadd
void Tadd(const NumberType b, const ScaLAPACKMatrix< NumberType > &B)
Definition: scalapack.cc:1056
ScaLAPACKMatrix::frobenius_norm
NumberType frobenius_norm() const
Definition: scalapack.cc:2402
LAPACKSupport::upper_triangular
@ upper_triangular
Matrix is upper triangular.
Definition: lapack_support.h:117
hdf5.h
ScaLAPACKMatrix::compute_SVD
std::vector< NumberType > compute_SVD(ScaLAPACKMatrix< NumberType > *U=nullptr, ScaLAPACKMatrix< NumberType > *VT=nullptr)
Definition: scalapack.cc:2013
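
A sketch of a full SVD A = U*Sigma*VT: the singular values come back as a vector, U and VT are assumed to be MxM and NxN matrices on the same process grid (with equal row and column block sizes), and A is overwritten during the computation. The helper name is illustrative.

#include <deal.II/lac/scalapack.h>

#include <vector>

// Illustrative helper: compute singular values and both singular-vector
// factors of the M x N matrix A.
std::vector<double> full_svd(dealii::ScaLAPACKMatrix<double> &A,
                             dealii::ScaLAPACKMatrix<double> &U,
                             dealii::ScaLAPACKMatrix<double> &VT)
{
  return A.compute_SVD(&U, &VT);
}
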
FullMatrix
Definition: full_matrix.h:71
ScaLAPACKMatrix::linfty_norm
NumberType linfty_norm() const
Definition: scalapack.cc:2388
LAPACKSupport::Property
Property
Definition: lapack_support.h:110
internal
Definition: aligned_vector.h:369
ScaLAPACKMatrix::eigenpairs_symmetric_by_index_MRRR
std::vector< NumberType > eigenpairs_symmetric_by_index_MRRR(const std::pair< unsigned int, unsigned int > &index_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1762
memory.h
DEAL_II_NAMESPACE_CLOSE
#define DEAL_II_NAMESPACE_CLOSE
Definition: config.h:359
TableBase< 2, NumberType >::values
AlignedVector< NumberType > values
Definition: table.h:672
ScaLAPACKMatrix::n_rows
int n_rows
Definition: scalapack.h:907
ScaLAPACKMatrix::submatrix_column
const int submatrix_column
Definition: scalapack.h:983
ScaLAPACKMatrix::load_parallel
void load_parallel(const std::string &filename)
Definition: scalapack.cc:3241
AssertThrow
#define AssertThrow(cond, exc)
Definition: exceptions.h:1531
ScaLAPACKMatrix::get_property
LAPACKSupport::Property get_property() const
Definition: scalapack.cc:312
DerivativeForm::transpose
DerivativeForm< 1, spacedim, dim, Number > transpose(const DerivativeForm< 1, dim, spacedim, Number > &DF)
Definition: derivative_form.h:470
ScaLAPACKMatrix::n_local_rows
int n_local_rows
Definition: scalapack.h:927
Utilities::MPI::max
T max(const T &t, const MPI_Comm &mpi_communicator)
LAPACKSupport::lu
@ lu
Contents is an LU decomposition.
Definition: lapack_support.h:64
ScaLAPACKMatrix::n_local_columns
int n_local_columns
Definition: scalapack.h:932