scalapack.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2017 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#include <deal.II/lac/scalapack.h>
17
18#ifdef DEAL_II_WITH_SCALAPACK
19
21# include <deal.II/base/mpi.h>
22# include <deal.II/base/mpi.templates.h>
23
24# include <deal.II/lac/scalapack.templates.h>
25
26# ifdef DEAL_II_WITH_HDF5
27# include <hdf5.h>
28# endif
29
30# include <limits>
31# include <memory>
32
33DEAL_II_NAMESPACE_OPEN
34
35# ifdef DEAL_II_WITH_HDF5
36
37template <typename number>
38inline hid_t
39hdf5_type_id(const number *)
40{
41 DEAL_II_NOT_IMPLEMENTED();
42 // don't know what to put here; it does not matter
43 return -1;
44}
45
46inline hid_t
47hdf5_type_id(const double *)
48{
49 return H5T_NATIVE_DOUBLE;
50}
51
52inline hid_t
53hdf5_type_id(const float *)
54{
55 return H5T_NATIVE_FLOAT;
56}
57
58inline hid_t
59hdf5_type_id(const int *)
60{
61 return H5T_NATIVE_INT;
62}
63
64inline hid_t
65hdf5_type_id(const unsigned int *)
66{
67 return H5T_NATIVE_UINT;
68}
69
70inline hid_t
71hdf5_type_id(const char *)
72{
73 return H5T_NATIVE_CHAR;
74}
75# endif // DEAL_II_WITH_HDF5
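// Usage sketch (not part of the library source): the overload set above lets
// generic I/O code pick the matching HDF5 native type at compile time. The
// helper name below is hypothetical.
//
//   template <typename number>
//   void my_write_1d_dataset(const hid_t file, const number *data, hsize_t n)
//   {
//     const hid_t type  = hdf5_type_id(data); // H5T_NATIVE_DOUBLE for double, ...
//     const hid_t space = H5Screate_simple(1, &n, nullptr);
//     const hid_t dset  = H5Dcreate2(
//       file, "/data", type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
//     H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
//     H5Dclose(dset);
//     H5Sclose(space);
//   }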
76
77
78
79template <typename NumberType>
80ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
81 const size_type n_rows_,
82 const size_type n_columns_,
83 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
84 const size_type row_block_size_,
85 const size_type column_block_size_,
86 const LAPACKSupport::Property property_)
87 : uplo('L')
88 , // for non-symmetric matrices this is not needed
89 first_process_row(0)
90 , first_process_column(0)
91 , submatrix_row(1)
92 , submatrix_column(1)
93{
94 reinit(n_rows_,
95 n_columns_,
96 process_grid,
97 row_block_size_,
98 column_block_size_,
99 property_);
100}
101
102
103
104template <typename NumberType>
105ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
106 const size_type size,
107 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
108 const size_type block_size,
109 const LAPACKSupport::Property property)
110 : ScaLAPACKMatrix<NumberType>(size,
111 size,
112 process_grid,
113 block_size,
114 block_size,
115 property)
116{}
117
118
119
120template <typename NumberType>
121ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
122 const std::string &filename,
123 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
124 const size_type row_block_size,
125 const size_type column_block_size)
126 : uplo('L')
127 , // for non-symmetric matrices this is not needed
128 first_process_row(0)
129 , first_process_column(0)
130 , submatrix_row(1)
131 , submatrix_column(1)
132{
133# ifndef DEAL_II_WITH_HDF5
134 (void)filename;
135 (void)process_grid;
136 (void)row_block_size;
137 (void)column_block_size;
138 Assert(
139 false,
141 "This function is only available when deal.II is configured with HDF5"));
142# else
143
144 const unsigned int this_mpi_process(
145 Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));
146
147 // Before reading the content from disk the root process determines the
148 // dimensions of the matrix. Subsequently, memory is allocated by a call to
149 // reinit() and the matrix is loaded by a call to load().
150 if (this_mpi_process == 0)
151 {
152 herr_t status = 0;
153
154 // open file in read-only mode
155 hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
156 AssertThrow(file >= 0, ExcIO());
157
158 // get data set in file
159 hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);
160 AssertThrow(dataset >= 0, ExcIO());
161
162 // determine file space
163 hid_t filespace = H5Dget_space(dataset);
164
165 // get number of dimensions in data set
166 int rank = H5Sget_simple_extent_ndims(filespace);
167 AssertThrow(rank == 2, ExcIO());
168 hsize_t dims[2];
169 status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
170 AssertThrow(status >= 0, ExcIO());
171
172 // due to ScaLAPACK's column-major memory layout the dimensions are
173 // swapped
174 n_rows = dims[1];
175 n_columns = dims[0];
176
177 // close/release resources
178 status = H5Sclose(filespace);
179 AssertThrow(status >= 0, ExcIO());
180 status = H5Dclose(dataset);
181 AssertThrow(status >= 0, ExcIO());
182 status = H5Fclose(file);
183 AssertThrow(status >= 0, ExcIO());
184 }
185 int ierr = MPI_Bcast(&n_rows,
186 1,
187 Utilities::MPI::mpi_type_id_for_type<decltype(n_rows)>,
188 0 /*from root*/,
189 process_grid->mpi_communicator);
190 AssertThrowMPI(ierr);
191
192 ierr = MPI_Bcast(&n_columns,
193 1,
194 Utilities::MPI::mpi_type_id_for_type<decltype(n_columns)>,
195 0 /*from root*/,
196 process_grid->mpi_communicator);
197 AssertThrowMPI(ierr);
198
199 // the property will be overwritten by the subsequent call to load()
200 reinit(n_rows,
201 n_columns,
202 process_grid,
203 row_block_size,
204 column_block_size,
205 LAPACKSupport::Property::general);
206
207 load(filename.c_str());
208
209# endif // DEAL_II_WITH_HDF5
210}
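// Usage sketch (assumptions: `mpi_comm` is an MPI communicator and "matrix.h5"
// was previously written by ScaLAPACKMatrix::save()): construct a distributed
// matrix directly from the HDF5 file using the constructor above.
//
//   const auto grid =
//     std::make_shared<Utilities::MPI::ProcessGrid>(mpi_comm, 2, 2);
//   ScaLAPACKMatrix<double> A("matrix.h5", grid, 32, 32);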
211
212
213
214template <typename NumberType>
215void
216ScaLAPACKMatrix<NumberType>::reinit(
217 const size_type n_rows_,
218 const size_type n_columns_,
219 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
220 const size_type row_block_size_,
221 const size_type column_block_size_,
222 const LAPACKSupport::Property property_)
223{
224 Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
225 Assert(column_block_size_ > 0,
226 ExcMessage("Column block size has to be positive."));
227 Assert(
228 row_block_size_ <= n_rows_,
230 "Row block size can not be greater than the number of rows of the matrix"));
231 Assert(
232 column_block_size_ <= n_columns_,
234 "Column block size can not be greater than the number of columns of the matrix"));
235
237 property = property_;
238 grid = process_grid;
239 n_rows = n_rows_;
240 n_columns = n_columns_;
241 row_block_size = row_block_size_;
242 column_block_size = column_block_size_;
243
244 if (grid->mpi_process_is_active)
245 {
246 // Get local sizes:
247 n_local_rows = numroc_(&n_rows,
248 &row_block_size,
249 &(grid->this_process_row),
250 &first_process_row,
251 &(grid->n_process_rows));
252 n_local_columns = numroc_(&n_columns,
253 &column_block_size,
254 &(grid->this_process_column),
255 &first_process_column,
256 &(grid->n_process_columns));
257
258 // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different
259 // between processes
260 int lda = std::max(1, n_local_rows);
261
262 int info = 0;
263 descinit_(descriptor,
264 &n_rows,
265 &n_columns,
266 &row_block_size,
267 &column_block_size,
268 &first_process_row,
269 &first_process_column,
270 &(grid->blacs_context),
271 &lda,
272 &info);
273 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
274
275 this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
276 }
277 else
278 {
279 // set process-local variables to something telling:
280 n_local_rows = -1;
281 n_local_columns = -1;
282 std::fill(std::begin(descriptor), std::end(descriptor), -1);
283 }
284}
285
286
287
288template <typename NumberType>
289void
290ScaLAPACKMatrix<NumberType>::reinit(
291 const size_type size,
292 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
293 const size_type block_size,
294 const LAPACKSupport::Property property)
295{
296 reinit(size, size, process_grid, block_size, block_size, property);
297}
298
299
300
301template <typename NumberType>
302void
303ScaLAPACKMatrix<NumberType>::set_property(
304 const LAPACKSupport::Property property_)
305{
306 property = property_;
307}
308
309
310
311template <typename NumberType>
312LAPACKSupport::Property
313ScaLAPACKMatrix<NumberType>::get_property() const
314{
315 return property;
316}
317
318
319
320template <typename NumberType>
321LAPACKSupport::State
322ScaLAPACKMatrix<NumberType>::get_state() const
323{
324 return state;
325}
326
327
328
329template <typename NumberType>
330ScaLAPACKMatrix<NumberType> &
331ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
332{
333 // FIXME: another way to copy is to use pdgeadd_ PBLAS routine.
334 // This routine computes the sum of two matrices B := a*A + b*B.
335 // Matrices can have different distribution, in particular matrix A can
336 // be owned by only one process, so we can set a=1 and b=0 to copy
337 // non-distributed matrix A into distributed matrix B.
338 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
339 Assert(n_columns == int(matrix.n()),
340 ExcDimensionMismatch(n_columns, matrix.n()));
341
342 if (grid->mpi_process_is_active)
343 {
344 for (int i = 0; i < n_local_rows; ++i)
345 {
346 const int glob_i = global_row(i);
347 for (int j = 0; j < n_local_columns; ++j)
348 {
349 const int glob_j = global_column(j);
350 local_el(i, j) = matrix(glob_i, glob_j);
351 }
352 }
353 }
354 state = LAPACKSupport::matrix;
355 return *this;
356}
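// Usage sketch for the assignment operator above: distribute a serial
// FullMatrix that is replicated identically on every process of the grid's
// communicator (assumption: `grid` is a shared_ptr<Utilities::MPI::ProcessGrid>).
//
//   FullMatrix<double> serial(100, 100);
//   // ... fill `serial` with the same entries on all processes ...
//   ScaLAPACKMatrix<double> dist(100, 100, grid, 16, 16);
//   dist = serial; // each process stores only the entries it owns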
357
358
359
360template <typename NumberType>
361void
362ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
363 const unsigned int rank)
364{
365 if (n_rows * n_columns == 0)
366 return;
367
368 const unsigned int this_mpi_process(
369 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
370
371 if constexpr (running_in_debug_mode())
372 {
373 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
375 "All processes have to call routine with identical rank"));
376 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
378 "All processes have to call routine with identical rank"));
379 }
380
381 // root process has to be active in the grid of A
382 if (this_mpi_process == rank)
383 {
384 Assert(grid->mpi_process_is_active, ExcInternalError());
385 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
386 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
387 }
388 // Create 1x1 grid for matrix B.
389 // The underlying grid for matrix B only contains the process #rank.
390 // This grid will be used to copy the serial matrix B to the distributed
391 // matrix using the ScaLAPACK routine pgemr2d.
392 MPI_Group group_A;
393 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
394 const int n = 1;
395 const std::vector<int> ranks(n, rank);
396 MPI_Group group_B;
397 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
398 MPI_Comm communicator_B;
399
400 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_from;
401 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
402 group_B,
403 mpi_tag,
404 &communicator_B);
405 AssertThrowMPI(ierr);
406 int n_proc_rows_B = 1, n_proc_cols_B = 1;
407 int this_process_row_B = -1, this_process_column_B = -1;
408 int blacs_context_B = -1;
409 if (MPI_COMM_NULL != communicator_B)
410 {
411 // Initialize Cblas context from the provided communicator
412 blacs_context_B = Csys2blacs_handle(communicator_B);
413 const char *order = "Col";
414 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
415 Cblacs_gridinfo(blacs_context_B,
416 &n_proc_rows_B,
417 &n_proc_cols_B,
418 &this_process_row_B,
419 &this_process_column_B);
420 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
421 // the active process of grid B has to be process #rank of the
422 // communicator attached to A
423 Assert(this_mpi_process == rank, ExcInternalError());
424 }
425 const bool mpi_process_is_active_B =
426 (this_process_row_B >= 0 && this_process_column_B >= 0);
427
428 // create descriptor for matrix B
429 std::vector<int> descriptor_B(9, -1);
430 const int first_process_row_B = 0, first_process_col_B = 0;
431
432 if (mpi_process_is_active_B)
433 {
434 // Get local sizes:
435 int n_local_rows_B = numroc_(&n_rows,
436 &n_rows,
437 &this_process_row_B,
438 &first_process_row_B,
439 &n_proc_rows_B);
440 int n_local_cols_B = numroc_(&n_columns,
441 &n_columns,
442 &this_process_column_B,
443 &first_process_col_B,
444 &n_proc_cols_B);
445 Assert(n_local_rows_B == n_rows, ExcInternalError());
446 Assert(n_local_cols_B == n_columns, ExcInternalError());
447 (void)n_local_cols_B;
448
449 int lda = std::max(1, n_local_rows_B);
450 int info = 0;
451 descinit_(descriptor_B.data(),
452 &n_rows,
453 &n_columns,
454 &n_rows,
455 &n_columns,
456 &first_process_row_B,
457 &first_process_col_B,
458 &blacs_context_B,
459 &lda,
460 &info);
461 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
462 }
463 if (this->grid->mpi_process_is_active)
464 {
465 const int ii = 1;
466 NumberType *loc_vals_A =
467 this->values.size() > 0 ? this->values.data() : nullptr;
468 const NumberType *loc_vals_B =
469 mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
470
471 // pgemr2d has to be called only for processes active on grid attached to
472 // matrix A
473 pgemr2d(&n_rows,
474 &n_columns,
475 loc_vals_B,
476 &ii,
477 &ii,
478 descriptor_B.data(),
479 loc_vals_A,
480 &ii,
481 &ii,
482 this->descriptor,
483 &(this->grid->blacs_context));
484 }
485 if (mpi_process_is_active_B)
486 Cblacs_gridexit(blacs_context_B);
487
488 MPI_Group_free(&group_A);
489 MPI_Group_free(&group_B);
490 if (MPI_COMM_NULL != communicator_B)
491 Utilities::MPI::free_communicator(communicator_B);
492
493 state = LAPACKSupport::matrix;
494}
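// Usage sketch for copy_from() above: only process `rank` needs a valid serial
// matrix, but every process of the grid's communicator must make the call with
// the same rank (assumption: `dist` is a 100x100 ScaLAPACKMatrix<double>).
//
//   LAPACKFullMatrix<double> serial(100, 100);
//   if (Utilities::MPI::this_mpi_process(mpi_comm) == 0)
//     {
//       // ... fill `serial` on process 0 only ...
//     }
//   dist.copy_from(serial, /*rank=*/0);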
495
496
497
498template <typename NumberType>
499unsigned int
500ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
501{
502 Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
503 ExcIndexRange(loc_row, 0, n_local_rows));
504 const int i = loc_row + 1;
505 return indxl2g_(&i,
506 &row_block_size,
507 &(grid->this_process_row),
508 &first_process_row,
509 &(grid->n_process_rows)) -
510 1;
511}
512
513
514
515template <typename NumberType>
516unsigned int
517ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
518{
519 Assert(n_local_columns >= 0 &&
520 loc_column < static_cast<unsigned int>(n_local_columns),
521 ExcIndexRange(loc_column, 0, n_local_columns));
522 const int j = loc_column + 1;
523 return indxl2g_(&j,
524 &column_block_size,
525 &(grid->this_process_column),
526 &first_process_column,
527 &(grid->n_process_columns)) -
528 1;
529}
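// Usage sketch for global_row()/global_column() above: loop over the locally
// stored block-cyclic entries and address them by their global indices
// (assumption: `fill_entry` is a hypothetical callback).
//
//   for (int i = 0; i < A.local_m(); ++i)
//     for (int j = 0; j < A.local_n(); ++j)
//       A.local_el(i, j) = fill_entry(A.global_row(i), A.global_column(j));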
530
531
532
533template <typename NumberType>
534void
535ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
536 const unsigned int rank) const
537{
538 if (n_rows * n_columns == 0)
539 return;
540
541 const unsigned int this_mpi_process(
542 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
543
544 if constexpr (running_in_debug_mode())
545 {
546 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
548 "All processes have to call routine with identical rank"));
549 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
551 "All processes have to call routine with identical rank"));
552 }
553
554 if (this_mpi_process == rank)
555 {
556 // the process which gets the serial copy has to be in the process grid
557 Assert(this->grid->is_process_active(), ExcInternalError());
558 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
559 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
560 }
561
562 // Create 1x1 grid for matrix B.
563 // The underlying grid for matrix B only contains the process #rank.
564 // This grid will be used to copy the distributed matrix to the serial
565 // matrix B using the ScaLAPACK routine pgemr2d.
566 MPI_Group group_A;
567 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
568 const int n = 1;
569 const std::vector<int> ranks(n, rank);
570 MPI_Group group_B;
571 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
572 MPI_Comm communicator_B;
573
574 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to;
575 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
576 group_B,
577 mpi_tag,
578 &communicator_B);
579 AssertThrowMPI(ierr);
580 int n_proc_rows_B = 1, n_proc_cols_B = 1;
581 int this_process_row_B = -1, this_process_column_B = -1;
582 int blacs_context_B = -1;
583 if (MPI_COMM_NULL != communicator_B)
584 {
585 // Initialize Cblas context from the provided communicator
586 blacs_context_B = Csys2blacs_handle(communicator_B);
587 const char *order = "Col";
588 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
589 Cblacs_gridinfo(blacs_context_B,
590 &n_proc_rows_B,
591 &n_proc_cols_B,
592 &this_process_row_B,
593 &this_process_column_B);
594 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
595 // the active process of grid B has to be process #rank of the
596 // communicator attached to A
597 Assert(this_mpi_process == rank, ExcInternalError());
598 }
599 const bool mpi_process_is_active_B =
600 (this_process_row_B >= 0 && this_process_column_B >= 0);
601
602 // create descriptor for matrix B
603 std::vector<int> descriptor_B(9, -1);
604 const int first_process_row_B = 0, first_process_col_B = 0;
605
606 if (mpi_process_is_active_B)
607 {
608 // Get local sizes:
609 int n_local_rows_B = numroc_(&n_rows,
610 &n_rows,
611 &this_process_row_B,
612 &first_process_row_B,
613 &n_proc_rows_B);
614 int n_local_cols_B = numroc_(&n_columns,
615 &n_columns,
616 &this_process_column_B,
617 &first_process_col_B,
618 &n_proc_cols_B);
619 Assert(n_local_rows_B == n_rows, ExcInternalError());
620 Assert(n_local_cols_B == n_columns, ExcInternalError());
621 (void)n_local_cols_B;
622
623 int lda = std::max(1, n_local_rows_B);
624 int info = 0;
625 // fill descriptor for matrix B
626 descinit_(descriptor_B.data(),
627 &n_rows,
628 &n_columns,
629 &n_rows,
630 &n_columns,
631 &first_process_row_B,
632 &first_process_col_B,
633 &blacs_context_B,
634 &lda,
635 &info);
636 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
637 }
638 // pgemr2d has to be called only for processes active on grid attached to
639 // matrix A
640 if (this->grid->mpi_process_is_active)
641 {
642 const int ii = 1;
643 const NumberType *loc_vals_A =
644 this->values.size() > 0 ? this->values.data() : nullptr;
645 NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
646
647 pgemr2d(&n_rows,
648 &n_columns,
649 loc_vals_A,
650 &ii,
651 &ii,
652 this->descriptor,
653 loc_vals_B,
654 &ii,
655 &ii,
656 descriptor_B.data(),
657 &(this->grid->blacs_context));
658 }
659 if (mpi_process_is_active_B)
660 Cblacs_gridexit(blacs_context_B);
661
662 MPI_Group_free(&group_A);
663 MPI_Group_free(&group_B);
664 if (MPI_COMM_NULL != communicator_B)
665 Utilities::MPI::free_communicator(communicator_B);
666}
667
668
669
670template <typename NumberType>
671void
672ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
673{
674 // FIXME: use PDGEMR2d for copying?
675 // PDGEMR2d copies a submatrix of A on a submatrix of B.
676 // A and B can have different distributions
677 // see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
678 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
679 Assert(n_columns == int(matrix.n()),
680 ExcDimensionMismatch(n_columns, matrix.n()));
681
682 matrix = 0.;
683 if (grid->mpi_process_is_active)
684 {
685 for (int i = 0; i < n_local_rows; ++i)
686 {
687 const int glob_i = global_row(i);
688 for (int j = 0; j < n_local_columns; ++j)
689 {
690 const int glob_j = global_column(j);
691 matrix(glob_i, glob_j) = local_el(i, j);
692 }
693 }
694 }
695 Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
696
697 // we could move the following lines under the main loop above,
698 // but they would be dependent on glob_i and glob_j, which
699 // won't make it much prettier
700 if (state == LAPACKSupport::cholesky)
701 {
702 if (property == LAPACKSupport::lower_triangular)
703 for (unsigned int i = 0; i < matrix.n(); ++i)
704 for (unsigned int j = i + 1; j < matrix.m(); ++j)
705 matrix(i, j) = 0.;
706 else if (property == LAPACKSupport::upper_triangular)
707 for (unsigned int i = 0; i < matrix.n(); ++i)
708 for (unsigned int j = 0; j < i; ++j)
709 matrix(i, j) = 0.;
710 }
711 else if (property == LAPACKSupport::symmetric &&
712 state == LAPACKSupport::inverse_matrix)
713 {
714 if (uplo == 'L')
715 for (unsigned int i = 0; i < matrix.n(); ++i)
716 for (unsigned int j = i + 1; j < matrix.m(); ++j)
717 matrix(i, j) = matrix(j, i);
718 else if (uplo == 'U')
719 for (unsigned int i = 0; i < matrix.n(); ++i)
720 for (unsigned int j = 0; j < i; ++j)
721 matrix(i, j) = matrix(j, i);
722 }
723}
724
725
726
727template <typename NumberType>
728void
729ScaLAPACKMatrix<NumberType>::copy_to(
730 ScaLAPACKMatrix<NumberType> &B,
731 const std::pair<unsigned int, unsigned int> &offset_A,
732 const std::pair<unsigned int, unsigned int> &offset_B,
733 const std::pair<unsigned int, unsigned int> &submatrix_size) const
734{
735 // submatrix is empty
736 if (submatrix_size.first == 0 || submatrix_size.second == 0)
737 return;
738
739 // range checking for matrix A
740 AssertIndexRange(offset_A.first, n_rows - submatrix_size.first + 1);
741 AssertIndexRange(offset_A.second, n_columns - submatrix_size.second + 1);
742
743 // range checking for matrix B
744 AssertIndexRange(offset_B.first, B.n_rows - submatrix_size.first + 1);
745 AssertIndexRange(offset_B.second, B.n_columns - submatrix_size.second + 1);
746
747 // Currently, copying of matrices will only be supported if A and B share the
748 // same MPI communicator
749 int ierr, comparison;
750 ierr = MPI_Comm_compare(grid->mpi_communicator,
751 B.grid->mpi_communicator,
752 &comparison);
753 AssertThrowMPI(ierr);
754 Assert(comparison == MPI_IDENT,
755 ExcMessage("Matrix A and B must have a common MPI Communicator"));
756
757 /*
758 * The routine pgemr2d requires a BLACS context resembling at least the union
759 * of process grids described by the BLACS contexts held by the ProcessGrids
760 * of matrix A and B. As A and B share the same MPI communicator, there is no
761 * need to create a union MPI communicator to initialize the BLACS context
762 */
763 int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
764 const char *order = "Col";
765 int union_n_process_rows =
766 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
767 int union_n_process_columns = 1;
768 Cblacs_gridinit(&union_blacs_context,
769 order,
770 union_n_process_rows,
771 union_n_process_columns);
772
773 int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
774 Cblacs_gridinfo(this->grid->blacs_context,
775 &n_grid_rows_A,
776 &n_grid_columns_A,
777 &my_row_A,
778 &my_column_A);
779
780 // check whether process is in the BLACS context of matrix A
781 const bool in_context_A =
782 (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
783 (my_column_A >= 0 && my_column_A < n_grid_columns_A);
784
785 int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
786 Cblacs_gridinfo(B.grid->blacs_context,
787 &n_grid_rows_B,
788 &n_grid_columns_B,
789 &my_row_B,
790 &my_column_B);
791
792 // check whether process is in the BLACS context of matrix B
793 const bool in_context_B =
794 (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
795 (my_column_B >= 0 && my_column_B < n_grid_columns_B);
796
797 const int n_rows_submatrix = submatrix_size.first;
798 const int n_columns_submatrix = submatrix_size.second;
799
800 // due to Fortran indexing one has to be added
801 int ia = offset_A.first + 1, ja = offset_A.second + 1;
802 int ib = offset_B.first + 1, jb = offset_B.second + 1;
803
804 std::array<int, 9> desc_A, desc_B;
805
806 const NumberType *loc_vals_A = nullptr;
807 NumberType *loc_vals_B = nullptr;
808
809 // Note: the function pgemr2d has to be called for all processes in the union
810 // BLACS context. If the calling process is not part of the BLACS context of A,
811 // desc_A[1] has to be -1 and all other parameters do not have to be set. If
812 // the calling process is not part of the BLACS context of B, desc_B[1] has to
813 // be -1 and all other parameters do not have to be set.
814 if (in_context_A)
815 {
816 if (this->values.size() != 0)
817 loc_vals_A = this->values.data();
818
819 for (unsigned int i = 0; i < desc_A.size(); ++i)
820 desc_A[i] = this->descriptor[i];
821 }
822 else
823 desc_A[1] = -1;
824
825 if (in_context_B)
826 {
827 if (B.values.size() != 0)
828 loc_vals_B = B.values.data();
829
830 for (unsigned int i = 0; i < desc_B.size(); ++i)
831 desc_B[i] = B.descriptor[i];
832 }
833 else
834 desc_B[1] = -1;
835
836 pgemr2d(&n_rows_submatrix,
837 &n_columns_submatrix,
838 loc_vals_A,
839 &ia,
840 &ja,
841 desc_A.data(),
842 loc_vals_B,
843 &ib,
844 &jb,
845 desc_B.data(),
846 &union_blacs_context);
847
848 B.state = LAPACKSupport::matrix;
849
850 // releasing the union BLACS context
851 Cblacs_gridexit(union_blacs_context);
852}
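// Usage sketch for the submatrix copy above: copy a 10x10 block of A starting
// at global entry (5,5) into B starting at (0,0); A and B must share the same
// MPI communicator.
//
//   A.copy_to(B,
//             std::make_pair(5u, 5u),    // offset in A
//             std::make_pair(0u, 0u),    // offset in B
//             std::make_pair(10u, 10u)); // size of the copied block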
853
854
855
856template <typename NumberType>
857void
858ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
859{
860 Assert(n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
861 Assert(n_columns == dest.n_columns,
862 ExcDimensionMismatch(n_columns, dest.n_columns));
863
864 if (this->grid->mpi_process_is_active)
865 AssertThrow(
866 this->descriptor[0] == 1,
867 ExcMessage(
868 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
869 if (dest.grid->mpi_process_is_active)
870 AssertThrow(
871 dest.descriptor[0] == 1,
872 ExcMessage(
873 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
874
875 /*
876 * just in case of different process grids or block-cyclic distributions
877 * inter-process communication is necessary
878 * if distributed matrices have the same process grid and block sizes, local
879 * copying is enough
880 */
881 if ((this->grid != dest.grid) || (row_block_size != dest.row_block_size) ||
882 (column_block_size != dest.column_block_size))
883 {
884 /*
885 * get the MPI communicator, which is the union of the source and
886 * destination MPI communicator
887 */
888 int ierr = 0;
889 MPI_Group group_source, group_dest, group_union;
890 ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
891 AssertThrowMPI(ierr);
892 ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
893 AssertThrowMPI(ierr);
894 ierr = MPI_Group_union(group_source, group_dest, &group_union);
895 AssertThrowMPI(ierr);
896 MPI_Comm mpi_communicator_union;
897
898 // to create a communicator representing the union of the source
899 // and destination MPI communicator we need a communicator that
900 // is guaranteed to contain all desired processes -- i.e.,
901 // MPI_COMM_WORLD. on the other hand, as documented in the MPI
902 // standard, MPI_Comm_create_group is not collective on all
903 // processes in the first argument, but instead is collective on
904 // only those processes listed in the group. in other words,
905 // there is really no harm in passing MPI_COMM_WORLD as the
906 // first argument, even if the program we are currently running
907 // and that is calling this function only works on a subset of
908 // processes. the same holds for the wrapper/fallback we are using here.
909
910 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to2;
911 ierr = MPI_Comm_create_group(MPI_COMM_WORLD,
912 group_union,
913 mpi_tag,
914 &mpi_communicator_union);
915 AssertThrowMPI(ierr);
916
917 /*
918 * The routine pgemr2d requires a BLACS context resembling at least the
919 * union of process grids described by the BLACS contexts of matrix A and
920 * B
921 */
922 int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
923 const char *order = "Col";
924 int union_n_process_rows =
925 Utilities::MPI::n_mpi_processes(mpi_communicator_union);
926 int union_n_process_columns = 1;
927 Cblacs_gridinit(&union_blacs_context,
928 order,
929 union_n_process_rows,
930 union_n_process_columns);
931
932 const NumberType *loc_vals_source = nullptr;
933 NumberType *loc_vals_dest = nullptr;
934
935 if (this->grid->mpi_process_is_active && (this->values.size() > 0))
936 {
937 AssertThrow(this->values.size() > 0,
939 "source: process is active but local matrix empty"));
940 loc_vals_source = this->values.data();
941 }
942 if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
943 {
944 AssertThrow(
945 dest.values.size() > 0,
946 ExcMessage(
947 "destination: process is active but local matrix empty"));
948 loc_vals_dest = dest.values.data();
949 }
950 pgemr2d(&n_rows,
951 &n_columns,
952 loc_vals_source,
953 &submatrix_row,
954 &submatrix_column,
955 descriptor,
956 loc_vals_dest,
957 &dest.submatrix_row,
958 &dest.submatrix_column,
959 dest.descriptor,
960 &union_blacs_context);
961
962 Cblacs_gridexit(union_blacs_context);
963
964 if (mpi_communicator_union != MPI_COMM_NULL)
965 Utilities::MPI::free_communicator(mpi_communicator_union);
966 ierr = MPI_Group_free(&group_source);
967 AssertThrowMPI(ierr);
968 ierr = MPI_Group_free(&group_dest);
969 AssertThrowMPI(ierr);
970 ierr = MPI_Group_free(&group_union);
971 AssertThrowMPI(ierr);
972 }
973 else
974 // process is active in the process grid
975 if (this->grid->mpi_process_is_active)
976 dest.values = this->values;
977
978 dest.state = state;
979 dest.property = property;
980}
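// Usage sketch for copy_to() above: redistribute a matrix onto a different
// process grid and block size, e.g. to match the layout a subsequent solver
// expects (assumption: `grid_1` and `grid_2` are ProcessGrid objects).
//
//   ScaLAPACKMatrix<double> A(1000, 1000, grid_1, 64, 64);
//   ScaLAPACKMatrix<double> B(1000, 1000, grid_2, 32, 32);
//   A.copy_to(B); // uses pgemr2d if grids or block sizes differ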
981
982
983
984template <typename NumberType>
985void
986ScaLAPACKMatrix<NumberType>::copy_transposed(
987 const ScaLAPACKMatrix<NumberType> &B)
988{
989 add(B, 0, 1, true);
990}
991
992
993
994template <typename NumberType>
995void
996ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
997 const NumberType alpha,
998 const NumberType beta,
999 const bool transpose_B)
1000{
1001 if (transpose_B)
1002 {
1003 Assert(n_rows == B.n_columns, ExcDimensionMismatch(n_rows, B.n_columns));
1004 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
1005 Assert(column_block_size == B.row_block_size,
1006 ExcDimensionMismatch(column_block_size, B.row_block_size));
1007 Assert(row_block_size == B.column_block_size,
1008 ExcDimensionMismatch(row_block_size, B.column_block_size));
1009 }
1010 else
1011 {
1012 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
1013 Assert(n_columns == B.n_columns,
1014 ExcDimensionMismatch(n_columns, B.n_columns));
1015 Assert(column_block_size == B.column_block_size,
1016 ExcDimensionMismatch(column_block_size, B.column_block_size));
1017 Assert(row_block_size == B.row_block_size,
1018 ExcDimensionMismatch(row_block_size, B.row_block_size));
1019 }
1020 Assert(this->grid == B.grid,
1021 ExcMessage("The matrices A and B need to have the same process grid"));
1022
1023 if (this->grid->mpi_process_is_active)
1024 {
1025 char trans_b = transpose_B ? 'T' : 'N';
1026 NumberType *A_loc =
1027 (this->values.size() > 0) ? this->values.data() : nullptr;
1028 const NumberType *B_loc =
1029 (B.values.size() > 0) ? B.values.data() : nullptr;
1030
1031 pgeadd(&trans_b,
1032 &n_rows,
1033 &n_columns,
1034 &beta,
1035 B_loc,
1036 &B.submatrix_row,
1037 &B.submatrix_column,
1038 B.descriptor,
1039 &alpha,
1040 A_loc,
1041 &submatrix_row,
1042 &submatrix_column,
1043 descriptor);
1044 }
1045 state = LAPACKSupport::matrix;
1046}
1047
1048
1049
1050template <typename NumberType>
1051void
1052ScaLAPACKMatrix<NumberType>::add(const NumberType a,
1053 const ScaLAPACKMatrix<NumberType> &B)
1054{
1055 add(B, 1, a, false);
1056}
1057
1058
1059
1060template <typename NumberType>
1061void
1062ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
1063 const ScaLAPACKMatrix<NumberType> &B)
1064{
1065 add(B, 1, a, true);
1066}
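// Usage sketch for the scaled additions above (assumption: A and B share the
// same process grid and block sizes, C has the transposed dimensions of A):
//
//   A.add(2.0, B);  // A = A + 2*B
//   A.Tadd(0.5, C); // A = A + 0.5*C^T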
1067
1068
1069
1070template <typename NumberType>
1071void
1072ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
1073 const ScaLAPACKMatrix<NumberType> &B,
1074 const NumberType c,
1075 ScaLAPACKMatrix<NumberType> &C,
1076 const bool transpose_A,
1077 const bool transpose_B) const
1078{
1079 Assert(this->grid == B.grid,
1080 ExcMessage("The matrices A and B need to have the same process grid"));
1081 Assert(C.grid == B.grid,
1082 ExcMessage("The matrices B and C need to have the same process grid"));
1083
1084 // see for further info:
1085 // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgemm.htm
1086 if (!transpose_A && !transpose_B)
1087 {
1088 Assert(this->n_columns == B.n_rows,
1089 ExcDimensionMismatch(this->n_columns, B.n_rows));
1090 Assert(this->n_rows == C.n_rows,
1091 ExcDimensionMismatch(this->n_rows, C.n_rows));
1092 Assert(B.n_columns == C.n_columns,
1093 ExcDimensionMismatch(B.n_columns, C.n_columns));
1094 Assert(this->row_block_size == C.row_block_size,
1095 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1096 Assert(this->column_block_size == B.row_block_size,
1097 ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1098 Assert(B.column_block_size == C.column_block_size,
1099 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1100 }
1101 else if (transpose_A && !transpose_B)
1102 {
1103 Assert(this->n_rows == B.n_rows,
1104 ExcDimensionMismatch(this->n_rows, B.n_rows));
1105 Assert(this->n_columns == C.n_rows,
1106 ExcDimensionMismatch(this->n_columns, C.n_rows));
1107 Assert(B.n_columns == C.n_columns,
1108 ExcDimensionMismatch(B.n_columns, C.n_columns));
1109 Assert(this->column_block_size == C.row_block_size,
1110 ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1111 Assert(this->row_block_size == B.row_block_size,
1112 ExcDimensionMismatch(this->row_block_size, B.row_block_size));
1113 Assert(B.column_block_size == C.column_block_size,
1114 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1115 }
1116 else if (!transpose_A && transpose_B)
1117 {
1118 Assert(this->n_columns == B.n_columns,
1119 ExcDimensionMismatch(this->n_columns, B.n_columns));
1120 Assert(this->n_rows == C.n_rows,
1121 ExcDimensionMismatch(this->n_rows, C.n_rows));
1122 Assert(B.n_rows == C.n_columns,
1123 ExcDimensionMismatch(B.n_rows, C.n_columns));
1124 Assert(this->row_block_size == C.row_block_size,
1125 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1126 Assert(this->column_block_size == B.column_block_size,
1127 ExcDimensionMismatch(this->column_block_size,
1128 B.column_block_size));
1129 Assert(B.row_block_size == C.column_block_size,
1130 ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1131 }
1132 else // if (transpose_A && transpose_B)
1133 {
1134 Assert(this->n_rows == B.n_columns,
1135 ExcDimensionMismatch(this->n_rows, B.n_columns));
1136 Assert(this->n_columns == C.n_rows,
1137 ExcDimensionMismatch(this->n_columns, C.n_rows));
1138 Assert(B.n_rows == C.n_columns,
1139 ExcDimensionMismatch(B.n_rows, C.n_columns));
1140 Assert(this->column_block_size == C.row_block_size,
1141 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1142 Assert(this->row_block_size == B.column_block_size,
1143 ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1144 Assert(B.row_block_size == C.column_block_size,
1145 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1146 }
1147
1148 if (this->grid->mpi_process_is_active)
1149 {
1150 char trans_a = transpose_A ? 'T' : 'N';
1151 char trans_b = transpose_B ? 'T' : 'N';
1152
1153 const NumberType *A_loc =
1154 (this->values.size() > 0) ? this->values.data() : nullptr;
1155 const NumberType *B_loc =
1156 (B.values.size() > 0) ? B.values.data() : nullptr;
1157 NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
1158 int m = C.n_rows;
1159 int n = C.n_columns;
1160 int k = transpose_A ? this->n_rows : this->n_columns;
1161
1162 pgemm(&trans_a,
1163 &trans_b,
1164 &m,
1165 &n,
1166 &k,
1167 &b,
1168 A_loc,
1169 &(this->submatrix_row),
1170 &(this->submatrix_column),
1171 this->descriptor,
1172 B_loc,
1173 &B.submatrix_row,
1174 &B.submatrix_column,
1175 B.descriptor,
1176 &c,
1177 C_loc,
1178 &C.submatrix_row,
1179 &C.submatrix_column,
1180 C.descriptor);
1181 }
1182 C.state = LAPACKSupport::matrix;
1183}
1184
1185
1186
1187template <typename NumberType>
1188void
1189ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> &C,
1190 const ScaLAPACKMatrix<NumberType> &B,
1191 const bool adding) const
1192{
1193 if (adding)
1194 mult(1., B, 1., C, false, false);
1195 else
1196 mult(1., B, 0, C, false, false);
1197}
1198
1199
1200
1201template <typename NumberType>
1202void
1203ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> &C,
1204 const ScaLAPACKMatrix<NumberType> &B,
1205 const bool adding) const
1206{
1207 if (adding)
1208 mult(1., B, 1., C, true, false);
1209 else
1210 mult(1., B, 0, C, true, false);
1211}
1212
1213
1214
1215template <typename NumberType>
1216void
1217ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> &C,
1218 const ScaLAPACKMatrix<NumberType> &B,
1219 const bool adding) const
1220{
1221 if (adding)
1222 mult(1., B, 1., C, false, true);
1223 else
1224 mult(1., B, 0, C, false, true);
1225}
1226
1227
1228
1229template <typename NumberType>
1230void
1231ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> &C,
1232 const ScaLAPACKMatrix<NumberType> &B,
1233 const bool adding) const
1234{
1235 if (adding)
1236 mult(1., B, 1., C, true, true);
1237 else
1238 mult(1., B, 0, C, true, true);
1239}
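// Usage sketch for the matrix-matrix products above (assumption: A, B, C live
// on the same process grid with compatible dimensions and block sizes):
//
//   A.mmult(C, B);          // C = A * B
//   A.Tmmult(C, B);         // C = A^T * B
//   A.mTmult(C, B, true);   // C += A * B^T
//   A.mult(2., B, 1., C);   // C = 2*A*B + C, the general pgemm interface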
1240
1241
1242
1243template <typename NumberType>
1244void
1245ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
1246{
1247 Assert(
1248 n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
1249 ExcMessage(
1250 "Cholesky factorization can be applied to symmetric matrices only."));
1251 Assert(state == LAPACKSupport::matrix,
1252 ExcMessage(
1253 "Matrix has to be in Matrix state before calling this function."));
1254
1255 if (grid->mpi_process_is_active)
1256 {
1257 int info = 0;
1258 NumberType *A_loc = this->values.data();
1259 // pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
1260 ppotrf(&uplo,
1261 &n_columns,
1262 A_loc,
1263 &submatrix_row,
1264 &submatrix_column,
1265 descriptor,
1266 &info);
1267 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
1268 }
1269 state = LAPACKSupport::cholesky;
1270 property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
1271 LAPACKSupport::upper_triangular);
1272}
1273
1274
1275
1276template <typename NumberType>
1277void
1278ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
1279{
1280 Assert(state == LAPACKSupport::matrix,
1281 ExcMessage(
1282 "Matrix has to be in Matrix state before calling this function."));
1283
1284 if (grid->mpi_process_is_active)
1285 {
1286 int info = 0;
1287 NumberType *A_loc = this->values.data();
1288
1289 const int iarow = indxg2p_(&submatrix_row,
1290 &row_block_size,
1291 &(grid->this_process_row),
1292 &first_process_row,
1293 &(grid->n_process_rows));
1294 const int mp = numroc_(&n_rows,
1295 &row_block_size,
1296 &(grid->this_process_row),
1297 &iarow,
1298 &(grid->n_process_rows));
1299 ipiv.resize(mp + row_block_size);
1300
1301 pgetrf(&n_rows,
1302 &n_columns,
1303 A_loc,
1304 &submatrix_row,
1305 &submatrix_column,
1306 descriptor,
1307 ipiv.data(),
1308 &info);
1309 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
1310 }
1311 state = LAPACKSupport::State::lu;
1312 property = LAPACKSupport::Property::general;
1313}
1314
1315
1316
1317template <typename NumberType>
1318void
1319ScaLAPACKMatrix<NumberType>::invert()
1320{
1321 // Check whether matrix is symmetric and save flag.
1322 // If a Cholesky factorization has been applied previously,
1323 // the original matrix was symmetric.
1324 const bool is_symmetric = (property == LAPACKSupport::symmetric ||
1325 state == LAPACKSupport::State::cholesky);
1326
1327 // Check whether matrix is triangular and is in an unfactorized state.
1328 const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
1329 property == LAPACKSupport::lower_triangular) &&
1330 (state == LAPACKSupport::State::matrix ||
1331 state == LAPACKSupport::State::inverse_matrix);
1332
1333 if (is_triangular)
1334 {
1335 if (grid->mpi_process_is_active)
1336 {
1337 const char uploTriangular =
1338 property == LAPACKSupport::upper_triangular ? 'U' : 'L';
1339 const char diag = 'N';
1340 int info = 0;
1341 NumberType *A_loc = this->values.data();
1342 ptrtri(&uploTriangular,
1343 &diag,
1344 &n_columns,
1345 A_loc,
1346 &submatrix_row,
1347 &submatrix_column,
1348 descriptor,
1349 &info);
1350 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
1351 // The inversion is stored in the same part as the triangular matrix,
1352 // so we don't need to re-set the property here.
1353 }
1354 }
1355 else
1356 {
1357 // Matrix is neither in Cholesky nor LU state.
1358 // Compute the required factorizations based on the property of the
1359 // matrix.
1360 if (!(state == LAPACKSupport::State::lu ||
1361 state == LAPACKSupport::State::cholesky))
1362 {
1363 if (is_symmetric)
1364 compute_cholesky_factorization();
1365 else
1366 compute_lu_factorization();
1367 }
1368 if (grid->mpi_process_is_active)
1369 {
1370 int info = 0;
1371 NumberType *A_loc = this->values.data();
1372
1373 if (is_symmetric)
1374 {
1375 ppotri(&uplo,
1376 &n_columns,
1377 A_loc,
1378 &submatrix_row,
1379 &submatrix_column,
1380 descriptor,
1381 &info);
1382 AssertThrow(info == 0,
1383 LAPACKSupport::ExcErrorCode("ppotri", info));
1384 property = LAPACKSupport::Property::symmetric;
1385 }
1386 else
1387 {
1388 int lwork = -1, liwork = -1;
1389 work.resize(1);
1390 iwork.resize(1);
1391
1392 pgetri(&n_columns,
1393 A_loc,
1394 &submatrix_row,
1395 &submatrix_column,
1396 descriptor,
1397 ipiv.data(),
1398 work.data(),
1399 &lwork,
1400 iwork.data(),
1401 &liwork,
1402 &info);
1403
1404 AssertThrow(info == 0,
1405 LAPACKSupport::ExcErrorCode("pgetri", info));
1406 lwork = static_cast<int>(work[0]);
1407 liwork = iwork[0];
1408 work.resize(lwork);
1409 iwork.resize(liwork);
1410
1411 pgetri(&n_columns,
1412 A_loc,
1413 &submatrix_row,
1414 &submatrix_column,
1415 descriptor,
1416 ipiv.data(),
1417 work.data(),
1418 &lwork,
1419 iwork.data(),
1420 &liwork,
1421 &info);
1422
1423 AssertThrow(info == 0,
1424 LAPACKSupport::ExcErrorCode("pgetri", info));
1425 }
1426 }
1427 }
1428 state = LAPACKSupport::State::inverse_matrix;
1429}
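// Usage sketch for invert() above: a symmetric positive definite matrix is
// inverted in place via its Cholesky factorization (assumption: `A` has been
// filled and is symmetric).
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   A.invert(); // computes the Cholesky factorization first, then calls ppotri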
1430
1431
1432
1433template <typename NumberType>
1434std::vector<NumberType>
1435ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
1436 const std::pair<unsigned int, unsigned int> &index_limits,
1437 const bool compute_eigenvectors)
1438{
1439 // check validity of index limits
1440 AssertIndexRange(index_limits.first, n_rows);
1441 AssertIndexRange(index_limits.second, n_rows);
1442
1443 std::pair<unsigned int, unsigned int> idx =
1444 std::make_pair(std::min(index_limits.first, index_limits.second),
1445 std::max(index_limits.first, index_limits.second));
1446
1447 // compute all eigenvalues/eigenvectors
1448 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1449 return eigenpairs_symmetric(compute_eigenvectors);
1450 else
1451 return eigenpairs_symmetric(compute_eigenvectors, idx);
1452}
1453
1454
1455
1456template <typename NumberType>
1457std::vector<NumberType>
1458ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
1459 const std::pair<NumberType, NumberType> &value_limits,
1460 const bool compute_eigenvectors)
1461{
1462 Assert(!std::isnan(value_limits.first),
1463 ExcMessage("value_limits.first is NaN"));
1464 Assert(!std::isnan(value_limits.second),
1465 ExcMessage("value_limits.second is NaN"));
1466
1467 std::pair<unsigned int, unsigned int> indices =
1468 std::make_pair(numbers::invalid_unsigned_int,
1469 numbers::invalid_unsigned_int);
1470
1471 return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
1472}
1473
1474
1475
1476template <typename NumberType>
1477std::vector<NumberType>
1479 const bool compute_eigenvectors,
1480 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1481 const std::pair<NumberType, NumberType> &eigenvalue_limits)
1482{
1483 Assert(state == LAPACKSupport::matrix,
1484 ExcMessage(
1485 "Matrix has to be in Matrix state before calling this function."));
1486 Assert(property == LAPACKSupport::symmetric,
1487 ExcMessage("Matrix has to be symmetric for this operation."));
1488
1489 std::lock_guard<std::mutex> lock(mutex);
1490
1491 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1492 std::isnan(eigenvalue_limits.second)) ?
1493 false :
1494 true;
1495 const bool use_indices =
1496 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1497 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1498 false :
1499 true;
1500
1501 Assert(
1502 !(use_values && use_indices),
1503 ExcMessage(
1504 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1505
1506 // if computation of eigenvectors is not required use a sufficiently small
1507 // distributed matrix
1508 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1509 compute_eigenvectors ?
1510 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1511 grid,
1512 row_block_size) :
1513 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1514 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1515
1516 eigenvectors->property = property;
1517 // number of eigenvalues to be returned from psyevx; upon successful exit ev
1518 // contains the m selected eigenvalues in ascending order; set to all
1519 // eigenvalues in case we will be using psyev.
1520 int m = n_rows;
1521 std::vector<NumberType> ev(n_rows);
1522
1523 if (grid->mpi_process_is_active)
1524 {
1525 int info = 0;
1526 /*
1527 * for jobz==N only eigenvalues are computed, for jobz='V' also the
1528 * eigenvectors of the matrix are computed
1529 */
1530 char jobz = compute_eigenvectors ? 'V' : 'N';
1531 char range = 'A';
1532 // default value is to compute all eigenvalues and optionally eigenvectors
1533 bool all_eigenpairs = true;
1534 NumberType vl = NumberType(), vu = NumberType();
1535 int il = 1, iu = 1;
1536 // number of eigenvectors to be returned;
1537 // upon successful exit the first m=nz columns contain the selected
1538 // eigenvectors (only if jobz=='V')
1539 int nz = 0;
1540 NumberType abstol = NumberType();
1541
1542 // orfac decides which eigenvectors should be reorthogonalized
1543 // see
1544 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1545 // for explanation; to keep it simple, no reorthogonalization will be done
1546 // by setting orfac to 0
1547 NumberType orfac = 0;
1548 // contains the indices of eigenvectors that failed to converge
1549 std::vector<int> ifail;
1550 // This array contains indices of eigenvectors corresponding to
1551 // a cluster of eigenvalues that could not be reorthogonalized
1552 // due to insufficient workspace
1553 // see
1554 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1555 // for explanation
1556 std::vector<int> iclustr;
1557 // This array contains the gap between eigenvalues whose
1558 // eigenvectors could not be reorthogonalized.
1559 // see
1560 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1561 // for explanation
1562 std::vector<NumberType> gap(n_local_rows * n_local_columns);
1563
1564 // index range for eigenvalues is not specified
1565 if (!use_indices)
1566 {
1567 // interval for eigenvalues is not specified and consequently all
1568 // eigenvalues/eigenpairs will be computed
1569 if (!use_values)
1570 {
1571 range = 'A';
1572 all_eigenpairs = true;
1573 }
1574 else
1575 {
1576 range = 'V';
1577 all_eigenpairs = false;
1578 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1579 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1580 }
1581 }
1582 else
1583 {
1584 range = 'I';
1585 all_eigenpairs = false;
1586 // as Fortran starts counting/indexing from 1 unlike C/C++, where it
1587 // starts from 0
1588 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1589 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1590 }
1591 NumberType *A_loc = this->values.data();
1592 /*
1593 * by setting lwork to -1 a workspace query for optimal length of work is
1594 * performed
1595 */
1596 int lwork = -1;
1597 int liwork = -1;
1598 NumberType *eigenvectors_loc =
1599 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1600 /*
1601 * According to the official "documentation" found on the internet
1602 * (aka source file ppsyevx.f [1]) the work array has to have a
1603 * minimal size of max(3, lwork). Because we query for optimal size
1604 * (lwork == -1) we have to guarantee at least three doubles. The
1605 * necessary size of iwork is not specified, so let's use three as
1606 * well.
1607 * [1]
1608 * https://netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1609 */
1610 work.resize(3);
1611 iwork.resize(3);
1612
1613 if (all_eigenpairs)
1614 {
1615 psyev(&jobz,
1616 &uplo,
1617 &n_rows,
1618 A_loc,
1619 &submatrix_row,
1620 &submatrix_column,
1621 descriptor,
1622 ev.data(),
1623 eigenvectors_loc,
1624 &eigenvectors->submatrix_row,
1625 &eigenvectors->submatrix_column,
1626 eigenvectors->descriptor,
1627 work.data(),
1628 &lwork,
1629 &info);
1630 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1631 }
1632 else
1633 {
1634 char cmach = compute_eigenvectors ? 'U' : 'S';
1635 plamch(&(this->grid->blacs_context), &cmach, abstol);
1636 abstol *= 2;
1637 ifail.resize(n_rows);
1638 iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
1639 gap.resize(grid->n_process_rows * grid->n_process_columns);
1640
1641 psyevx(&jobz,
1642 &range,
1643 &uplo,
1644 &n_rows,
1645 A_loc,
1646 &submatrix_row,
1647 &submatrix_column,
1648 descriptor,
1649 &vl,
1650 &vu,
1651 &il,
1652 &iu,
1653 &abstol,
1654 &m,
1655 &nz,
1656 ev.data(),
1657 &orfac,
1658 eigenvectors_loc,
1659 &eigenvectors->submatrix_row,
1660 &eigenvectors->submatrix_column,
1661 eigenvectors->descriptor,
1662 work.data(),
1663 &lwork,
1664 iwork.data(),
1665 &liwork,
1666 ifail.data(),
1667 iclustr.data(),
1668 gap.data(),
1669 &info);
1670 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1671 }
1672 lwork = static_cast<int>(work[0]);
1673 work.resize(lwork);
1674
1675 if (all_eigenpairs)
1676 {
1677 psyev(&jobz,
1678 &uplo,
1679 &n_rows,
1680 A_loc,
1681 &submatrix_row,
1682 &submatrix_column,
1683 descriptor,
1684 ev.data(),
1685 eigenvectors_loc,
1686 &eigenvectors->submatrix_row,
1687 &eigenvectors->submatrix_column,
1688 eigenvectors->descriptor,
1689 work.data(),
1690 &lwork,
1691 &info);
1692
1693 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1694 }
1695 else
1696 {
1697 liwork = iwork[0];
1698 AssertThrow(liwork > 0, ExcInternalError());
1699 iwork.resize(liwork);
1700
1701 psyevx(&jobz,
1702 &range,
1703 &uplo,
1704 &n_rows,
1705 A_loc,
1706 &submatrix_row,
1707 &submatrix_column,
1708 descriptor,
1709 &vl,
1710 &vu,
1711 &il,
1712 &iu,
1713 &abstol,
1714 &m,
1715 &nz,
1716 ev.data(),
1717 &orfac,
1718 eigenvectors_loc,
1719 &eigenvectors->submatrix_row,
1720 &eigenvectors->submatrix_column,
1721 eigenvectors->descriptor,
1722 work.data(),
1723 &lwork,
1724 iwork.data(),
1725 &liwork,
1726 ifail.data(),
1727 iclustr.data(),
1728 gap.data(),
1729 &info);
1730
1731 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1732 }
1733 // if eigenvectors are queried copy eigenvectors to original matrix
1734 // as the temporary matrix eigenvectors has identical dimensions and
1735 // block-cyclic distribution we simply swap the local array
1736 if (compute_eigenvectors)
1737 this->values.swap(eigenvectors->values);
1738
1739 // adapt the size of ev to fit m upon return
1740 while (ev.size() > static_cast<size_type>(m))
1741 ev.pop_back();
1742 }
1743 /*
1744 * send number of computed eigenvalues to inactive processes
1745 */
1746 grid->send_to_inactive(&m, 1);
1747
1748 /*
1749 * inactive processes have to resize array of eigenvalues
1750 */
1751 if (!grid->mpi_process_is_active)
1752 ev.resize(m);
1753 /*
1754 * send the eigenvalues to processors not being part of the process grid
1755 */
1756 grid->send_to_inactive(ev.data(), ev.size());
1757
1758 /*
1759 * if only eigenvalues are queried the content of the matrix will be destroyed
1760 * if the eigenpairs are queried matrix A on exit stores the eigenvectors in
1761 * the columns
1762 */
1763 if (compute_eigenvectors)
1764 {
1765 property = LAPACKSupport::Property::general;
1766 state = LAPACKSupport::eigenvalues;
1767 }
1768 else
1769 state = LAPACKSupport::unusable;
1770
1771 return ev;
1772}
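// Usage sketch for the symmetric eigensolver above: compute the five smallest
// eigenvalues and the corresponding eigenvectors through the index-based
// interface; on return the columns of A hold the selected eigenvectors.
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   const std::vector<double> ev =
//     A.eigenpairs_symmetric_by_index(std::make_pair(0u, 4u),
//                                     /*compute_eigenvectors=*/true);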
1773
1774
1775
1776template <typename NumberType>
1777std::vector<NumberType>
1778ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
1779 const std::pair<unsigned int, unsigned int> &index_limits,
1780 const bool compute_eigenvectors)
1781{
1782 // Check validity of index limits.
1783 AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
1784 AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));
1785
1786 const std::pair<unsigned int, unsigned int> idx =
1787 std::make_pair(std::min(index_limits.first, index_limits.second),
1788 std::max(index_limits.first, index_limits.second));
1789
1790 // Compute all eigenvalues/eigenvectors.
1791 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1792 return eigenpairs_symmetric_MRRR(compute_eigenvectors);
1793 else
1794 return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
1795}
1796
1797
1798
1799template <typename NumberType>
1800std::vector<NumberType>
1801ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
1802 const std::pair<NumberType, NumberType> &value_limits,
1803 const bool compute_eigenvectors)
1804{
1805 AssertIsFinite(value_limits.first);
1806 AssertIsFinite(value_limits.second);
1807
1808 const std::pair<unsigned int, unsigned int> indices =
1809 std::make_pair(numbers::invalid_unsigned_int,
1810 numbers::invalid_unsigned_int);
1811
1812 return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
1813}
1814
1815
1816
1817template <typename NumberType>
1818std::vector<NumberType>
1819ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
1820 const bool compute_eigenvectors,
1821 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1822 const std::pair<NumberType, NumberType> &eigenvalue_limits)
1823{
1824 Assert(state == LAPACKSupport::matrix,
1825 ExcMessage(
1826 "Matrix has to be in Matrix state before calling this function."));
1827 Assert(property == LAPACKSupport::symmetric,
1828 ExcMessage("Matrix has to be symmetric for this operation."));
1829
1830 std::lock_guard<std::mutex> lock(mutex);
1831
1832 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1833 std::isnan(eigenvalue_limits.second)) ?
1834 false :
1835 true;
1836 const bool use_indices =
1837 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1838 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1839 false :
1840 true;
1841
1842 Assert(
1843 !(use_values && use_indices),
1844 ExcMessage(
1845 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1846
1847 // If computation of eigenvectors is not required, use a sufficiently small
1848 // distributed matrix.
1849 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1850 compute_eigenvectors ?
1851 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1852 grid,
1853 row_block_size) :
1854 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1855 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1856
1857 eigenvectors->property = property;
1858 // Number of eigenvalues to be returned from psyevr; upon successful exit ev
1859 // contains the m selected eigenvalues in ascending order.
1860 int m = n_rows;
1861 std::vector<NumberType> ev(n_rows);
1862
1863 // Number of eigenvectors to be returned;
1864 // Upon successful exit the first m=nz columns contain the selected
1865 // eigenvectors (only if jobz=='V').
1866 int nz = 0;
1867
1868 if (grid->mpi_process_is_active)
1869 {
1870 int info = 0;
1871 /*
1872 * For jobz==N only eigenvalues are computed, for jobz='V' also the
1873 * eigenvectors of the matrix are computed.
1874 */
1875 char jobz = compute_eigenvectors ? 'V' : 'N';
1876 // Default value is to compute all eigenvalues and optionally
1877 // eigenvectors.
1878 char range = 'A';
1879 NumberType vl = NumberType(), vu = NumberType();
1880 int il = 1, iu = 1;
1881
1882 // Index range for eigenvalues is not specified.
1883 if (!use_indices)
1884 {
1885 // Interval for eigenvalues is not specified and consequently all
1886 // eigenvalues/eigenpairs will be computed.
1887 if (!use_values)
1888 {
1889 range = 'A';
1890 }
1891 else
1892 {
1893 range = 'V';
1894 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1895 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1896 }
1897 }
1898 else
1899 {
1900 range = 'I';
1901 // As Fortran starts counting/indexing from 1 unlike C/C++, where it
1902 // starts from 0.
1903 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1904 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1905 }
1906 NumberType *A_loc = this->values.data();
1907
1908 /*
1909 * By setting lwork to -1 a workspace query for optimal length of work is
1910 * performed.
1911 */
1912 int lwork = -1;
1913 int liwork = -1;
1914 NumberType *eigenvectors_loc =
1915 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1916 work.resize(1);
1917 iwork.resize(1);
1918
1919 psyevr(&jobz,
1920 &range,
1921 &uplo,
1922 &n_rows,
1923 A_loc,
1924 &submatrix_row,
1925 &submatrix_column,
1926 descriptor,
1927 &vl,
1928 &vu,
1929 &il,
1930 &iu,
1931 &m,
1932 &nz,
1933 ev.data(),
1934 eigenvectors_loc,
1935 &eigenvectors->submatrix_row,
1936 &eigenvectors->submatrix_column,
1937 eigenvectors->descriptor,
1938 work.data(),
1939 &lwork,
1940 iwork.data(),
1941 &liwork,
1942 &info);
1943
1944 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1945
1946 lwork = static_cast<int>(work[0]);
1947 work.resize(lwork);
1948 liwork = iwork[0];
1949 iwork.resize(liwork);
1950
1951 psyevr(&jobz,
1952 &range,
1953 &uplo,
1954 &n_rows,
1955 A_loc,
1956 &submatrix_row,
1957 &submatrix_column,
1958 descriptor,
1959 &vl,
1960 &vu,
1961 &il,
1962 &iu,
1963 &m,
1964 &nz,
1965 ev.data(),
1966 eigenvectors_loc,
1967 &eigenvectors->submatrix_row,
1968 &eigenvectors->submatrix_column,
1969 eigenvectors->descriptor,
1970 work.data(),
1971 &lwork,
1972 iwork.data(),
1973 &liwork,
1974 &info);
1975
1976 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1977
1978 if (compute_eigenvectors)
1979 AssertThrow(
1980 m == nz,
1981 ExcMessage(
1982 "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
1983
1984 // If eigenvectors are queried, copy eigenvectors to original matrix.
1985 // As the temporary matrix eigenvectors has identical dimensions and
1986 // block-cyclic distribution we simply swap the local array.
1987 if (compute_eigenvectors)
1988 this->values.swap(eigenvectors->values);
1989
1990 // Adapt the size of ev to fit m upon return.
1991 while (ev.size() > static_cast<size_type>(m))
1992 ev.pop_back();
1993 }
1994 /*
1995 * Send number of computed eigenvalues to inactive processes.
1996 */
1997 grid->send_to_inactive(&m, 1);
1998
1999 /*
2000 * Inactive processes have to resize array of eigenvalues.
2001 */
2002 if (!grid->mpi_process_is_active)
2003 ev.resize(m);
2004 /*
2005 * Send the eigenvalues to processors not being part of the process grid.
2006 */
2007 grid->send_to_inactive(ev.data(), ev.size());
2008
2009 /*
2010 * If only eigenvalues are queried, the content of the matrix will be
2011 * destroyed. If the eigenpairs are queried, matrix A on exit stores the
2012 * eigenvectors in the columns.
2013 */
2014 if (compute_eigenvectors)
2015 {
2016 property = LAPACKSupport::Property::general;
2017 state = LAPACKSupport::eigenvalues;
2018 }
2019 else
2020 state = LAPACKSupport::unusable;
2021
2022 return ev;
2023}
2024
2025
2026
2027template <typename NumberType>
2028std::vector<NumberType>
2029ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
2030 ScaLAPACKMatrix<NumberType> *VT)
2031{
2032 Assert(state == LAPACKSupport::matrix,
2033 ExcMessage(
2034 "Matrix has to be in Matrix state before calling this function."));
2035 Assert(row_block_size == column_block_size,
2036 ExcDimensionMismatch(row_block_size, column_block_size));
2037
2038 const bool left_singular_vectors = (U != nullptr) ? true : false;
2039 const bool right_singular_vectors = (VT != nullptr) ? true : false;
2040
2041 if (left_singular_vectors)
2042 {
2043 Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
2044 Assert(U->n_rows == U->n_columns,
2045 ExcDimensionMismatch(U->n_rows, U->n_columns));
2046 Assert(row_block_size == U->row_block_size,
2047 ExcDimensionMismatch(row_block_size, U->row_block_size));
2048 Assert(column_block_size == U->column_block_size,
2049 ExcDimensionMismatch(column_block_size, U->column_block_size));
2050 Assert(grid->blacs_context == U->grid->blacs_context,
2051 ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
2052 }
2053 if (right_singular_vectors)
2054 {
2055 Assert(n_columns == VT->n_rows,
2056 ExcDimensionMismatch(n_columns, VT->n_rows));
2057 Assert(VT->n_rows == VT->n_columns,
2058 ExcDimensionMismatch(VT->n_rows, VT->n_columns));
2059 Assert(row_block_size == VT->row_block_size,
2060 ExcDimensionMismatch(row_block_size, VT->row_block_size));
2061 Assert(column_block_size == VT->column_block_size,
2062 ExcDimensionMismatch(column_block_size, VT->column_block_size));
2063 Assert(grid->blacs_context == VT->grid->blacs_context,
2064 ExcDimensionMismatch(grid->blacs_context,
2065 VT->grid->blacs_context));
2066 }
2067 std::lock_guard<std::mutex> lock(mutex);
2068
2069 std::vector<NumberType> sv(std::min(n_rows, n_columns));
2070
2071 if (grid->mpi_process_is_active)
2072 {
2073 char jobu = left_singular_vectors ? 'V' : 'N';
2074 char jobvt = right_singular_vectors ? 'V' : 'N';
2075 NumberType *A_loc = this->values.data();
2076 NumberType *U_loc = left_singular_vectors ? U->values.data() : nullptr;
2077 NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;
2078 int info = 0;
2079 /*
2080 * by setting lwork to -1 a workspace query for optimal length of work is
2081 * performed
2082 */
2083 int lwork = -1;
2084 work.resize(1);
2085
2086 pgesvd(&jobu,
2087 &jobvt,
2088 &n_rows,
2089 &n_columns,
2090 A_loc,
2091 &submatrix_row,
2092 &submatrix_column,
2093 descriptor,
2094 &*sv.begin(),
2095 U_loc,
2096 &U->submatrix_row,
2097 &U->submatrix_column,
2098 U->descriptor,
2099 VT_loc,
2100 &VT->submatrix_row,
2101 &VT->submatrix_column,
2102 VT->descriptor,
2103 work.data(),
2104 &lwork,
2105 &info);
2106 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2107
2108 lwork = static_cast<int>(work[0]);
2109 work.resize(lwork);
2110
2111 pgesvd(&jobu,
2112 &jobvt,
2113 &n_rows,
2114 &n_columns,
2115 A_loc,
2116 &submatrix_row,
2117 &submatrix_column,
2118 descriptor,
2119 &*sv.begin(),
2120 U_loc,
2121 &U->submatrix_row,
2122 &U->submatrix_column,
2123 U->descriptor,
2124 VT_loc,
2125 &VT->submatrix_row,
2126 &VT->submatrix_column,
2127 VT->descriptor,
2128 work.data(),
2129 &lwork,
2130 &info);
2131 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2132 }
2133
2134 /*
2135 * send the singular values to processors not being part of the process grid
2136 */
2137 grid->send_to_inactive(sv.data(), sv.size());
2138
2139 property = LAPACKSupport::Property::general;
2140 state = LAPACKSupport::State::unusable;
2141
2142 return sv;
2143}
2144
2145
2146
2147template <typename NumberType>
2148void
2149ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
2150 const bool transpose)
2151{
2152 Assert(grid == B.grid,
2153 ExcMessage("The matrices A and B need to have the same process grid"));
2154 Assert(state == LAPACKSupport::matrix,
2155 ExcMessage(
2156 "Matrix has to be in Matrix state before calling this function."));
2157 Assert(B.state == LAPACKSupport::matrix,
2158 ExcMessage(
2159 "Matrix B has to be in Matrix state before calling this function."));
2160
2161 if (transpose)
2162 {
2163 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
2164 }
2165 else
2166 {
2167 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
2168 }
2169
2170 // see
2171 // https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgels.htm
2172 Assert(row_block_size == column_block_size,
2173 ExcMessage(
2174 "Use identical block sizes for rows and columns of matrix A"));
2175 Assert(B.row_block_size == B.column_block_size,
2176 ExcMessage(
2177 "Use identical block sizes for rows and columns of matrix B"));
2178 Assert(row_block_size == B.row_block_size,
2179 ExcMessage(
2180 "Use identical block-cyclic distribution for matrices A and B"));
2181
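 // Hypothetical usage sketch (not part of the original source): solving an
 // overdetermined system A x = b, with m >= n, in the least-squares sense;
 // on exit the first n_columns rows of B hold the solution:
 //
 //   ScaLAPACKMatrix<double> A(m, n, grid, nb, nb);
 //   ScaLAPACKMatrix<double> B(m, 1, grid, nb, nb);
 //   // ... fill A and B ...
 //   A.least_squares(B); // B(0, 0) ... B(n-1, 0) now contain x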
2182 std::lock_guard<std::mutex> lock(mutex);
2183
2184 if (grid->mpi_process_is_active)
2185 {
2186 char trans = transpose ? 'T' : 'N';
2187 NumberType *A_loc = this->values.data();
2188 NumberType *B_loc = B.values.data();
2189 int info = 0;
2190 /*
2191 * by setting lwork to -1 a workspace query for optimal length of work is
2192 * performed
2193 */
2194 int lwork = -1;
2195 work.resize(1);
2196
2197 pgels(&trans,
2198 &n_rows,
2199 &n_columns,
2200 &B.n_columns,
2201 A_loc,
2202 &submatrix_row,
2203 &submatrix_column,
2204 descriptor,
2205 B_loc,
2206 &B.submatrix_row,
2207 &B.submatrix_column,
2208 B.descriptor,
2209 work.data(),
2210 &lwork,
2211 &info);
2212 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2213
2214 lwork = static_cast<int>(work[0]);
2215 work.resize(lwork);
2216
2217 pgels(&trans,
2218 &n_rows,
2219 &n_columns,
2220 &B.n_columns,
2221 A_loc,
2222 &submatrix_row,
2223 &submatrix_column,
2224 descriptor,
2225 B_loc,
2226 &B.submatrix_row,
2227 &B.submatrix_column,
2228 B.descriptor,
2229 work.data(),
2230 &lwork,
2231 &info);
2232 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2233 }
2234 state = LAPACKSupport::State::unusable;
2235}
2236
2237
2238
2239template <typename NumberType>
2240unsigned int
2241ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
2242{
2243 Assert(state == LAPACKSupport::matrix,
2244 ExcMessage(
2245 "Matrix has to be in Matrix state before calling this function."));
2246 Assert(row_block_size == column_block_size,
2247 ExcMessage(
2248 "Use identical block sizes for rows and columns of matrix A"));
2249 Assert(
2250 ratio > 0. && ratio < 1.,
2251 ExcMessage(
2252 "input parameter ratio has to be larger than zero and smaller than 1"));
2253
2254 ScaLAPACKMatrix<NumberType> U(n_rows,
2255 n_rows,
2256 grid,
2257 row_block_size,
2258 row_block_size,
2259 LAPACKSupport::Property::general);
2260 ScaLAPACKMatrix<NumberType> VT(n_columns,
2261 n_columns,
2262 grid,
2263 row_block_size,
2264 row_block_size,
2265 LAPACKSupport::Property::general);
2266 std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
2267 AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
2268 ExcMessage("Matrix has rank 0"));
2269
2270 // Get the number of singular values fulfilling sv[i] > sv[0] * ratio.
2271 // Obviously, the 0-th element already satisfies sv[0] > sv[0] * ratio. The
2272 // singular values in sv are ordered by descending value, so we break out of
2273 // the loop once a singular value drops below sv[0] * ratio.
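 // Worked example (illustrative only): for sv = {8.0, 2.0, 1e-12} and
 // ratio = 1e-6 the threshold is sv[0] * ratio = 8e-6, so only sv[0] and
 // sv[1] pass, n_sv becomes 2 and inv_sigma = {1/8.0, 1/2.0}.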
2274 unsigned int n_sv = 1;
2275 std::vector<NumberType> inv_sigma;
2276 inv_sigma.push_back(1 / sv[0]);
2277
2278 for (unsigned int i = 1; i < sv.size(); ++i)
2279 if (sv[i] > sv[0] * ratio)
2280 {
2281 ++n_sv;
2282 inv_sigma.push_back(1 / sv[i]);
2283 }
2284 else
2285 break;
2286
2287 // For the matrix multiplication we use only the columns of U and rows of VT
2288 // which are associated with singular values larger than the limit. That saves
2289 // computational time for matrices with rank significantly smaller than
2290 // min(n_rows,n_columns)
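 // In formulas (standard truncated-SVD pseudoinverse, stated for reference):
 // with A = U Sigma V^T and only the first n_sv singular triplets kept,
 // A^+ = V_R Sigma_R^{-1} U_R^T. scale_rows(inv_sigma) applies Sigma_R^{-1}
 // to VT_R, and the transposed mult() call below assembles V_R Sigma_R^{-1} U_R^T.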
2291 ScaLAPACKMatrix<NumberType> U_R(n_rows,
2292 n_sv,
2293 grid,
2294 row_block_size,
2295 row_block_size,
2296 LAPACKSupport::Property::general);
2297 ScaLAPACKMatrix<NumberType> VT_R(n_sv,
2298 n_columns,
2299 grid,
2300 row_block_size,
2301 row_block_size,
2302 LAPACKSupport::Property::general);
2303 U.copy_to(U_R,
2304 std::make_pair(0, 0),
2305 std::make_pair(0, 0),
2306 std::make_pair(n_rows, n_sv));
2307 VT.copy_to(VT_R,
2308 std::make_pair(0, 0),
2309 std::make_pair(0, 0),
2310 std::make_pair(n_sv, n_columns));
2311
2312 VT_R.scale_rows(inv_sigma);
2313 this->reinit(n_columns,
2314 n_rows,
2315 this->grid,
2316 row_block_size,
2317 column_block_size,
2318 LAPACKSupport::Property::general);
2319 VT_R.mult(1, U_R, 0, *this, true, true);
2320 state = LAPACKSupport::State::inverse_matrix;
2321 return n_sv;
2322}
2323
2324
2325
2326template <typename NumberType>
2327NumberType
2328ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
2329 const NumberType a_norm) const
2330{
2331 Assert(state == LAPACKSupport::cholesky,
2332 ExcMessage(
2333 "Matrix has to be in Cholesky state before calling this function."));
2334 std::lock_guard<std::mutex> lock(mutex);
2335 NumberType rcond = 0.;
2336
2337 if (grid->mpi_process_is_active)
2338 {
2339 int liwork = n_local_rows;
2340 iwork.resize(liwork);
2341
2342 int info = 0;
2343 const NumberType *A_loc = this->values.data();
2344
2345 // by setting lwork to -1 a workspace query for optimal length of work is
2346 // performed
2347 int lwork = -1;
2348 work.resize(1);
2349 ppocon(&uplo,
2350 &n_columns,
2351 A_loc,
2352 &submatrix_row,
2353 &submatrix_column,
2354 descriptor,
2355 &a_norm,
2356 &rcond,
2357 work.data(),
2358 &lwork,
2359 iwork.data(),
2360 &liwork,
2361 &info);
2362 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2363 lwork = static_cast<int>(std::ceil(work[0]));
2364 work.resize(lwork);
2365
2366 // now the actual run:
2367 ppocon(&uplo,
2368 &n_columns,
2369 A_loc,
2370 &submatrix_row,
2371 &submatrix_column,
2372 descriptor,
2373 &a_norm,
2374 &rcond,
2375 work.data(),
2376 &lwork,
2377 iwork.data(),
2378 &liwork,
2379 &info);
2380 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2381 }
2382 grid->send_to_inactive(&rcond);
2383 return rcond;
2384}
2385
2386
2387
2388template <typename NumberType>
2389NumberType
2390ScaLAPACKMatrix<NumberType>::l1_norm() const
2391{
2392 const char type('O');
2393
2394 if (property == LAPACKSupport::symmetric)
2395 return norm_symmetric(type);
2396 else
2397 return norm_general(type);
2398}
2399
2400
2401
2402template <typename NumberType>
2403NumberType
2404ScaLAPACKMatrix<NumberType>::linfty_norm() const
2405{
2406 const char type('I');
2407
2408 if (property == LAPACKSupport::symmetric)
2409 return norm_symmetric(type);
2410 else
2411 return norm_general(type);
2412}
2413
2414
2415
2416template <typename NumberType>
2417NumberType
2418ScaLAPACKMatrix<NumberType>::frobenius_norm() const
2419{
2420 const char type('F');
2421
2422 if (property == LAPACKSupport::symmetric)
2423 return norm_symmetric(type);
2424 else
2425 return norm_general(type);
2426}
2427
2428
2429
2430template <typename NumberType>
2431NumberType
2432ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
2433{
2434 Assert(state == LAPACKSupport::matrix ||
2435 state == LAPACKSupport::inverse_matrix,
2436 ExcMessage("norms can be called in matrix state only."));
2437 std::lock_guard<std::mutex> lock(mutex);
2438 NumberType res = 0.;
2439
2440 if (grid->mpi_process_is_active)
2441 {
2442 const int iarow = indxg2p_(&submatrix_row,
2443 &row_block_size,
2444 &(grid->this_process_row),
2445 &first_process_row,
2446 &(grid->n_process_rows));
2447 const int iacol = indxg2p_(&submatrix_column,
2448 &column_block_size,
2449 &(grid->this_process_column),
2450 &first_process_column,
2451 &(grid->n_process_columns));
2452 const int mp0 = numroc_(&n_rows,
2453 &row_block_size,
2454 &(grid->this_process_row),
2455 &iarow,
2456 &(grid->n_process_rows));
2457 const int nq0 = numroc_(&n_columns,
2458 &column_block_size,
2459 &(grid->this_process_column),
2460 &iacol,
2461 &(grid->n_process_columns));
2462
2463 // type='M': compute largest absolute value
2464 // type='F' || type='E': compute Frobenius norm
2465 // type='O' || type='1': compute one norm
2466' // type='I': compute infinity norm
2466 int lwork = 0; // for type == 'M' || type == 'F' || type == 'E'
2467 if (type == 'O' || type == '1')
2468 lwork = nq0;
2469 else if (type == 'I')
2470 lwork = mp0;
2471
2472 work.resize(lwork);
2473 const NumberType *A_loc = this->values.begin();
2474 res = plange(&type,
2475 &n_rows,
2476 &n_columns,
2477 A_loc,
2478 &submatrix_row,
2479 &submatrix_column,
2480 descriptor,
2481 work.data());
2482 }
2483 grid->send_to_inactive(&res);
2484 return res;
2485}
2486
2487
2488
2489template <typename NumberType>
2490NumberType
2491ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
2492{
2493 Assert(state == LAPACKSupport::matrix ||
2494 state == LAPACKSupport::inverse_matrix,
2495 ExcMessage("norms can be called in matrix state only."));
2496 Assert(property == LAPACKSupport::symmetric,
2497 ExcMessage("Matrix has to be symmetric for this operation."));
2498 std::lock_guard<std::mutex> lock(mutex);
2499 NumberType res = 0.;
2500
2501 if (grid->mpi_process_is_active)
2502 {
2503 // int IROFFA = MOD( IA-1, MB_A )
2504 // int ICOFFA = MOD( JA-1, NB_A )
2505 const int lcm =
2506 ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
2507 const int v2 = lcm / (grid->n_process_rows);
2508
2509 const int IAROW = indxg2p_(&submatrix_row,
2510 &row_block_size,
2511 &(grid->this_process_row),
2512 &first_process_row,
2513 &(grid->n_process_rows));
2514 const int IACOL = indxg2p_(&submatrix_column,
2515 &column_block_size,
2516 &(grid->this_process_column),
2517 &first_process_column,
2518 &(grid->n_process_columns));
2519 const int Np0 = numroc_(&n_columns /*+IROFFA*/,
2520 &row_block_size,
2521 &(grid->this_process_row),
2522 &IAROW,
2523 &(grid->n_process_rows));
2524 const int Nq0 = numroc_(&n_columns /*+ICOFFA*/,
2525 &column_block_size,
2526 &(grid->this_process_column),
2527 &IACOL,
2528 &(grid->n_process_columns));
2529
2530 const int v1 = iceil_(&Np0, &row_block_size);
2531 const int ldw = (n_local_rows == n_local_columns) ?
2532 0 :
2533 row_block_size * iceil_(&v1, &v2);
2534
2535 const int lwork =
2536 (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
2537 work.resize(lwork);
2538 const NumberType *A_loc = this->values.begin();
2539 res = plansy(&type,
2540 &uplo,
2541 &n_columns,
2542 A_loc,
2543 &submatrix_row,
2544 &submatrix_column,
2545 descriptor,
2546 work.data());
2547 }
2548 grid->send_to_inactive(&res);
2549 return res;
2550}
2551
2552
2553
2554# ifdef DEAL_II_WITH_HDF5
2555namespace internal
2556{
2557 namespace
2558 {
2559 void
2560 create_HDF5_state_enum_id(hid_t &state_enum_id)
2561 {
2562 // create HDF5 enum type for LAPACKSupport::State
2563 LAPACKSupport::State val;
2564 state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));
2565 val = LAPACKSupport::State::cholesky;
2566 herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
2567 AssertThrow(status >= 0, ExcInternalError());
2568 val = LAPACKSupport::State::eigenvalues;
2569 status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
2570 AssertThrow(status >= 0, ExcInternalError());
2571 val = LAPACKSupport::State::inverse_matrix;
2572 status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
2573 AssertThrow(status >= 0, ExcInternalError());
2574 val = LAPACKSupport::State::inverse_svd;
2575 status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
2576 AssertThrow(status >= 0, ExcInternalError());
2577 val = LAPACKSupport::State::lu;
2578 status = H5Tenum_insert(state_enum_id, "lu", &val);
2579 AssertThrow(status >= 0, ExcInternalError());
2580 val = LAPACKSupport::State::matrix;
2581 status = H5Tenum_insert(state_enum_id, "matrix", &val);
2582 AssertThrow(status >= 0, ExcInternalError());
2583 val = LAPACKSupport::State::svd;
2584 status = H5Tenum_insert(state_enum_id, "svd", &val);
2585 AssertThrow(status >= 0, ExcInternalError());
2586 val = LAPACKSupport::State::unusable;
2587 status = H5Tenum_insert(state_enum_id, "unusable", &val);
2588 AssertThrow(status >= 0, ExcInternalError());
2589 }
2590
2591 void
2592 create_HDF5_property_enum_id(hid_t &property_enum_id)
2593 {
2594 // create HDF5 enum type for LAPACKSupport::Property
2595 property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));
2596 LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
2597 herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
2598 AssertThrow(status >= 0, ExcInternalError());
2599 prop = LAPACKSupport::Property::general;
2600 status = H5Tenum_insert(property_enum_id, "general", &prop);
2601 AssertThrow(status >= 0, ExcInternalError());
2602 prop = LAPACKSupport::Property::hessenberg;
2603 status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
2604 AssertThrow(status >= 0, ExcInternalError());
2605 prop = LAPACKSupport::Property::lower_triangular;
2606 status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
2607 AssertThrow(status >= 0, ExcInternalError());
2608 prop = LAPACKSupport::Property::symmetric;
2609 status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
2610 AssertThrow(status >= 0, ExcInternalError());
2611 prop = LAPACKSupport::Property::upper_triangular;
2612 status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
2613 AssertThrow(status >= 0, ExcInternalError());
2614 }
2615 } // namespace
2616} // namespace internal
2617# endif
2618
2619
2620
2621template <typename NumberType>
2622void
2623ScaLAPACKMatrix<NumberType>::save(
2624 const std::string &filename,
2625 const std::pair<unsigned int, unsigned int> &chunk_size) const
2626{
2627# ifndef DEAL_II_WITH_HDF5
2628 (void)filename;
2629 (void)chunk_size;
2630 AssertThrow(false, ExcNeedsHDF5());
2631# else
2632
2633 std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
2634
2635 if (chunks_size_.first == numbers::invalid_unsigned_int ||
2636 chunks_size_.second == numbers::invalid_unsigned_int)
2637 {
2638 // default: store the matrix in chunks of columns
2639 chunks_size_.first = n_rows;
2640 chunks_size_.second = 1;
2641 }
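 // Hypothetical usage sketch (not part of the original source): storing the
 // matrix in 64 x 64 chunks instead of the default single-column chunks:
 //
 //   matrix.save("matrix.h5", std::make_pair(64u, 64u));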
2642 Assert(chunks_size_.first > 0,
2643 ExcMessage("The row chunk size must be larger than 0."));
2644 AssertIndexRange(chunks_size_.first, n_rows + 1);
2645 Assert(chunks_size_.second > 0,
2646 ExcMessage("The column chunk size must be larger than 0."));
2647 AssertIndexRange(chunks_size_.second, n_columns + 1);
2648
2649# ifdef H5_HAVE_PARALLEL
2650 // implementation for configurations equipped with a parallel file system
2651 save_parallel(filename, chunks_size_);
2652
2653# else
2654 // implementation for configurations with no parallel file system
2655 save_serial(filename, chunks_size_);
2656
2657# endif
2658# endif
2659}
2660
2661
2662
2663template <typename NumberType>
2664void
2665ScaLAPACKMatrix<NumberType>::save_serial(
2666 const std::string &filename,
2667 const std::pair<unsigned int, unsigned int> &chunk_size) const
2668{
2669# ifndef DEAL_II_WITH_HDF5
2670 (void)filename;
2671 (void)chunk_size;
2672 AssertThrow(false, ExcNeedsHDF5());
2673# else
2674
2675 /*
2676 * The content of the distributed matrix is copied to a matrix using a 1x1
2677 * process grid. Therefore, one process has all the data and can write it to a
2678 * file.
2679 *
2680 * Create a 1x1 column grid which will be used to initialize
2681 * an effectively serial ScaLAPACK matrix to gather the contents from the
2682 * current object
2683 */
2684 const auto column_grid =
2685 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2686 1,
2687 1);
2688
2689 const int MB = n_rows, NB = n_columns;
2690 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2691 copy_to(tmp);
2692
2693 // the 1x1 grid has only one process and this one writes
2694 // the content of the matrix to the HDF5 file
2695 if (tmp.grid->mpi_process_is_active)
2696 {
2697 herr_t status;
2698
2699 // create a new file using default properties
2700 hid_t file_id =
2701 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
2702
2703 // modify dataset creation properties, i.e. enable chunking
2704 hsize_t chunk_dims[2];
2705 // revert order of rows and columns as ScaLAPACK uses column-major
2706 // ordering
2707 chunk_dims[0] = chunk_size.second;
2708 chunk_dims[1] = chunk_size.first;
2709 hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
2710 status = H5Pset_chunk(data_property, 2, chunk_dims);
2711 AssertThrow(status >= 0, ExcIO());
2712
2713 // create the data space for the dataset
2714 hsize_t dims[2];
2715 // change order of rows and columns as ScaLAPACKMatrix uses column major
2716 // ordering
2717 dims[0] = n_columns;
2718 dims[1] = n_rows;
2719 hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
2720
2721 // create the dataset within the file using chunk creation properties
2722 hid_t type_id = hdf5_type_id(tmp.values.data());
2723 hid_t dataset_id = H5Dcreate2(file_id,
2724 "/matrix",
2725 type_id,
2726 dataspace_id,
2727 H5P_DEFAULT,
2728 data_property,
2729 H5P_DEFAULT);
2730
2731 // write the dataset
2732 status = H5Dwrite(
2733 dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
2734 AssertThrow(status >= 0, ExcIO());
2735
2736 // create HDF5 enum type for LAPACKSupport::State and
2737 // LAPACKSupport::Property
2738 hid_t state_enum_id, property_enum_id;
2739 internal::create_HDF5_state_enum_id(state_enum_id);
2740 internal::create_HDF5_property_enum_id(property_enum_id);
2741
2742 // create the data space for the state enum
2743 hsize_t dims_state[1];
2744 dims_state[0] = 1;
2745 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2746 // create the dataset for the state enum
2747 hid_t state_enum_dataset = H5Dcreate2(file_id,
2748 "/state",
2749 state_enum_id,
2750 state_enum_dataspace,
2751 H5P_DEFAULT,
2752 H5P_DEFAULT,
2753 H5P_DEFAULT);
2754 // write the dataset for the state enum
2755 status = H5Dwrite(state_enum_dataset,
2756 state_enum_id,
2757 H5S_ALL,
2758 H5S_ALL,
2759 H5P_DEFAULT,
2760 &state);
2761 AssertThrow(status >= 0, ExcIO());
2762
2763 // create the data space for the property enum
2764 hsize_t dims_property[1];
2765 dims_property[0] = 1;
2766 hid_t property_enum_dataspace =
2767 H5Screate_simple(1, dims_property, nullptr);
2768 // create the dataset for the property enum
2769 hid_t property_enum_dataset = H5Dcreate2(file_id,
2770 "/property",
2771 property_enum_id,
2772 property_enum_dataspace,
2773 H5P_DEFAULT,
2774 H5P_DEFAULT,
2775 H5P_DEFAULT);
2776 // write the dataset for the property enum
2777 status = H5Dwrite(property_enum_dataset,
2778 property_enum_id,
2779 H5S_ALL,
2780 H5S_ALL,
2781 H5P_DEFAULT,
2782 &property);
2783 AssertThrow(status >= 0, ExcIO());
2784
2785 // end access to the datasets and release resources used by them
2786 status = H5Dclose(dataset_id);
2787 AssertThrow(status >= 0, ExcIO());
2788 status = H5Dclose(state_enum_dataset);
2789 AssertThrow(status >= 0, ExcIO());
2790 status = H5Dclose(property_enum_dataset);
2791 AssertThrow(status >= 0, ExcIO());
2792
2793 // terminate access to the data spaces
2794 status = H5Sclose(dataspace_id);
2795 AssertThrow(status >= 0, ExcIO());
2796 status = H5Sclose(state_enum_dataspace);
2797 AssertThrow(status >= 0, ExcIO());
2798 status = H5Sclose(property_enum_dataspace);
2799 AssertThrow(status >= 0, ExcIO());
2800
2801 // release enum data types
2802 status = H5Tclose(state_enum_id);
2803 AssertThrow(status >= 0, ExcIO());
2804 status = H5Tclose(property_enum_id);
2805 AssertThrow(status >= 0, ExcIO());
2806
2807 // release the creation property
2808 status = H5Pclose(data_property);
2809 AssertThrow(status >= 0, ExcIO());
2810
2811 // close the file.
2812 status = H5Fclose(file_id);
2813 AssertThrow(status >= 0, ExcIO());
2814 }
2815# endif
2816}
2817
2818
2819
2820template <typename NumberType>
2821void
2822ScaLAPACKMatrix<NumberType>::save_parallel(
2823 const std::string &filename,
2824 const std::pair<unsigned int, unsigned int> &chunk_size) const
2825{
2826# ifndef DEAL_II_WITH_HDF5
2827 (void)filename;
2828 (void)chunk_size;
2829 AssertThrow(false, ExcNeedsHDF5());
2830# else
2831
2832 const unsigned int n_mpi_processes(
2833 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
2834 MPI_Info info = MPI_INFO_NULL;
2835 /*
2836 * The content of the distributed matrix is copied to a matrix using a
2837 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
2838 * of the matrix, which they can write to the file
2839 *
2840 * Create a 1xn_processes column grid
2841 */
2842 const auto column_grid =
2843 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2844 1,
2845 n_mpi_processes);
2846
2847 const int MB = n_rows;
2848 /*
2849 * If the ratio n_columns/n_mpi_processes is smaller than the column block
2850 * size of the original matrix, the redistribution and saving of the matrix
2851 * requires a significant amount of MPI communication. Therefore, it is better
2852 * to set a minimum value for the block size NB, causing only
2853 * ceil(n_columns/NB) processes being actively involved in saving the matrix.
2854 * Example: A 2*10^9 x 400 matrix is distributed on a 80 x 5 process grid
2855 * using block size 32. Instead of distributing the matrix on a 1 x 400
2856 * process grid with a row block size of 2*10^9 and a column block size of 1,
2857 * the minimum value for NB yields that only ceil(400/32)=13 processes will be
2858 * writing the matrix to disk.
2859 */
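 // For the example above this evaluates to NB = max(ceil(400 / 400), 32) = 32,
 // so ceil(400 / 32) = 13 of the 400 processes in the 1 x 400 grid hold
 // columns and therefore take part in writing the file.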
2860 const int NB = std::max(static_cast<int>(std::ceil(
2861 static_cast<double>(n_columns) / n_mpi_processes)),
2862 column_block_size);
2863
2864 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2865 copy_to(tmp);
2866
2867 // get pointer to data held by the process
2868 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
2869
2870 herr_t status;
2871 // dataset dimensions
2872 hsize_t dims[2];
2873
2874 // set up file access property list with parallel I/O access
2875 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
2876 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2877 AssertThrow(status >= 0, ExcIO());
2878
2879 // create a new file collectively and release property list identifier
2880 hid_t file_id =
2881 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
2882 status = H5Pclose(plist_id);
2883 AssertThrow(status >= 0, ExcIO());
2884
2885 // As ScaLAPACK, and therefore the class ScaLAPACKMatrix, uses column-major
2886 // ordering but HDF5 row-major ordering, we have to reverse entries related to
2887 // columns and rows in the following. Create the dataspace for the dataset.
2888 dims[0] = tmp.n_columns;
2889 dims[1] = tmp.n_rows;
2890
2891 hid_t filespace = H5Screate_simple(2, dims, nullptr);
2892
2893 // create the chunked dataset with default properties and close filespace
2894 hsize_t chunk_dims[2];
2895 // revert order of rows and columns as ScaLAPACK uses column-major ordering
2896 chunk_dims[0] = chunk_size.second;
2897 chunk_dims[1] = chunk_size.first;
2898 plist_id = H5Pcreate(H5P_DATASET_CREATE);
2899 H5Pset_chunk(plist_id, 2, chunk_dims);
2900 hid_t type_id = hdf5_type_id(data);
2901 hid_t dset_id = H5Dcreate2(
2902 file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
2903
2904 status = H5Sclose(filespace);
2905 AssertThrow(status >= 0, ExcIO());
2906
2907 status = H5Pclose(plist_id);
2908 AssertThrow(status >= 0, ExcIO());
2909
2910 // gather the number of local rows and columns from all processes
2911 std::vector<int> proc_n_local_rows(n_mpi_processes),
2912 proc_n_local_columns(n_mpi_processes);
2913 int ierr = MPI_Allgather(&tmp.n_local_rows,
2914 1,
2915 MPI_INT,
2916 proc_n_local_rows.data(),
2917 1,
2918 MPI_INT,
2919 tmp.grid->mpi_communicator);
2920 AssertThrowMPI(ierr);
2921 ierr = MPI_Allgather(&tmp.n_local_columns,
2922 1,
2923 MPI_INT,
2924 proc_n_local_columns.data(),
2925 1,
2926 MPI_INT,
2927 tmp.grid->mpi_communicator);
2928 AssertThrowMPI(ierr);
2929
2930 const unsigned int my_rank(
2931 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
2932
2933 // hyperslab selection parameters
2934 // each process defines dataset in memory and writes it to the hyperslab in
2935 // the file
2936 hsize_t count[2];
2937 count[0] = tmp.n_local_columns;
2938 count[1] = tmp.n_rows;
2939 hid_t memspace = H5Screate_simple(2, count, nullptr);
2940
2941 hsize_t offset[2] = {0};
2942 for (unsigned int i = 0; i < my_rank; ++i)
2943 offset[0] += proc_n_local_columns[i];
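 // Example (illustrative only): with local column counts {3, 3, 2} on three
 // processes, rank 0 writes columns [0,3), rank 1 columns [3,6) and rank 2
 // columns [6,8), i.e. offset[0] equals 0, 3 and 6, respectively.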
2944
2945 // select hyperslab in the file.
2946 filespace = H5Dget_space(dset_id);
2947 status = H5Sselect_hyperslab(
2948 filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2949 AssertThrow(status >= 0, ExcIO());
2950
2951 // create property list for independent dataset write
2952 plist_id = H5Pcreate(H5P_DATASET_XFER);
2953 status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
2954 AssertThrow(status >= 0, ExcIO());
2955
2956 // process with no data will not participate in writing to the file
2957 if (tmp.values.size() > 0)
2958 {
2959 status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
2960 AssertThrow(status >= 0, ExcIO());
2961 }
2962 // close/release sources
2963 status = H5Dclose(dset_id);
2964 AssertThrow(status >= 0, ExcIO());
2965 status = H5Sclose(filespace);
2966 AssertThrow(status >= 0, ExcIO());
2967 status = H5Sclose(memspace);
2968 AssertThrow(status >= 0, ExcIO());
2969 status = H5Pclose(plist_id);
2970 AssertThrow(status >= 0, ExcIO());
2971 status = H5Fclose(file_id);
2972 AssertThrow(status >= 0, ExcIO());
2973
2974 // before writing the state and property to file wait for
2975 // all processes to finish writing the matrix content to the file
2976 ierr = MPI_Barrier(tmp.grid->mpi_communicator);
2977 AssertThrowMPI(ierr);
2978
2979 // only root process will write state and property to the file
2980 if (tmp.grid->this_mpi_process == 0)
2981 {
2982 // open file using default properties
2983 hid_t file_id_reopen =
2984 H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
2985
2986 // create HDF5 enum type for LAPACKSupport::State and
2987 // LAPACKSupport::Property
2988 hid_t state_enum_id, property_enum_id;
2989 internal::create_HDF5_state_enum_id(state_enum_id);
2990 internal::create_HDF5_property_enum_id(property_enum_id);
2991
2992 // create the data space for the state enum
2993 hsize_t dims_state[1];
2994 dims_state[0] = 1;
2995 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2996 // create the dataset for the state enum
2997 hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
2998 "/state",
2999 state_enum_id,
3000 state_enum_dataspace,
3001 H5P_DEFAULT,
3002 H5P_DEFAULT,
3003 H5P_DEFAULT);
3004 // write the dataset for the state enum
3005 status = H5Dwrite(state_enum_dataset,
3006 state_enum_id,
3007 H5S_ALL,
3008 H5S_ALL,
3009 H5P_DEFAULT,
3010 &state);
3011 AssertThrow(status >= 0, ExcIO());
3012
3013 // create the data space for the property enum
3014 hsize_t dims_property[1];
3015 dims_property[0] = 1;
3016 hid_t property_enum_dataspace =
3017 H5Screate_simple(1, dims_property, nullptr);
3018 // create the dataset for the property enum
3019 hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
3020 "/property",
3021 property_enum_id,
3022 property_enum_dataspace,
3023 H5P_DEFAULT,
3024 H5P_DEFAULT,
3025 H5P_DEFAULT);
3026 // write the dataset for the property enum
3027 status = H5Dwrite(property_enum_dataset,
3028 property_enum_id,
3029 H5S_ALL,
3030 H5S_ALL,
3031 H5P_DEFAULT,
3032 &property);
3033 AssertThrow(status >= 0, ExcIO());
3034
3035 status = H5Dclose(state_enum_dataset);
3036 AssertThrow(status >= 0, ExcIO());
3037 status = H5Dclose(property_enum_dataset);
3038 AssertThrow(status >= 0, ExcIO());
3039 status = H5Sclose(state_enum_dataspace);
3040 AssertThrow(status >= 0, ExcIO());
3041 status = H5Sclose(property_enum_dataspace);
3042 AssertThrow(status >= 0, ExcIO());
3043 status = H5Tclose(state_enum_id);
3044 AssertThrow(status >= 0, ExcIO());
3045 status = H5Tclose(property_enum_id);
3046 AssertThrow(status >= 0, ExcIO());
3047 status = H5Fclose(file_id_reopen);
3048 AssertThrow(status >= 0, ExcIO());
3049 }
3050
3051# endif
3052}
3053
3054
3055
3056template <typename NumberType>
3057void
3058ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
3059{
3060# ifndef DEAL_II_WITH_HDF5
3061 (void)filename;
3062 AssertThrow(false, ExcNeedsHDF5());
3063# else
3064# ifdef H5_HAVE_PARALLEL
3065 // implementation for configurations equipped with a parallel file system
3066 load_parallel(filename);
3067
3068# else
3069 // implementation for configurations with no parallel file system
3070 load_serial(filename);
3071# endif
3072# endif
3073}
3074
3075
3076
3077template <typename NumberType>
3078void
3079ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
3080{
3081# ifndef DEAL_II_WITH_HDF5
3082 (void)filename;
3083 AssertThrow(false, ExcNeedsHDF5());
3084# else
3085
3086 /*
3087 * The content of the distributed matrix is copied to a matrix using a 1x1
3088 * process grid. Therefore, one process has all the data and can read it from
3089 * the file.
3090 */
3091 // create a 1x1 process grid
3092 const auto one_grid =
3093 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3094 1,
3095 1);
3096
3097 const int MB = n_rows, NB = n_columns;
3098 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);
3099
3100 int state_int = -1;
3101 int property_int = -1;
3102
3103 // the 1x1 grid has only one process and this one reads
3104 // the content of the matrix from the HDF5 file
3105 if (tmp.grid->mpi_process_is_active)
3106 {
3107 herr_t status;
3108
3109 // open the file in read-only mode
3110 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
3111
3112 // open the dataset in the file
3113 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3114
3115 // check the datatype of the data in the file
3116 // datatype of source and destination must have the same class
3117 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3118 // Selection
3119 hid_t datatype = H5Dget_type(dataset_id);
3120 H5T_class_t t_class_in = H5Tget_class(datatype);
3121 H5T_class_t t_class = H5Tget_class(hdf5_type_id(tmp.values.data()));
3122 AssertThrow(
3123 t_class_in == t_class,
3124 ExcMessage(
3125 "The data type of the matrix to be read does not match the archive"));
3126
3127 // get dataspace handle
3128 hid_t dataspace_id = H5Dget_space(dataset_id);
3129 // get number of dimensions
3130 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3131 AssertThrow(ndims == 2, ExcIO());
3132 // get every dimension
3133 hsize_t dims[2];
3134 H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3135 AssertThrow(
3136 static_cast<int>(dims[0]) == n_columns,
3137 ExcMessage(
3138 "The number of columns of the matrix does not match the content of the archive"));
3139 AssertThrow(
3140 static_cast<int>(dims[1]) == n_rows,
3141 ExcMessage(
3142 "The number of rows of the matrix does not match the content of the archive"));
3143
3144 // read data
3145 status = H5Dread(dataset_id,
3146 hdf5_type_id(tmp.values.data()),
3147 H5S_ALL,
3148 H5S_ALL,
3149 H5P_DEFAULT,
3150 tmp.values.data());
3151 AssertThrow(status >= 0, ExcIO());
3152
3153 // create HDF5 enum type for LAPACKSupport::State and
3154 // LAPACKSupport::Property
3155 hid_t state_enum_id, property_enum_id;
3156 internal::create_HDF5_state_enum_id(state_enum_id);
3157 internal::create_HDF5_property_enum_id(property_enum_id);
3158
3159 // open the datasets for the state and property enum in the file
3160 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3161 hid_t datatype_state = H5Dget_type(dataset_state_id);
3162 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3163 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3164
3165 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3166 hid_t datatype_property = H5Dget_type(dataset_property_id);
3167 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3168 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3169
3170 // get dataspace handles
3171 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3172 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3173 // get number of dimensions
3174 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3175 AssertThrow(ndims_state == 1, ExcIO());
3176 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3177 AssertThrow(ndims_property == 1, ExcIO());
3178 // get every dimension
3179 hsize_t dims_state[1];
3180 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3181 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3182 hsize_t dims_property[1];
3183 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3184 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3185
3186 // read data
3187 status = H5Dread(dataset_state_id,
3188 state_enum_id,
3189 H5S_ALL,
3190 H5S_ALL,
3191 H5P_DEFAULT,
3192 &tmp.state);
3193 AssertThrow(status >= 0, ExcIO());
3194 // To send the state from the root process to the other processes
3195 // the state enum is cast to an integer, which is broadcast and
3196 // subsequently cast back to the enum type
3197 state_int = static_cast<int>(tmp.state);
3198
3199 status = H5Dread(dataset_property_id,
3200 property_enum_id,
3201 H5S_ALL,
3202 H5S_ALL,
3203 H5P_DEFAULT,
3204 &tmp.property);
3205 AssertThrow(status >= 0, ExcIO());
3206 // To send the property from the root process to the other processes
3207 // the property enum is cast to an integer, which is broadcast and
3208 // subsequently cast back to the enum type
3209 property_int = static_cast<int>(tmp.property);
3210
3211 // terminate access to the data spaces
3212 status = H5Sclose(dataspace_id);
3213 AssertThrow(status >= 0, ExcIO());
3214 status = H5Sclose(dataspace_state);
3215 AssertThrow(status >= 0, ExcIO());
3216 status = H5Sclose(dataspace_property);
3217 AssertThrow(status >= 0, ExcIO());
3218
3219 // release data type handles
3220 status = H5Tclose(datatype);
3221 AssertThrow(status >= 0, ExcIO());
3222 status = H5Tclose(state_enum_id);
3223 AssertThrow(status >= 0, ExcIO());
3224 status = H5Tclose(property_enum_id);
3225 AssertThrow(status >= 0, ExcIO());
3226
3227 // end access to the data sets and release resources used by them
3228 status = H5Dclose(dataset_state_id);
3229 AssertThrow(status >= 0, ExcIO());
3230 status = H5Dclose(dataset_id);
3231 AssertThrow(status >= 0, ExcIO());
3232 status = H5Dclose(dataset_property_id);
3233 AssertThrow(status >= 0, ExcIO());
3234
3235 // close the file.
3236 status = H5Fclose(file_id);
3237 AssertThrow(status >= 0, ExcIO());
3238 }
3239 // so far only the root process has the correct state integer --> broadcasting
3240 tmp.grid->send_to_inactive(&state_int, 1);
3241 // so far only the root process has the correct property integer -->
3242 // broadcasting
3243 tmp.grid->send_to_inactive(&property_int, 1);
3244
3245 tmp.state = static_cast<LAPACKSupport::State>(state_int);
3246 tmp.property = static_cast<LAPACKSupport::Property>(property_int);
3247
3248 tmp.copy_to(*this);
3249
3250# endif // DEAL_II_WITH_HDF5
3251}
3252
3253
3254
3255template <typename NumberType>
3256void
3257ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
3258{
3259# ifndef DEAL_II_WITH_HDF5
3260 (void)filename;
3261 AssertThrow(false, ExcNeedsHDF5());
3262# else
3263# ifndef H5_HAVE_PARALLEL
3265# else
3266
3267 const unsigned int n_mpi_processes(
3268 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
3269 MPI_Info info = MPI_INFO_NULL;
3270 /*
3271 * The content of the distributed matrix is copied to a matrix using a
3272 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
3273 * of the matrix, which they can read from the file
3274 */
3275 // create a 1xP column grid with P being the number of MPI processes
3276 const auto column_grid =
3277 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3278 1,
3279 n_mpi_processes);
3280
3281 const int MB = n_rows;
3282 // for the choice of NB see explanation in save_parallel()
3283 const int NB = std::max(static_cast<int>(std::ceil(
3284 static_cast<double>(n_columns) / n_mpi_processes)),
3285 column_block_size);
3286
3287 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
3288
3289 // get pointer to data held by the process
3290 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
3291
3292 herr_t status;
3293
3294 // set up file access property list with parallel I/O access
3295 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
3296 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
3297 AssertThrow(status >= 0, ExcIO());
3298
3299 // open file collectively in read-only mode and release property list
3300 // identifier
3301 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
3302 status = H5Pclose(plist_id);
3303 AssertThrow(status >= 0, ExcIO());
3304
3305 // open the dataset in the file collectively
3306 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3307
3308 // check the datatype of the dataset in the file
3309 // if the classes of type of the dataset and the matrix do not match abort
3310 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3311 // Selection
3312 hid_t datatype = hdf5_type_id(data);
3313 hid_t datatype_inp = H5Dget_type(dataset_id);
3314 H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
3315 H5T_class_t t_class = H5Tget_class(datatype);
3316 AssertThrow(
3317 t_class_inp == t_class,
3318 ExcMessage(
3319 "The data type of the matrix to be read does not match the archive"));
3320
3321 // get the dimensions of the matrix stored in the file
3322 // get dataspace handle
3323 hid_t dataspace_id = H5Dget_space(dataset_id);
3324 // get number of dimensions
3325 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3326 AssertThrow(ndims == 2, ExcIO());
3327 // get every dimension
3328 hsize_t dims[2];
3329 status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3330 AssertThrow(status >= 0, ExcIO());
3331 AssertThrow(
3332 static_cast<int>(dims[0]) == n_columns,
3333 ExcMessage(
3334 "The number of columns of the matrix does not match the content of the archive"));
3335 AssertThrow(
3336 static_cast<int>(dims[1]) == n_rows,
3337 ExcMessage(
3338 "The number of rows of the matrix does not match the content of the archive"));
3339
3340 // gather the number of local rows and columns from all processes
3341 std::vector<int> proc_n_local_rows(n_mpi_processes),
3342 proc_n_local_columns(n_mpi_processes);
3343 int ierr = MPI_Allgather(&tmp.n_local_rows,
3344 1,
3345 MPI_INT,
3346 proc_n_local_rows.data(),
3347 1,
3348 MPI_INT,
3349 tmp.grid->mpi_communicator);
3350 AssertThrowMPI(ierr);
3351 ierr = MPI_Allgather(&tmp.n_local_columns,
3352 1,
3353 MPI_INT,
3354 proc_n_local_columns.data(),
3355 1,
3356 MPI_INT,
3357 tmp.grid->mpi_communicator);
3358 AssertThrowMPI(ierr);
3359
3360 const unsigned int my_rank(
3361 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
3362
3363 // hyperslab selection parameters
3364 // each process defines a dataset in memory and reads its part of the
3365 // hyperslab from the file
3366 hsize_t count[2];
3367 count[0] = tmp.n_local_columns;
3368 count[1] = tmp.n_local_rows;
3369
3370 hsize_t offset[2] = {0};
3371 for (unsigned int i = 0; i < my_rank; ++i)
3372 offset[0] += proc_n_local_columns[i];
3373
3374 // select hyperslab in the file
3375 status = H5Sselect_hyperslab(
3376 dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
3377 AssertThrow(status >= 0, ExcIO());
3378
3379 // create a memory dataspace independently
3380 hid_t memspace = H5Screate_simple(2, count, nullptr);
3381
3382 // read data independently
3383 status =
3384 H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
3385 AssertThrow(status >= 0, ExcIO());
3386
3387 // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
3388 hid_t state_enum_id, property_enum_id;
3389 internal::create_HDF5_state_enum_id(state_enum_id);
3390 internal::create_HDF5_property_enum_id(property_enum_id);
3391
3392 // open the datasets for the state and property enum in the file
3393 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3394 hid_t datatype_state = H5Dget_type(dataset_state_id);
3395 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3396 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3397
3398 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3399 hid_t datatype_property = H5Dget_type(dataset_property_id);
3400 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3401 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3402
3403 // get dataspace handles
3404 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3405 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3406 // get number of dimensions
3407 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3408 AssertThrow(ndims_state == 1, ExcIO());
3409 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3410 AssertThrow(ndims_property == 1, ExcIO());
3411 // get every dimension
3412 hsize_t dims_state[1];
3413 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3414 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3415 hsize_t dims_property[1];
3416 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3417 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3418
3419 // read data
3420 status = H5Dread(
3421 dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
3422 AssertThrow(status >= 0, ExcIO());
3423
3424 status = H5Dread(dataset_property_id,
3425 property_enum_id,
3426 H5S_ALL,
3427 H5S_ALL,
3428 H5P_DEFAULT,
3429 &tmp.property);
3430 AssertThrow(status >= 0, ExcIO());
3431
3432 // close/release sources
3433 status = H5Sclose(memspace);
3434 AssertThrow(status >= 0, ExcIO());
3435 status = H5Dclose(dataset_id);
3436 AssertThrow(status >= 0, ExcIO());
3437 status = H5Dclose(dataset_state_id);
3438 AssertThrow(status >= 0, ExcIO());
3439 status = H5Dclose(dataset_property_id);
3440 AssertThrow(status >= 0, ExcIO());
3441 status = H5Sclose(dataspace_id);
3442 AssertThrow(status >= 0, ExcIO());
3443 status = H5Sclose(dataspace_state);
3444 AssertThrow(status >= 0, ExcIO());
3445 status = H5Sclose(dataspace_property);
3446 AssertThrow(status >= 0, ExcIO());
3447 // status = H5Tclose(datatype);
3448 // AssertThrow(status >= 0, ExcIO());
3449 status = H5Tclose(state_enum_id);
3450 AssertThrow(status >= 0, ExcIO());
3451 status = H5Tclose(property_enum_id);
3452 AssertThrow(status >= 0, ExcIO());
3453 status = H5Fclose(file_id);
3454 AssertThrow(status >= 0, ExcIO());
3455
3456 // copying the distributed matrices
3457 tmp.copy_to(*this);
3458
3459# endif // H5_HAVE_PARALLEL
3460# endif // DEAL_II_WITH_HDF5
3461}
3462
3463
3464
3465namespace internal
3466{
3467 namespace
3468 {
3469 template <typename NumberType>
3470 void
3471 scale_columns(ScaLAPACKMatrix<NumberType> &matrix,
3472 const ArrayView<const NumberType> &factors)
3473 {
3474 Assert(matrix.n() == factors.size(),
3475 ExcDimensionMismatch(matrix.n(), factors.size()));
3476
3477 for (unsigned int i = 0; i < matrix.local_n(); ++i)
3478 {
3479 const NumberType s = factors[matrix.global_column(i)];
3480
3481 for (unsigned int j = 0; j < matrix.local_m(); ++j)
3482 matrix.local_el(j, i) *= s;
3483 }
3484 }
3485
3486 template <typename NumberType>
3487 void
3488 scale_rows(ScaLAPACKMatrix<NumberType> &matrix,
3489 const ArrayView<const NumberType> &factors)
3490 {
3491 Assert(matrix.m() == factors.size(),
3492 ExcDimensionMismatch(matrix.m(), factors.size()));
3493
3494 for (unsigned int i = 0; i < matrix.local_m(); ++i)
3495 {
3496 const NumberType s = factors[matrix.global_row(i)];
3497
3498 for (unsigned int j = 0; j < matrix.local_n(); ++j)
3499 matrix.local_el(i, j) *= s;
3500 }
3501 }
3502
3503 } // namespace
3504} // namespace internal
3505
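// Hypothetical usage sketch (not part of the original source): scaling each
// column j of a distributed matrix by a factor alpha[j]:
//
//   std::vector<double> alpha(A.n(), 2.);
//   A.scale_columns(alpha); // every entry in column j is multiplied by alpha[j]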
3506
3507
3508template <typename NumberType>
3509template <class InputVector>
3510void
3511ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
3512{
3513 if (this->grid->mpi_process_is_active)
3514 internal::scale_columns(*this, make_array_view(factors));
3515}
3516
3517
3518
3519template <typename NumberType>
3520template <class InputVector>
3521void
3522ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
3523{
3524 if (this->grid->mpi_process_is_active)
3525 internal::scale_rows(*this, make_array_view(factors));
3526}
3527
3528
3529
3530// instantiations
3531# include "lac/scalapack.inst"
3532
3533
3534DEAL_II_NAMESPACE_CLOSE
3535
3536#endif // DEAL_II_WITH_SCALAPACK