deal.II version GIT relicensing-2289-g1e5549a87a 2024-12-21 21:30:00+00:00
scalapack.cc
Go to the documentation of this file.
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2017 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#include <deal.II/lac/scalapack.h>
17
18#ifdef DEAL_II_WITH_SCALAPACK
19
21# include <deal.II/base/mpi.h>
22# include <deal.II/base/mpi.templates.h>
23
24# include <deal.II/lac/scalapack.templates.h>
25
26# ifdef DEAL_II_WITH_HDF5
27# include <hdf5.h>
28# endif
29
30# include <limits>
31# include <memory>
32
33DEAL_II_NAMESPACE_OPEN
34
35# ifdef DEAL_II_WITH_HDF5
36
37template <typename number>
38inline hid_t
39hdf5_type_id(const number *)
40{
41 DEAL_II_NOT_IMPLEMENTED();
42 // don't know what to put here; it does not matter
43 return -1;
44}
45
46inline hid_t
47hdf5_type_id(const double *)
48{
49 return H5T_NATIVE_DOUBLE;
50}
51
52inline hid_t
53hdf5_type_id(const float *)
54{
55 return H5T_NATIVE_FLOAT;
56}
57
58inline hid_t
59hdf5_type_id(const int *)
60{
61 return H5T_NATIVE_INT;
62}
63
64inline hid_t
65hdf5_type_id(const unsigned int *)
66{
67 return H5T_NATIVE_UINT;
68}
69
70inline hid_t
71hdf5_type_id(const char *)
72{
73 return H5T_NATIVE_CHAR;
74}
75# endif // DEAL_II_WITH_HDF5
76
77
78
79template <typename NumberType>
80ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
81 const size_type n_rows_,
82 const size_type n_columns_,
83 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
84 const size_type row_block_size_,
85 const size_type column_block_size_,
86 const LAPACKSupport::Property property_)
87 : uplo('L')
88 , // for non-symmetric matrices this is not needed
89 first_process_row(0)
90 , first_process_column(0)
91 , submatrix_row(1)
92 , submatrix_column(1)
93{
94 reinit(n_rows_,
95 n_columns_,
96 process_grid,
97 row_block_size_,
98 column_block_size_,
99 property_);
100}
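// Usage sketch (not part of scalapack.cc; an illustration assuming deal.II was
// configured with ScaLAPACK and MPI is initialized; n and block are example
// values): construct a 2d process grid and a distributed matrix on it.
//
// @code
// const unsigned int n = 1000, block = 32;
// const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//   MPI_COMM_WORLD, n, n, block, block);
// ScaLAPACKMatrix<double> A(n, n, grid, block, block);
// @endcode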
101
102
103
104template <typename NumberType>
105ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
106 const size_type size,
107 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
108 const size_type block_size,
109 const LAPACKSupport::Property property)
110 : ScaLAPACKMatrix<NumberType>(size,
111 size,
112 process_grid,
113 block_size,
114 block_size,
115 property)
116{}
117
118
119
120template <typename NumberType>
121ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
122 const std::string &filename,
123 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
124 const size_type row_block_size,
125 const size_type column_block_size)
126 : uplo('L')
127 , // for non-symmetric matrices this is not needed
128 first_process_row(0)
129 , first_process_column(0)
130 , submatrix_row(1)
131 , submatrix_column(1)
132{
133# ifndef DEAL_II_WITH_HDF5
134 (void)filename;
135 (void)process_grid;
136 (void)row_block_size;
137 (void)column_block_size;
138 Assert(
139 false,
141 "This function is only available when deal.II is configured with HDF5"));
142# else
143
144 const unsigned int this_mpi_process(
145 Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));
146
147 // Before reading the content from disk the root process determines the
148 // dimensions of the matrix. Subsequently, memory is allocated by a call to
149 // reinit() and the matrix is loaded by a call to load().
150 if (this_mpi_process == 0)
151 {
152 herr_t status = 0;
153
154 // open file in read-only mode
155 hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
156 AssertThrow(file >= 0, ExcIO());
157
158 // get data set in file
159 hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);
160 AssertThrow(dataset >= 0, ExcIO());
161
162 // determine file space
163 hid_t filespace = H5Dget_space(dataset);
164
165 // get number of dimensions in data set
166 int rank = H5Sget_simple_extent_ndims(filespace);
167 AssertThrow(rank == 2, ExcIO());
168 hsize_t dims[2];
169 status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
170 AssertThrow(status >= 0, ExcIO());
171
172 // due to ScaLAPACK's column-major memory layout the dimensions are
173 // swapped
174 n_rows = dims[1];
175 n_columns = dims[0];
176
177 // close/release resources
178 status = H5Sclose(filespace);
179 AssertThrow(status >= 0, ExcIO());
180 status = H5Dclose(dataset);
181 AssertThrow(status >= 0, ExcIO());
182 status = H5Fclose(file);
183 AssertThrow(status >= 0, ExcIO());
184 }
185 int ierr = MPI_Bcast(&n_rows,
186 1,
187 Utilities::MPI::mpi_type_id_for_type<decltype(n_rows)>,
188 0 /*from root*/,
189 process_grid->mpi_communicator);
190 AssertThrowMPI(ierr);
191
192 ierr = MPI_Bcast(&n_columns,
193 1,
194 Utilities::MPI::mpi_type_id_for_type<decltype(n_columns)>,
195 0 /*from root*/,
196 process_grid->mpi_communicator);
197 AssertThrowMPI(ierr);
198
199 // the property will be overwritten by the subsequent call to load()
200 reinit(n_rows,
201 n_columns,
202 process_grid,
203 row_block_size,
204 column_block_size,
205 LAPACKSupport::Property::general);
206
207 load(filename.c_str());
208
209# endif // DEAL_II_WITH_HDF5
210}
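// Usage sketch (not part of scalapack.cc; assumes deal.II was configured with
// HDF5 and that "matrix.h5" was written earlier by ScaLAPACKMatrix::save()):
// create a distributed matrix directly from a state file.
//
// @code
// const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//   MPI_COMM_WORLD, 1000, 1000, 32, 32);
// ScaLAPACKMatrix<double> A("matrix.h5", grid, 32, 32);
// @endcode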
211
212
213
214template <typename NumberType>
215void
216ScaLAPACKMatrix<NumberType>::reinit(
217 const size_type n_rows_,
218 const size_type n_columns_,
219 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
220 const size_type row_block_size_,
221 const size_type column_block_size_,
222 const LAPACKSupport::Property property_)
223{
224 Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
225 Assert(column_block_size_ > 0,
226 ExcMessage("Column block size has to be positive."));
227 Assert(
228 row_block_size_ <= n_rows_,
230 "Row block size can not be greater than the number of rows of the matrix"));
231 Assert(
232 column_block_size_ <= n_columns_,
234 "Column block size can not be greater than the number of columns of the matrix"));
235
236 state = LAPACKSupport::State::matrix;
237 property = property_;
238 grid = process_grid;
239 n_rows = n_rows_;
240 n_columns = n_columns_;
241 row_block_size = row_block_size_;
242 column_block_size = column_block_size_;
243
244 if (grid->mpi_process_is_active)
245 {
246 // Get local sizes:
247 n_local_rows = numroc_(&n_rows,
248 &row_block_size,
249 &(grid->this_process_row),
250 &first_process_row,
251 &(grid->n_process_rows));
252 n_local_columns = numroc_(&n_columns,
253 &column_block_size,
254 &(grid->this_process_column),
255 &first_process_column,
256 &(grid->n_process_columns));
257
258 // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different
259 // between processes
260 int lda = std::max(1, n_local_rows);
261
262 int info = 0;
263 descinit_(descriptor,
264 &n_rows,
265 &n_columns,
266 &row_block_size,
267 &column_block_size,
268 &first_process_row,
269 &first_process_column,
270 &(grid->blacs_context),
271 &lda,
272 &info);
273 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
274
275 this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
276 }
277 else
278 {
279 // set process-local variables to something telling:
280 n_local_rows = -1;
281 n_local_columns = -1;
282 std::fill(std::begin(descriptor), std::end(descriptor), -1);
283 }
284}
285
286
287
288template <typename NumberType>
289void
290ScaLAPACKMatrix<NumberType>::reinit(
291 const size_type size,
292 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
293 const size_type block_size,
294 const LAPACKSupport::Property property)
295{
296 reinit(size, size, process_grid, block_size, block_size, property);
297}
298
299
300
301template <typename NumberType>
302void
303ScaLAPACKMatrix<NumberType>::set_property(
304 const LAPACKSupport::Property property_)
305{
306 property = property_;
307}
308
309
310
311template <typename NumberType>
312LAPACKSupport::Property
313ScaLAPACKMatrix<NumberType>::get_property() const
314{
315 return property;
316}
317
318
319
320template <typename NumberType>
321LAPACKSupport::State
322ScaLAPACKMatrix<NumberType>::get_state() const
323{
324 return state;
325}
326
327
328
329template <typename NumberType>
330ScaLAPACKMatrix<NumberType> &
331ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
332{
333 // FIXME: another way to copy is to use pdgeadd_ PBLAS routine.
334 // This routine computes the sum of two matrices B := a*A + b*B.
335 // Matrices can have different distribution, in particular matrix A can
336 // be owned by only one process, so we can set a=1 and b=0 to copy
337 // non-distributed matrix A into distributed matrix B.
338 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
339 Assert(n_columns == int(matrix.n()),
340 ExcDimensionMismatch(n_columns, matrix.n()));
341
342 if (grid->mpi_process_is_active)
343 {
344 for (int i = 0; i < n_local_rows; ++i)
345 {
346 const int glob_i = global_row(i);
347 for (int j = 0; j < n_local_columns; ++j)
348 {
349 const int glob_j = global_column(j);
350 local_el(i, j) = matrix(glob_i, glob_j);
351 }
352 }
353 }
354 state = LAPACKSupport::matrix;
355 return *this;
356}
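// Usage sketch (not part of scalapack.cc): every process holds the same serial
// FullMatrix and assigns it to the distributed matrix; n, grid and block are
// the example names from the sketch above.
//
// @code
// FullMatrix<double> serial(n, n);
// // ... fill 'serial' identically on all processes ...
// ScaLAPACKMatrix<double> A(n, n, grid, block, block);
// A = serial;
// @endcode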
357
358
359
360template <typename NumberType>
361void
362ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
363 const unsigned int rank)
364{
365 if (n_rows * n_columns == 0)
366 return;
367
368 const unsigned int this_mpi_process(
369 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
370
371# ifdef DEBUG
372 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
373 ExcMessage("All processes have to call routine with identical rank"));
374 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
375 ExcMessage("All processes have to call routine with identical rank"));
376# endif
377
378 // root process has to be active in the grid of A
379 if (this_mpi_process == rank)
380 {
381 Assert(grid->mpi_process_is_active, ExcInternalError());
382 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
383 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
384 }
385 // Create 1x1 grid for matrix B.
386 // The underlying grid for matrix B only contains the process #rank.
387 // This grid will be used to copy the serial matrix B to the distributed
388 // matrix using the ScaLAPACK routine pgemr2d.
389 MPI_Group group_A;
390 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
391 const int n = 1;
392 const std::vector<int> ranks(n, rank);
393 MPI_Group group_B;
394 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
395 MPI_Comm communicator_B;
396
397 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_from;
398 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
399 group_B,
400 mpi_tag,
401 &communicator_B);
402 AssertThrowMPI(ierr);
403 int n_proc_rows_B = 1, n_proc_cols_B = 1;
404 int this_process_row_B = -1, this_process_column_B = -1;
405 int blacs_context_B = -1;
406 if (MPI_COMM_NULL != communicator_B)
407 {
408 // Initialize a BLACS context from the provided communicator
409 blacs_context_B = Csys2blacs_handle(communicator_B);
410 const char *order = "Col";
411 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
412 Cblacs_gridinfo(blacs_context_B,
413 &n_proc_rows_B,
414 &n_proc_cols_B,
415 &this_process_row_B,
416 &this_process_column_B);
417 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
418 // the active process of grid B has to be process #rank of the
419 // communicator attached to A
420 Assert(this_mpi_process == rank, ExcInternalError());
421 }
422 const bool mpi_process_is_active_B =
423 (this_process_row_B >= 0 && this_process_column_B >= 0);
424
425 // create descriptor for matrix B
426 std::vector<int> descriptor_B(9, -1);
427 const int first_process_row_B = 0, first_process_col_B = 0;
428
429 if (mpi_process_is_active_B)
430 {
431 // Get local sizes:
432 int n_local_rows_B = numroc_(&n_rows,
433 &n_rows,
434 &this_process_row_B,
435 &first_process_row_B,
436 &n_proc_rows_B);
437 int n_local_cols_B = numroc_(&n_columns,
438 &n_columns,
439 &this_process_column_B,
440 &first_process_col_B,
441 &n_proc_cols_B);
442 Assert(n_local_rows_B == n_rows, ExcInternalError());
443 Assert(n_local_cols_B == n_columns, ExcInternalError());
444 (void)n_local_cols_B;
445
446 int lda = std::max(1, n_local_rows_B);
447 int info = 0;
448 descinit_(descriptor_B.data(),
449 &n_rows,
450 &n_columns,
451 &n_rows,
452 &n_columns,
453 &first_process_row_B,
454 &first_process_col_B,
455 &blacs_context_B,
456 &lda,
457 &info);
458 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
459 }
460 if (this->grid->mpi_process_is_active)
461 {
462 const int ii = 1;
463 NumberType *loc_vals_A =
464 this->values.size() > 0 ? this->values.data() : nullptr;
465 const NumberType *loc_vals_B =
466 mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
467
468 // pgemr2d has to be called only for processes active on grid attached to
469 // matrix A
470 pgemr2d(&n_rows,
471 &n_columns,
472 loc_vals_B,
473 &ii,
474 &ii,
475 descriptor_B.data(),
476 loc_vals_A,
477 &ii,
478 &ii,
479 this->descriptor,
480 &(this->grid->blacs_context));
481 }
482 if (mpi_process_is_active_B)
483 Cblacs_gridexit(blacs_context_B);
484
485 MPI_Group_free(&group_A);
486 MPI_Group_free(&group_B);
487 if (MPI_COMM_NULL != communicator_B)
488 Utilities::MPI::free_communicator(communicator_B);
489
490 state = LAPACKSupport::matrix;
491}
492
493
494
495template <typename NumberType>
496unsigned int
497ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
498{
499 Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
500 ExcIndexRange(loc_row, 0, n_local_rows));
501 const int i = loc_row + 1;
502 return indxl2g_(&i,
503 &row_block_size,
504 &(grid->this_process_row),
505 &first_process_row,
506 &(grid->n_process_rows)) -
507 1;
508}
509
510
511
512template <typename NumberType>
513unsigned int
514ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
515{
516 Assert(n_local_columns >= 0 &&
517 loc_column < static_cast<unsigned int>(n_local_columns),
518 ExcIndexRange(loc_column, 0, n_local_columns));
519 const int j = loc_column + 1;
520 return indxl2g_(&j,
521 &column_block_size,
522 &(grid->this_process_column),
523 &first_process_column,
524 &(grid->n_process_columns)) -
525 1;
526}
527
528
529
530template <typename NumberType>
531void
532ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
533 const unsigned int rank) const
534{
535 if (n_rows * n_columns == 0)
536 return;
537
538 const unsigned int this_mpi_process(
539 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
540
541# ifdef DEBUG
542 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
543 ExcMessage("All processes have to call routine with identical rank"));
544 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
545 ExcMessage("All processes have to call routine with identical rank"));
546# endif
547
548 if (this_mpi_process == rank)
549 {
550 // the process which gets the serial copy has to be in the process grid
551 Assert(this->grid->is_process_active(), ExcInternalError());
552 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
553 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
554 }
555
556 // Create 1x1 grid for matrix B.
557 // The underlying grid for matrix B only contains the process #rank.
559 // This grid will be used to copy the distributed matrix to the serial
559 // matrix B using the ScaLAPACK routine pgemr2d.
560 MPI_Group group_A;
561 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
562 const int n = 1;
563 const std::vector<int> ranks(n, rank);
564 MPI_Group group_B;
565 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
566 MPI_Comm communicator_B;
567
568 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to;
569 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
570 group_B,
571 mpi_tag,
572 &communicator_B);
573 AssertThrowMPI(ierr);
574 int n_proc_rows_B = 1, n_proc_cols_B = 1;
575 int this_process_row_B = -1, this_process_column_B = -1;
576 int blacs_context_B = -1;
577 if (MPI_COMM_NULL != communicator_B)
578 {
580 // Initialize a BLACS context from the provided communicator
580 blacs_context_B = Csys2blacs_handle(communicator_B);
581 const char *order = "Col";
582 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
583 Cblacs_gridinfo(blacs_context_B,
584 &n_proc_rows_B,
585 &n_proc_cols_B,
586 &this_process_row_B,
587 &this_process_column_B);
588 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
589 // the active process of grid B has to be process #rank of the
590 // communicator attached to A
591 Assert(this_mpi_process == rank, ExcInternalError());
592 }
593 const bool mpi_process_is_active_B =
594 (this_process_row_B >= 0 && this_process_column_B >= 0);
595
596 // create descriptor for matrix B
597 std::vector<int> descriptor_B(9, -1);
598 const int first_process_row_B = 0, first_process_col_B = 0;
599
600 if (mpi_process_is_active_B)
601 {
602 // Get local sizes:
603 int n_local_rows_B = numroc_(&n_rows,
604 &n_rows,
605 &this_process_row_B,
606 &first_process_row_B,
607 &n_proc_rows_B);
608 int n_local_cols_B = numroc_(&n_columns,
609 &n_columns,
610 &this_process_column_B,
611 &first_process_col_B,
612 &n_proc_cols_B);
613 Assert(n_local_rows_B == n_rows, ExcInternalError());
614 Assert(n_local_cols_B == n_columns, ExcInternalError());
615 (void)n_local_cols_B;
616
617 int lda = std::max(1, n_local_rows_B);
618 int info = 0;
619 // fill descriptor for matrix B
620 descinit_(descriptor_B.data(),
621 &n_rows,
622 &n_columns,
623 &n_rows,
624 &n_columns,
625 &first_process_row_B,
626 &first_process_col_B,
627 &blacs_context_B,
628 &lda,
629 &info);
630 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
631 }
632 // pgemr2d has to be called only for processes active on grid attached to
633 // matrix A
634 if (this->grid->mpi_process_is_active)
635 {
636 const int ii = 1;
637 const NumberType *loc_vals_A =
638 this->values.size() > 0 ? this->values.data() : nullptr;
639 NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
640
641 pgemr2d(&n_rows,
642 &n_columns,
643 loc_vals_A,
644 &ii,
645 &ii,
646 this->descriptor,
647 loc_vals_B,
648 &ii,
649 &ii,
650 descriptor_B.data(),
651 &(this->grid->blacs_context));
652 }
653 if (mpi_process_is_active_B)
654 Cblacs_gridexit(blacs_context_B);
655
656 MPI_Group_free(&group_A);
657 MPI_Group_free(&group_B);
658 if (MPI_COMM_NULL != communicator_B)
659 Utilities::MPI::free_communicator(communicator_B);
660}
661
662
663
664template <typename NumberType>
665void
666ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
667{
668 // FIXME: use PDGEMR2d for copying?
669 // PDGEMR2d copies a submatrix of A on a submatrix of B.
670 // A and B can have different distributions
671 // see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
672 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
673 Assert(n_columns == int(matrix.n()),
674 ExcDimensionMismatch(n_columns, matrix.n()));
675
676 matrix = 0.;
677 if (grid->mpi_process_is_active)
678 {
679 for (int i = 0; i < n_local_rows; ++i)
680 {
681 const int glob_i = global_row(i);
682 for (int j = 0; j < n_local_columns; ++j)
683 {
684 const int glob_j = global_column(j);
685 matrix(glob_i, glob_j) = local_el(i, j);
686 }
687 }
688 }
689 Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
690
691 // we could move the following lines under the main loop above,
692 // but they would be dependent on glob_i and glob_j, which
693 // won't make it much prettier
694 if (state == LAPACKSupport::cholesky)
695 {
696 if (property == LAPACKSupport::lower_triangular)
697 for (unsigned int i = 0; i < matrix.n(); ++i)
698 for (unsigned int j = i + 1; j < matrix.m(); ++j)
699 matrix(i, j) = 0.;
700 else if (property == LAPACKSupport::upper_triangular)
701 for (unsigned int i = 0; i < matrix.n(); ++i)
702 for (unsigned int j = 0; j < i; ++j)
703 matrix(i, j) = 0.;
704 }
705 else if (property == LAPACKSupport::symmetric &&
706 state == LAPACKSupport::inverse_matrix)
707 {
708 if (uplo == 'L')
709 for (unsigned int i = 0; i < matrix.n(); ++i)
710 for (unsigned int j = i + 1; j < matrix.m(); ++j)
711 matrix(i, j) = matrix(j, i);
712 else if (uplo == 'U')
713 for (unsigned int i = 0; i < matrix.n(); ++i)
714 for (unsigned int j = 0; j < i; ++j)
715 matrix(i, j) = matrix(j, i);
716 }
717}
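// Usage sketch (not part of scalapack.cc): gather a distributed matrix into a
// serial FullMatrix; after the call every process holds identical entries.
//
// @code
// FullMatrix<double> full(A.m(), A.n());
// A.copy_to(full);
// @endcode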
718
719
720
721template <typename NumberType>
722void
723ScaLAPACKMatrix<NumberType>::copy_to(
724 ScaLAPACKMatrix<NumberType> &B,
725 const std::pair<unsigned int, unsigned int> &offset_A,
726 const std::pair<unsigned int, unsigned int> &offset_B,
727 const std::pair<unsigned int, unsigned int> &submatrix_size) const
728{
729 // submatrix is empty
730 if (submatrix_size.first == 0 || submatrix_size.second == 0)
731 return;
732
733 // range checking for matrix A
734 AssertIndexRange(offset_A.first, n_rows - submatrix_size.first + 1);
735 AssertIndexRange(offset_A.second, n_columns - submatrix_size.second + 1);
736
737 // range checking for matrix B
738 AssertIndexRange(offset_B.first, B.n_rows - submatrix_size.first + 1);
739 AssertIndexRange(offset_B.second, B.n_columns - submatrix_size.second + 1);
740
741 // Currently, copying of matrices will only be supported if A and B share the
742 // same MPI communicator
743 int ierr, comparison;
744 ierr = MPI_Comm_compare(grid->mpi_communicator,
745 B.grid->mpi_communicator,
746 &comparison);
747 AssertThrowMPI(ierr);
748 Assert(comparison == MPI_IDENT,
749 ExcMessage("Matrix A and B must have a common MPI Communicator"));
750
751 /*
752 * The routine pgemr2d requires a BLACS context resembling at least the union
753 * of process grids described by the BLACS contexts held by the ProcessGrids
754 * of matrix A and B. As A and B share the same MPI communicator, there is no
755 * need to create a union MPI communicator to initialize the BLACS context
756 */
757 int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
758 const char *order = "Col";
759 int union_n_process_rows =
760 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
761 int union_n_process_columns = 1;
762 Cblacs_gridinit(&union_blacs_context,
763 order,
764 union_n_process_rows,
765 union_n_process_columns);
766
767 int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
768 Cblacs_gridinfo(this->grid->blacs_context,
769 &n_grid_rows_A,
770 &n_grid_columns_A,
771 &my_row_A,
772 &my_column_A);
773
774 // check whether process is in the BLACS context of matrix A
775 const bool in_context_A =
776 (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
777 (my_column_A >= 0 && my_column_A < n_grid_columns_A);
778
779 int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
780 Cblacs_gridinfo(B.grid->blacs_context,
781 &n_grid_rows_B,
782 &n_grid_columns_B,
783 &my_row_B,
784 &my_column_B);
785
786 // check whether process is in the BLACS context of matrix B
787 const bool in_context_B =
788 (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
789 (my_column_B >= 0 && my_column_B < n_grid_columns_B);
790
791 const int n_rows_submatrix = submatrix_size.first;
792 const int n_columns_submatrix = submatrix_size.second;
793
794 // due to Fortran indexing one has to be added
795 int ia = offset_A.first + 1, ja = offset_A.second + 1;
796 int ib = offset_B.first + 1, jb = offset_B.second + 1;
797
798 std::array<int, 9> desc_A, desc_B;
799
800 const NumberType *loc_vals_A = nullptr;
801 NumberType *loc_vals_B = nullptr;
802
803 // Note: the function pgemr2d has to be called for all processes in the union
804 // BLACS context. If the calling process is not part of the BLACS context of A,
805 // desc_A[1] has to be -1 and all other parameters do not have to be set. If
806 // the calling process is not part of the BLACS context of B, desc_B[1] has to
807 // be -1 and all other parameters do not have to be set.
808 if (in_context_A)
809 {
810 if (this->values.size() != 0)
811 loc_vals_A = this->values.data();
812
813 for (unsigned int i = 0; i < desc_A.size(); ++i)
814 desc_A[i] = this->descriptor[i];
815 }
816 else
817 desc_A[1] = -1;
818
819 if (in_context_B)
820 {
821 if (B.values.size() != 0)
822 loc_vals_B = B.values.data();
823
824 for (unsigned int i = 0; i < desc_B.size(); ++i)
825 desc_B[i] = B.descriptor[i];
826 }
827 else
828 desc_B[1] = -1;
829
830 pgemr2d(&n_rows_submatrix,
831 &n_columns_submatrix,
832 loc_vals_A,
833 &ia,
834 &ja,
835 desc_A.data(),
836 loc_vals_B,
837 &ib,
838 &jb,
839 desc_B.data(),
840 &union_blacs_context);
841
842 B.state = LAPACKSupport::matrix;
843
844 // releasing the union BLACS context
845 Cblacs_gridexit(union_blacs_context);
846}
847
848
849
850template <typename NumberType>
851void
852ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
853{
854 Assert(n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
855 Assert(n_columns == dest.n_columns,
856 ExcDimensionMismatch(n_columns, dest.n_columns));
857
858 if (this->grid->mpi_process_is_active)
859 AssertThrow(
860 this->descriptor[0] == 1,
861 ExcMessage(
862 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
863 if (dest.grid->mpi_process_is_active)
864 AssertThrow(
865 dest.descriptor[0] == 1,
866 ExcMessage(
867 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
868
869 /*
870 * in case of different process grids or block-cyclic distributions,
871 * inter-process communication is necessary;
872 * if the distributed matrices have the same process grid and block sizes,
873 * local copying is enough
874 */
875 if ((this->grid != dest.grid) || (row_block_size != dest.row_block_size) ||
876 (column_block_size != dest.column_block_size))
877 {
878 /*
879 * get the MPI communicator, which is the union of the source and
880 * destination MPI communicator
881 */
882 int ierr = 0;
883 MPI_Group group_source, group_dest, group_union;
884 ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
885 AssertThrowMPI(ierr);
886 ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
887 AssertThrowMPI(ierr);
888 ierr = MPI_Group_union(group_source, group_dest, &group_union);
889 AssertThrowMPI(ierr);
890 MPI_Comm mpi_communicator_union;
891
892 // to create a communicator representing the union of the source
893 // and destination MPI communicator we need a communicator that
894 // is guaranteed to contain all desired processes -- i.e.,
895 // MPI_COMM_WORLD. on the other hand, as documented in the MPI
896 // standard, MPI_Comm_create_group is not collective on all
897 // processes in the first argument, but instead is collective on
898 // only those processes listed in the group. in other words,
899 // there is really no harm in passing MPI_COMM_WORLD as the
900 // first argument, even if the program we are currently running
901 // and that is calling this function only works on a subset of
902 // processes. the same holds for the wrapper/fallback we are using here.
903
904 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to2;
905 ierr = MPI_Comm_create_group(MPI_COMM_WORLD,
906 group_union,
907 mpi_tag,
908 &mpi_communicator_union);
909 AssertThrowMPI(ierr);
910
911 /*
912 * The routine pgemr2d requires a BLACS context resembling at least the
913 * union of process grids described by the BLACS contexts of matrix A and
914 * B
915 */
916 int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
917 const char *order = "Col";
918 int union_n_process_rows =
919 Utilities::MPI::n_mpi_processes(mpi_communicator_union);
920 int union_n_process_columns = 1;
921 Cblacs_gridinit(&union_blacs_context,
922 order,
923 union_n_process_rows,
924 union_n_process_columns);
925
926 const NumberType *loc_vals_source = nullptr;
927 NumberType *loc_vals_dest = nullptr;
928
929 if (this->grid->mpi_process_is_active && (this->values.size() > 0))
930 {
931 AssertThrow(this->values.size() > 0,
933 "source: process is active but local matrix empty"));
934 loc_vals_source = this->values.data();
935 }
936 if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
937 {
938 AssertThrow(
939 dest.values.size() > 0,
940 ExcMessage(
941 "destination: process is active but local matrix empty"));
942 loc_vals_dest = dest.values.data();
943 }
944 pgemr2d(&n_rows,
945 &n_columns,
946 loc_vals_source,
947 &submatrix_row,
948 &submatrix_column,
949 descriptor,
950 loc_vals_dest,
951 &dest.submatrix_row,
952 &dest.submatrix_column,
953 dest.descriptor,
954 &union_blacs_context);
955
956 Cblacs_gridexit(union_blacs_context);
957
958 if (mpi_communicator_union != MPI_COMM_NULL)
959 Utilities::MPI::free_communicator(mpi_communicator_union);
960 ierr = MPI_Group_free(&group_source);
961 AssertThrowMPI(ierr);
962 ierr = MPI_Group_free(&group_dest);
963 AssertThrowMPI(ierr);
964 ierr = MPI_Group_free(&group_union);
965 AssertThrowMPI(ierr);
966 }
967 else
968 // process is active in the process grid
969 if (this->grid->mpi_process_is_active)
970 dest.values = this->values;
971
972 dest.state = state;
973 dest.property = property;
974}
975
976
977
978template <typename NumberType>
979void
980ScaLAPACKMatrix<NumberType>::copy_transposed(
981 const ScaLAPACKMatrix<NumberType> &B)
982{
983 add(B, 0, 1, true);
984}
985
986
987
988template <typename NumberType>
989void
990ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
991 const NumberType alpha,
992 const NumberType beta,
993 const bool transpose_B)
994{
995 if (transpose_B)
996 {
997 Assert(n_rows == B.n_columns, ExcDimensionMismatch(n_rows, B.n_columns));
998 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
999 Assert(column_block_size == B.row_block_size,
1000 ExcDimensionMismatch(column_block_size, B.row_block_size));
1001 Assert(row_block_size == B.column_block_size,
1002 ExcDimensionMismatch(row_block_size, B.column_block_size));
1003 }
1004 else
1005 {
1006 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
1007 Assert(n_columns == B.n_columns,
1008 ExcDimensionMismatch(n_columns, B.n_columns));
1009 Assert(column_block_size == B.column_block_size,
1010 ExcDimensionMismatch(column_block_size, B.column_block_size));
1011 Assert(row_block_size == B.row_block_size,
1012 ExcDimensionMismatch(row_block_size, B.row_block_size));
1013 }
1014 Assert(this->grid == B.grid,
1015 ExcMessage("The matrices A and B need to have the same process grid"));
1016
1017 if (this->grid->mpi_process_is_active)
1018 {
1019 char trans_b = transpose_B ? 'T' : 'N';
1020 NumberType *A_loc =
1021 (this->values.size() > 0) ? this->values.data() : nullptr;
1022 const NumberType *B_loc =
1023 (B.values.size() > 0) ? B.values.data() : nullptr;
1024
1025 pgeadd(&trans_b,
1026 &n_rows,
1027 &n_columns,
1028 &beta,
1029 B_loc,
1030 &B.submatrix_row,
1031 &B.submatrix_column,
1032 B.descriptor,
1033 &alpha,
1034 A_loc,
1035 &submatrix_row,
1036 &submatrix_column,
1037 descriptor);
1038 }
1039 state = LAPACKSupport::matrix;
1040}
1041
1042
1043
1044template <typename NumberType>
1045void
1046ScaLAPACKMatrix<NumberType>::add(const NumberType a,
1047 const ScaLAPACKMatrix<NumberType> &B)
1048{
1049 add(B, 1, a, false);
1050}
1051
1052
1053
1054template <typename NumberType>
1055void
1056ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
1057 const ScaLAPACKMatrix<NumberType> &B)
1058{
1059 add(B, 1, a, true);
1060}
1061
1062
1063
1064template <typename NumberType>
1065void
1066ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
1067 const ScaLAPACKMatrix<NumberType> &B,
1068 const NumberType c,
1069 ScaLAPACKMatrix<NumberType> &C,
1070 const bool transpose_A,
1071 const bool transpose_B) const
1072{
1073 Assert(this->grid == B.grid,
1074 ExcMessage("The matrices A and B need to have the same process grid"));
1075 Assert(C.grid == B.grid,
1076 ExcMessage("The matrices B and C need to have the same process grid"));
1077
1078 // see for further info:
1079 // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgemm.htm
1080 if (!transpose_A && !transpose_B)
1081 {
1082 Assert(this->n_columns == B.n_rows,
1083 ExcDimensionMismatch(this->n_columns, B.n_rows));
1084 Assert(this->n_rows == C.n_rows,
1085 ExcDimensionMismatch(this->n_rows, C.n_rows));
1086 Assert(B.n_columns == C.n_columns,
1087 ExcDimensionMismatch(B.n_columns, C.n_columns));
1088 Assert(this->row_block_size == C.row_block_size,
1089 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1090 Assert(this->column_block_size == B.row_block_size,
1091 ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1092 Assert(B.column_block_size == C.column_block_size,
1093 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1094 }
1095 else if (transpose_A && !transpose_B)
1096 {
1097 Assert(this->n_rows == B.n_rows,
1098 ExcDimensionMismatch(this->n_rows, B.n_rows));
1099 Assert(this->n_columns == C.n_rows,
1100 ExcDimensionMismatch(this->n_columns, C.n_rows));
1101 Assert(B.n_columns == C.n_columns,
1102 ExcDimensionMismatch(B.n_columns, C.n_columns));
1103 Assert(this->column_block_size == C.row_block_size,
1104 ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1105 Assert(this->row_block_size == B.row_block_size,
1106 ExcDimensionMismatch(this->row_block_size, B.row_block_size));
1107 Assert(B.column_block_size == C.column_block_size,
1108 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1109 }
1110 else if (!transpose_A && transpose_B)
1111 {
1112 Assert(this->n_columns == B.n_columns,
1113 ExcDimensionMismatch(this->n_columns, B.n_columns));
1114 Assert(this->n_rows == C.n_rows,
1115 ExcDimensionMismatch(this->n_rows, C.n_rows));
1116 Assert(B.n_rows == C.n_columns,
1117 ExcDimensionMismatch(B.n_rows, C.n_columns));
1118 Assert(this->row_block_size == C.row_block_size,
1119 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1120 Assert(this->column_block_size == B.column_block_size,
1121 ExcDimensionMismatch(this->column_block_size,
1122 B.column_block_size));
1123 Assert(B.row_block_size == C.column_block_size,
1124 ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1125 }
1126 else // if (transpose_A && transpose_B)
1127 {
1128 Assert(this->n_rows == B.n_columns,
1129 ExcDimensionMismatch(this->n_rows, B.n_columns));
1130 Assert(this->n_columns == C.n_rows,
1131 ExcDimensionMismatch(this->n_columns, C.n_rows));
1132 Assert(B.n_rows == C.n_columns,
1133 ExcDimensionMismatch(B.n_rows, C.n_columns));
1134 Assert(this->column_block_size == C.row_block_size,
1135 ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1136 Assert(this->row_block_size == B.column_block_size,
1137 ExcDimensionMismatch(this->row_block_size, B.column_block_size));
1138 Assert(B.row_block_size == C.column_block_size,
1139 ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1140 }
1141
1142 if (this->grid->mpi_process_is_active)
1143 {
1144 char trans_a = transpose_A ? 'T' : 'N';
1145 char trans_b = transpose_B ? 'T' : 'N';
1146
1147 const NumberType *A_loc =
1148 (this->values.size() > 0) ? this->values.data() : nullptr;
1149 const NumberType *B_loc =
1150 (B.values.size() > 0) ? B.values.data() : nullptr;
1151 NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
1152 int m = C.n_rows;
1153 int n = C.n_columns;
1154 int k = transpose_A ? this->n_rows : this->n_columns;
1155
1156 pgemm(&trans_a,
1157 &trans_b,
1158 &m,
1159 &n,
1160 &k,
1161 &b,
1162 A_loc,
1163 &(this->submatrix_row),
1164 &(this->submatrix_column),
1165 this->descriptor,
1166 B_loc,
1167 &B.submatrix_row,
1168 &B.submatrix_column,
1169 B.descriptor,
1170 &c,
1171 C_loc,
1172 &C.submatrix_row,
1173 &C.submatrix_column,
1174 C.descriptor);
1175 }
1176 C.state = LAPACKSupport::matrix;
1177}
1178
1179
1180
1181template <typename NumberType>
1182void
1183ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> &C,
1184 const ScaLAPACKMatrix<NumberType> &B,
1185 const bool adding) const
1186{
1187 if (adding)
1188 mult(1., B, 1., C, false, false);
1189 else
1190 mult(1., B, 0, C, false, false);
1191}
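// Usage sketch (not part of scalapack.cc): C = A * B for distributed matrices
// that live on the same process grid; m, k, n, grid and block are example
// values.
//
// @code
// ScaLAPACKMatrix<double> A(m, k, grid, block, block);
// ScaLAPACKMatrix<double> B(k, n, grid, block, block);
// ScaLAPACKMatrix<double> C(m, n, grid, block, block);
// // ... fill A and B ...
// A.mmult(C, B); // C = A * B
// @endcode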
1192
1193
1194
1195template <typename NumberType>
1196void
1197ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> &C,
1198 const ScaLAPACKMatrix<NumberType> &B,
1199 const bool adding) const
1200{
1201 if (adding)
1202 mult(1., B, 1., C, true, false);
1203 else
1204 mult(1., B, 0, C, true, false);
1205}
1206
1207
1208
1209template <typename NumberType>
1210void
1211ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> &C,
1212 const ScaLAPACKMatrix<NumberType> &B,
1213 const bool adding) const
1214{
1215 if (adding)
1216 mult(1., B, 1., C, false, true);
1217 else
1218 mult(1., B, 0, C, false, true);
1219}
1220
1221
1222
1223template <typename NumberType>
1224void
1225ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> &C,
1226 const ScaLAPACKMatrix<NumberType> &B,
1227 const bool adding) const
1228{
1229 if (adding)
1230 mult(1., B, 1., C, true, true);
1231 else
1232 mult(1., B, 0, C, true, true);
1233}
1234
1235
1236
1237template <typename NumberType>
1238void
1239ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
1240{
1241 Assert(
1242 n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
1243 ExcMessage(
1244 "Cholesky factorization can be applied to symmetric matrices only."));
1245 Assert(state == LAPACKSupport::matrix,
1246 ExcMessage(
1247 "Matrix has to be in Matrix state before calling this function."));
1248
1249 if (grid->mpi_process_is_active)
1250 {
1251 int info = 0;
1252 NumberType *A_loc = this->values.data();
1253 // pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
1254 ppotrf(&uplo,
1255 &n_columns,
1256 A_loc,
1257 &submatrix_row,
1258 &submatrix_column,
1259 descriptor,
1260 &info);
1261 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
1262 }
1263 state = LAPACKSupport::cholesky;
1264 property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
1265 LAPACKSupport::upper_triangular);
1266}
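// Usage sketch (not part of scalapack.cc; assumes A holds a symmetric positive
// definite matrix): compute the Cholesky factor in place.
//
// @code
// A.set_property(LAPACKSupport::Property::symmetric);
// A.compute_cholesky_factorization();
// @endcode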
1267
1268
1269
1270template <typename NumberType>
1271void
1272ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
1273{
1274 Assert(state == LAPACKSupport::matrix,
1275 ExcMessage(
1276 "Matrix has to be in Matrix state before calling this function."));
1277
1278 if (grid->mpi_process_is_active)
1279 {
1280 int info = 0;
1281 NumberType *A_loc = this->values.data();
1282
1283 const int iarow = indxg2p_(&submatrix_row,
1284 &row_block_size,
1285 &(grid->this_process_row),
1286 &first_process_row,
1287 &(grid->n_process_rows));
1288 const int mp = numroc_(&n_rows,
1289 &row_block_size,
1290 &(grid->this_process_row),
1291 &iarow,
1292 &(grid->n_process_rows));
1293 ipiv.resize(mp + row_block_size);
1294
1295 pgetrf(&n_rows,
1296 &n_columns,
1297 A_loc,
1298 &submatrix_row,
1299 &submatrix_column,
1300 descriptor,
1301 ipiv.data(),
1302 &info);
1303 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
1304 }
1305 state = LAPACKSupport::State::lu;
1306 property = LAPACKSupport::Property::general;
1307}
1308
1309
1310
1311template <typename NumberType>
1312void
1313ScaLAPACKMatrix<NumberType>::invert()
1314{
1315 // Check whether matrix is symmetric and save flag.
1316 // If a Cholesky factorization has been applied previously,
1317 // the original matrix was symmetric.
1318 const bool is_symmetric = (property == LAPACKSupport::symmetric ||
1319 state == LAPACKSupport::State::cholesky);
1320
1321 // Check whether matrix is triangular and is in an unfactorized state.
1322 const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
1323 property == LAPACKSupport::lower_triangular) &&
1324 (state == LAPACKSupport::State::matrix ||
1325 state == LAPACKSupport::State::inverse_matrix);
1326
1327 if (is_triangular)
1328 {
1329 if (grid->mpi_process_is_active)
1330 {
1331 const char uploTriangular =
1332 property == LAPACKSupport::upper_triangular ? 'U' : 'L';
1333 const char diag = 'N';
1334 int info = 0;
1335 NumberType *A_loc = this->values.data();
1336 ptrtri(&uploTriangular,
1337 &diag,
1338 &n_columns,
1339 A_loc,
1340 &submatrix_row,
1341 &submatrix_column,
1342 descriptor,
1343 &info);
1344 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
1345 // The inversion is stored in the same part as the triangular matrix,
1346 // so we don't need to re-set the property here.
1347 }
1348 }
1349 else
1350 {
1351 // Matrix is neither in Cholesky nor LU state.
1352 // Compute the required factorizations based on the property of the
1353 // matrix.
1354 if (!(state == LAPACKSupport::State::lu ||
1355 state == LAPACKSupport::State::cholesky))
1356 {
1357 if (is_symmetric)
1358 compute_cholesky_factorization();
1359 else
1360 compute_lu_factorization();
1361 }
1362 if (grid->mpi_process_is_active)
1363 {
1364 int info = 0;
1365 NumberType *A_loc = this->values.data();
1366
1367 if (is_symmetric)
1368 {
1369 ppotri(&uplo,
1370 &n_columns,
1371 A_loc,
1372 &submatrix_row,
1373 &submatrix_column,
1374 descriptor,
1375 &info);
1376 AssertThrow(info == 0,
1377 LAPACKSupport::ExcErrorCode("ppotri", info));
1378 property = LAPACKSupport::Property::symmetric;
1379 }
1380 else
1381 {
1382 int lwork = -1, liwork = -1;
1383 work.resize(1);
1384 iwork.resize(1);
1385
1386 pgetri(&n_columns,
1387 A_loc,
1388 &submatrix_row,
1389 &submatrix_column,
1390 descriptor,
1391 ipiv.data(),
1392 work.data(),
1393 &lwork,
1394 iwork.data(),
1395 &liwork,
1396 &info);
1397
1398 AssertThrow(info == 0,
1399 LAPACKSupport::ExcErrorCode("pgetri", info));
1400 lwork = static_cast<int>(work[0]);
1401 liwork = iwork[0];
1402 work.resize(lwork);
1403 iwork.resize(liwork);
1404
1405 pgetri(&n_columns,
1406 A_loc,
1407 &submatrix_row,
1408 &submatrix_column,
1409 descriptor,
1410 ipiv.data(),
1411 work.data(),
1412 &lwork,
1413 iwork.data(),
1414 &liwork,
1415 &info);
1416
1417 AssertThrow(info == 0,
1418 LAPACKSupport::ExcErrorCode("pgetri", info));
1419 }
1420 }
1421 }
1422 state = LAPACKSupport::State::inverse_matrix;
1423}
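// Usage sketch (not part of scalapack.cc): invert a distributed matrix in
// place; for a symmetric positive definite matrix setting the property first
// selects the Cholesky-based path, otherwise an LU factorization is used.
//
// @code
// A.set_property(LAPACKSupport::Property::symmetric);
// A.invert();
// @endcode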
1424
1425
1426
1427template <typename NumberType>
1428std::vector<NumberType>
1429ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
1430 const std::pair<unsigned int, unsigned int> &index_limits,
1431 const bool compute_eigenvectors)
1432{
1433 // check validity of index limits
1434 AssertIndexRange(index_limits.first, n_rows);
1435 AssertIndexRange(index_limits.second, n_rows);
1436
1437 std::pair<unsigned int, unsigned int> idx =
1438 std::make_pair(std::min(index_limits.first, index_limits.second),
1439 std::max(index_limits.first, index_limits.second));
1440
1441 // compute all eigenvalues/eigenvectors
1442 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1443 return eigenpairs_symmetric(compute_eigenvectors);
1444 else
1445 return eigenpairs_symmetric(compute_eigenvectors, idx);
1446}
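// Usage sketch (not part of scalapack.cc): compute the five smallest
// eigenvalues and the corresponding eigenvectors of a symmetric distributed
// matrix A.
//
// @code
// A.set_property(LAPACKSupport::Property::symmetric);
// const std::vector<double> ev =
//   A.eigenpairs_symmetric_by_index({0, 4}, /*compute_eigenvectors=*/true);
// @endcode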
1447
1448
1449
1450template <typename NumberType>
1451std::vector<NumberType>
1452ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
1453 const std::pair<NumberType, NumberType> &value_limits,
1454 const bool compute_eigenvectors)
1455{
1456 Assert(!std::isnan(value_limits.first),
1457 ExcMessage("value_limits.first is NaN"));
1458 Assert(!std::isnan(value_limits.second),
1459 ExcMessage("value_limits.second is NaN"));
1460
1461 std::pair<unsigned int, unsigned int> indices =
1462 std::make_pair(numbers::invalid_unsigned_int,
1463 numbers::invalid_unsigned_int);
1464
1465 return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
1466}
1467
1468
1469
1470template <typename NumberType>
1471std::vector<NumberType>
1472ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
1473 const bool compute_eigenvectors,
1474 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1475 const std::pair<NumberType, NumberType> &eigenvalue_limits)
1476{
1477 Assert(state == LAPACKSupport::matrix,
1478 ExcMessage(
1479 "Matrix has to be in Matrix state before calling this function."));
1480 Assert(property == LAPACKSupport::symmetric,
1481 ExcMessage("Matrix has to be symmetric for this operation."));
1482
1483 std::lock_guard<std::mutex> lock(mutex);
1484
1485 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1486 std::isnan(eigenvalue_limits.second)) ?
1487 false :
1488 true;
1489 const bool use_indices =
1490 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1491 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1492 false :
1493 true;
1494
1495 Assert(
1496 !(use_values && use_indices),
1497 ExcMessage(
1498 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1499
1500 // if computation of eigenvectors is not required use a sufficiently small
1501 // distributed matrix
1502 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1503 compute_eigenvectors ?
1504 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1505 grid,
1506 row_block_size) :
1507 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1508 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1509
1510 eigenvectors->property = property;
1511 // number of eigenvalues to be returned from psyevx; upon successful exit ev
1512 // contains the m selected eigenvalues in ascending order. Set to all
1513 // eigenvalues in case we will be using psyev.
1514 int m = n_rows;
1515 std::vector<NumberType> ev(n_rows);
1516
1517 if (grid->mpi_process_is_active)
1518 {
1519 int info = 0;
1520 /*
1521 * for jobz==N only eigenvalues are computed, for jobz='V' also the
1522 * eigenvectors of the matrix are computed
1523 */
1524 char jobz = compute_eigenvectors ? 'V' : 'N';
1525 char range = 'A';
1526 // default value is to compute all eigenvalues and optionally eigenvectors
1527 bool all_eigenpairs = true;
1528 NumberType vl = NumberType(), vu = NumberType();
1529 int il = 1, iu = 1;
1530 // number of eigenvectors to be returned;
1531 // upon successful exit the first m=nz columns contain the selected
1532 // eigenvectors (only if jobz=='V')
1533 int nz = 0;
1534 NumberType abstol = NumberType();
1535
1536 // orfac decides which eigenvectors should be reorthogonalized
1537 // see
1538 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1539 // for explanation; to keep it simple, no reorthogonalization will be
1540 // done by setting orfac to 0
1541 NumberType orfac = 0;
1542 // contains the indices of eigenvectors that failed to converge
1543 std::vector<int> ifail;
1544 // This array contains indices of eigenvectors corresponding to
1545 // a cluster of eigenvalues that could not be reorthogonalized
1546 // due to insufficient workspace
1547 // see
1548 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1549 // for explanation
1550 std::vector<int> iclustr;
1551 // This array contains the gap between eigenvalues whose
1552 // eigenvectors could not be reorthogonalized.
1553 // see
1554 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1555 // for explanation
1556 std::vector<NumberType> gap(n_local_rows * n_local_columns);
1557
1558 // index range for eigenvalues is not specified
1559 if (!use_indices)
1560 {
1561 // interval for eigenvalues is not specified and consequently all
1562 // eigenvalues/eigenpairs will be computed
1563 if (!use_values)
1564 {
1565 range = 'A';
1566 all_eigenpairs = true;
1567 }
1568 else
1569 {
1570 range = 'V';
1571 all_eigenpairs = false;
1572 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1573 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1574 }
1575 }
1576 else
1577 {
1578 range = 'I';
1579 all_eigenpairs = false;
1580 // as Fortran starts counting/indexing from 1 unlike C/C++, where it
1581 // starts from 0
1582 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1583 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1584 }
1585 NumberType *A_loc = this->values.data();
1586 /*
1587 * by setting lwork to -1 a workspace query for optimal length of work is
1588 * performed
1589 */
1590 int lwork = -1;
1591 int liwork = -1;
1592 NumberType *eigenvectors_loc =
1593 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1594 /*
1595 * According to the official "documentation" found on the internet
1596 * (aka source file pdsyevx.f [1]) the work array has to have a
1597 * minimal size of max(3, lwork). Because we query for optimal size
1598 * (lwork == -1) we have to guarantee at least three doubles. The
1599 * necessary size of iwork is not specified, so let's use three as
1600 * well.
1601 * [1]
1602 * https://netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1603 */
1604 work.resize(3);
1605 iwork.resize(3);
1606
1607 if (all_eigenpairs)
1608 {
1609 psyev(&jobz,
1610 &uplo,
1611 &n_rows,
1612 A_loc,
1613 &submatrix_row,
1614 &submatrix_column,
1615 descriptor,
1616 ev.data(),
1617 eigenvectors_loc,
1618 &eigenvectors->submatrix_row,
1619 &eigenvectors->submatrix_column,
1620 eigenvectors->descriptor,
1621 work.data(),
1622 &lwork,
1623 &info);
1624 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1625 }
1626 else
1627 {
1628 char cmach = compute_eigenvectors ? 'U' : 'S';
1629 plamch(&(this->grid->blacs_context), &cmach, abstol);
1630 abstol *= 2;
1631 ifail.resize(n_rows);
1632 iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
1633 gap.resize(grid->n_process_rows * grid->n_process_columns);
1634
1635 psyevx(&jobz,
1636 &range,
1637 &uplo,
1638 &n_rows,
1639 A_loc,
1640 &submatrix_row,
1641 &submatrix_column,
1642 descriptor,
1643 &vl,
1644 &vu,
1645 &il,
1646 &iu,
1647 &abstol,
1648 &m,
1649 &nz,
1650 ev.data(),
1651 &orfac,
1652 eigenvectors_loc,
1653 &eigenvectors->submatrix_row,
1654 &eigenvectors->submatrix_column,
1655 eigenvectors->descriptor,
1656 work.data(),
1657 &lwork,
1658 iwork.data(),
1659 &liwork,
1660 ifail.data(),
1661 iclustr.data(),
1662 gap.data(),
1663 &info);
1664 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1665 }
1666 lwork = static_cast<int>(work[0]);
1667 work.resize(lwork);
1668
1669 if (all_eigenpairs)
1670 {
1671 psyev(&jobz,
1672 &uplo,
1673 &n_rows,
1674 A_loc,
1675 &submatrix_row,
1676 &submatrix_column,
1677 descriptor,
1678 ev.data(),
1679 eigenvectors_loc,
1680 &eigenvectors->submatrix_row,
1681 &eigenvectors->submatrix_column,
1682 eigenvectors->descriptor,
1683 work.data(),
1684 &lwork,
1685 &info);
1686
1687 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1688 }
1689 else
1690 {
1691 liwork = iwork[0];
1692 AssertThrow(liwork > 0, ExcInternalError());
1693 iwork.resize(liwork);
1694
1695 psyevx(&jobz,
1696 &range,
1697 &uplo,
1698 &n_rows,
1699 A_loc,
1700 &submatrix_row,
1701 &submatrix_column,
1702 descriptor,
1703 &vl,
1704 &vu,
1705 &il,
1706 &iu,
1707 &abstol,
1708 &m,
1709 &nz,
1710 ev.data(),
1711 &orfac,
1712 eigenvectors_loc,
1713 &eigenvectors->submatrix_row,
1714 &eigenvectors->submatrix_column,
1715 eigenvectors->descriptor,
1716 work.data(),
1717 &lwork,
1718 iwork.data(),
1719 &liwork,
1720 ifail.data(),
1721 iclustr.data(),
1722 gap.data(),
1723 &info);
1724
1725 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1726 }
1727 // if eigenvectors are queried copy eigenvectors to original matrix
1728 // as the temporary matrix eigenvectors has identical dimensions and
1729 // block-cyclic distribution we simply swap the local array
1730 if (compute_eigenvectors)
1731 this->values.swap(eigenvectors->values);
1732
1733 // adapt the size of ev to fit m upon return
1734 while (ev.size() > static_cast<size_type>(m))
1735 ev.pop_back();
1736 }
1737 /*
1738 * send number of computed eigenvalues to inactive processes
1739 */
1740 grid->send_to_inactive(&m, 1);
1741
1742 /*
1743 * inactive processes have to resize array of eigenvalues
1744 */
1745 if (!grid->mpi_process_is_active)
1746 ev.resize(m);
1747 /*
1748 * send the eigenvalues to processors not being part of the process grid
1749 */
1750 grid->send_to_inactive(ev.data(), ev.size());
1751
1752 /*
1753 * if only eigenvalues are queried the content of the matrix will be destroyed
1754 * if the eigenpairs are queried matrix A on exit stores the eigenvectors in
1755 * the columns
1756 */
1757 if (compute_eigenvectors)
1758 {
1759 property = LAPACKSupport::Property::general;
1760 state = LAPACKSupport::eigenvalues;
1761 }
1762 else
1763 state = LAPACKSupport::unusable;
1764
1765 return ev;
1766}
1767
1768
1769
1770template <typename NumberType>
1771std::vector<NumberType>
1772ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
1773 const std::pair<unsigned int, unsigned int> &index_limits,
1774 const bool compute_eigenvectors)
1775{
1776 // Check validity of index limits.
1777 AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
1778 AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));
1779
1780 const std::pair<unsigned int, unsigned int> idx =
1781 std::make_pair(std::min(index_limits.first, index_limits.second),
1782 std::max(index_limits.first, index_limits.second));
1783
1784 // Compute all eigenvalues/eigenvectors.
1785 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1786 return eigenpairs_symmetric_MRRR(compute_eigenvectors);
1787 else
1788 return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
1789}
1790
1791
1792
1793template <typename NumberType>
1794std::vector<NumberType>
1795ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
1796 const std::pair<NumberType, NumberType> &value_limits,
1797 const bool compute_eigenvectors)
1798{
1799 AssertIsFinite(value_limits.first);
1800 AssertIsFinite(value_limits.second);
1801
1802 const std::pair<unsigned int, unsigned int> indices =
1803 std::make_pair(numbers::invalid_unsigned_int,
1804 numbers::invalid_unsigned_int);
1805
1806 return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
1807}
1808
1809
1810
1811template <typename NumberType>
1812std::vector<NumberType>
1813ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
1814 const bool compute_eigenvectors,
1815 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1816 const std::pair<NumberType, NumberType> &eigenvalue_limits)
1817{
1818 Assert(state == LAPACKSupport::matrix,
1819 ExcMessage(
1820 "Matrix has to be in Matrix state before calling this function."));
1821 Assert(property == LAPACKSupport::symmetric,
1822 ExcMessage("Matrix has to be symmetric for this operation."));
1823
1824 std::lock_guard<std::mutex> lock(mutex);
1825
1826 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1827 std::isnan(eigenvalue_limits.second)) ?
1828 false :
1829 true;
1830 const bool use_indices =
1831 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1832 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1833 false :
1834 true;
1835
1836 Assert(
1837 !(use_values && use_indices),
1838 ExcMessage(
1839 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1840
1841 // If computation of eigenvectors is not required, use a sufficiently small
1842 // distributed matrix.
1843 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1844 compute_eigenvectors ?
1845 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1846 grid,
1847 row_block_size) :
1848 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1849 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1850
1851 eigenvectors->property = property;
1852 // Number of eigenvalues to be returned from psyevr; upon successful exit ev
1853 // contains the m selected eigenvalues in ascending order.
1854 int m = n_rows;
1855 std::vector<NumberType> ev(n_rows);
1856
1857 // Number of eigenvectors to be returned;
1858 // Upon successful exit the first m=nz columns contain the selected
1859 // eigenvectors (only if jobz=='V').
1860 int nz = 0;
1861
1862 if (grid->mpi_process_is_active)
1863 {
1864 int info = 0;
1865 /*
1866 * For jobz==N only eigenvalues are computed, for jobz='V' also the
1867 * eigenvectors of the matrix are computed.
1868 */
1869 char jobz = compute_eigenvectors ? 'V' : 'N';
1870 // Default value is to compute all eigenvalues and optionally
1871 // eigenvectors.
1872 char range = 'A';
1873 NumberType vl = NumberType(), vu = NumberType();
1874 int il = 1, iu = 1;
1875
1876 // Index range for eigenvalues is not specified.
1877 if (!use_indices)
1878 {
1879 // Interval for eigenvalues is not specified and consequently all
1880 // eigenvalues/eigenpairs will be computed.
1881 if (!use_values)
1882 {
1883 range = 'A';
1884 }
1885 else
1886 {
1887 range = 'V';
1888 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1889 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1890 }
1891 }
1892 else
1893 {
1894 range = 'I';
1895 // As Fortran starts counting/indexing from 1 unlike C/C++, where it
1896 // starts from 0.
1897 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1898 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1899 }
1900 NumberType *A_loc = this->values.data();
1901
1902 /*
1903 * By setting lwork to -1 a workspace query for optimal length of work is
1904 * performed.
1905 */
1906 int lwork = -1;
1907 int liwork = -1;
1908 NumberType *eigenvectors_loc =
1909 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1910 work.resize(1);
1911 iwork.resize(1);
1912
1913 psyevr(&jobz,
1914 &range,
1915 &uplo,
1916 &n_rows,
1917 A_loc,
1918 &submatrix_row,
1919 &submatrix_column,
1920 descriptor,
1921 &vl,
1922 &vu,
1923 &il,
1924 &iu,
1925 &m,
1926 &nz,
1927 ev.data(),
1928 eigenvectors_loc,
1929 &eigenvectors->submatrix_row,
1930 &eigenvectors->submatrix_column,
1931 eigenvectors->descriptor,
1932 work.data(),
1933 &lwork,
1934 iwork.data(),
1935 &liwork,
1936 &info);
1937
1938 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1939
1940 lwork = static_cast<int>(work[0]);
1941 work.resize(lwork);
1942 liwork = iwork[0];
1943 iwork.resize(liwork);
1944
1945 psyevr(&jobz,
1946 &range,
1947 &uplo,
1948 &n_rows,
1949 A_loc,
1950 &submatrix_row,
1951 &submatrix_column,
1952 descriptor,
1953 &vl,
1954 &vu,
1955 &il,
1956 &iu,
1957 &m,
1958 &nz,
1959 ev.data(),
1960 eigenvectors_loc,
1961 &eigenvectors->submatrix_row,
1962 &eigenvectors->submatrix_column,
1963 eigenvectors->descriptor,
1964 work.data(),
1965 &lwork,
1966 iwork.data(),
1967 &liwork,
1968 &info);
1969
1970 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1971
1972 if (compute_eigenvectors)
1973 AssertThrow(
1974 m == nz,
1975 ExcMessage(
1976 "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
1977
1978 // If eigenvectors are queried, copy eigenvectors to original matrix.
1979 // As the temporary matrix eigenvectors has identical dimensions and
1980 // block-cyclic distribution we simply swap the local array.
1981 if (compute_eigenvectors)
1982 this->values.swap(eigenvectors->values);
1983
1984 // Adapt the size of ev to fit m upon return.
1985 while (ev.size() > static_cast<size_type>(m))
1986 ev.pop_back();
1987 }
1988 /*
1989 * Send number of computed eigenvalues to inactive processes.
1990 */
1991 grid->send_to_inactive(&m, 1);
1992
1993 /*
1994 * Inactive processes have to resize array of eigenvalues.
1995 */
1996 if (!grid->mpi_process_is_active)
1997 ev.resize(m);
1998 /*
1999 * Send the eigenvalues to processors not being part of the process grid.
2000 */
2001 grid->send_to_inactive(ev.data(), ev.size());
2002
2003 /*
2004 * If only eigenvalues are queried, the content of the matrix will be
2005 * destroyed. If the eigenpairs are queried, matrix A on exit stores the
2006 * eigenvectors in the columns.
2007 */
2008 if (compute_eigenvectors)
2009 {
2010 property = LAPACKSupport::Property::general;
2011 state = LAPACKSupport::eigenvalues;
2012 }
2013 else
2014 state = LAPACKSupport::unusable;
2015
2016 return ev;
2017}
2018
2019
2020
2021template <typename NumberType>
2022std::vector<NumberType>
2023ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
2024 ScaLAPACKMatrix<NumberType> *VT)
2025{
2026 Assert(state == LAPACKSupport::matrix,
2027 ExcMessage(
2028 "Matrix has to be in Matrix state before calling this function."));
2029 Assert(row_block_size == column_block_size,
2030 ExcDimensionMismatch(row_block_size, column_block_size));
2031
2032 const bool left_singular_vectors = (U != nullptr) ? true : false;
2033 const bool right_singular_vectors = (VT != nullptr) ? true : false;
2034
2035 if (left_singular_vectors)
2036 {
2037 Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
2038 Assert(U->n_rows == U->n_columns,
2039 ExcDimensionMismatch(U->n_rows, U->n_columns));
2040 Assert(row_block_size == U->row_block_size,
2041 ExcDimensionMismatch(row_block_size, U->row_block_size));
2042 Assert(column_block_size == U->column_block_size,
2043 ExcDimensionMismatch(column_block_size, U->column_block_size));
2044 Assert(grid->blacs_context == U->grid->blacs_context,
2045 ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
2046 }
2047 if (right_singular_vectors)
2048 {
2049 Assert(n_columns == VT->n_rows,
2050 ExcDimensionMismatch(n_columns, VT->n_rows));
2051 Assert(VT->n_rows == VT->n_columns,
2052 ExcDimensionMismatch(VT->n_rows, VT->n_columns));
2053 Assert(row_block_size == VT->row_block_size,
2054 ExcDimensionMismatch(row_block_size, VT->row_block_size));
2055 Assert(column_block_size == VT->column_block_size,
2056 ExcDimensionMismatch(column_block_size, VT->column_block_size));
2057 Assert(grid->blacs_context == VT->grid->blacs_context,
2058 ExcDimensionMismatch(grid->blacs_context,
2059 VT->grid->blacs_context));
2060 }
2061 std::lock_guard<std::mutex> lock(mutex);
2062
2063 std::vector<NumberType> sv(std::min(n_rows, n_columns));
2064
2065 if (grid->mpi_process_is_active)
2066 {
2067 char jobu = left_singular_vectors ? 'V' : 'N';
2068 char jobvt = right_singular_vectors ? 'V' : 'N';
2069 NumberType *A_loc = this->values.data();
2070 NumberType *U_loc = left_singular_vectors ? U->values.data() : nullptr;
2071 NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;
2072 int info = 0;
2073 /*
2074 * by setting lwork to -1 a workspace query for optimal length of work is
2075 * performed
2076 */
2077 int lwork = -1;
2078 work.resize(1);
2079
2080 pgesvd(&jobu,
2081 &jobvt,
2082 &n_rows,
2083 &n_columns,
2084 A_loc,
2085 &submatrix_row,
2086 &submatrix_column,
2087 descriptor,
2088 &*sv.begin(),
2089 U_loc,
2090 &U->submatrix_row,
2091 &U->submatrix_column,
2092 U->descriptor,
2093 VT_loc,
2094 &VT->submatrix_row,
2095 &VT->submatrix_column,
2096 VT->descriptor,
2097 work.data(),
2098 &lwork,
2099 &info);
2100 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2101
2102 lwork = static_cast<int>(work[0]);
2103 work.resize(lwork);
2104
2105 pgesvd(&jobu,
2106 &jobvt,
2107 &n_rows,
2108 &n_columns,
2109 A_loc,
2110 &submatrix_row,
2111 &submatrix_column,
2112 descriptor,
2113 &*sv.begin(),
2114 U_loc,
2115 &U->submatrix_row,
2116 &U->submatrix_column,
2117 U->descriptor,
2118 VT_loc,
2119 &VT->submatrix_row,
2120 &VT->submatrix_column,
2121 VT->descriptor,
2122 work.data(),
2123 &lwork,
2124 &info);
2125 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2126 }
2127
2128 /*
2129 * send the singular values to processors not being part of the process grid
2130 */
2131 grid->send_to_inactive(sv.data(), sv.size());
2132
2133 property = LAPACKSupport::Property::general;
2134 state = LAPACKSupport::State::unusable;
2135
2136 return sv;
2137}
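// A usage sketch (illustrative only): for the full SVD A = U * Sigma * VT,
// U must be an n_rows x n_rows and VT an n_columns x n_columns matrix, both
// built with the same block size and process grid as A:
//
//   ScaLAPACKMatrix<double> U(A.m(), A.m(), grid, block_size, block_size);
//   ScaLAPACKMatrix<double> VT(A.n(), A.n(), grid, block_size, block_size);
//   const std::vector<double> sigma = A.compute_SVD(&U, &VT);
//
// Passing nullptr (the default) for U and/or VT computes singular values only.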
2138
2139
2140
2141template <typename NumberType>
2142void
2143ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
2144 const bool transpose)
2145{
2146 Assert(grid == B.grid,
2147 ExcMessage("The matrices A and B need to have the same process grid"));
2148 Assert(state == LAPACKSupport::matrix,
2149 ExcMessage(
2150 "Matrix has to be in Matrix state before calling this function."));
2151 Assert(B.state == LAPACKSupport::matrix,
2152 ExcMessage(
2153 "Matrix B has to be in Matrix state before calling this function."));
2154
2155 if (transpose)
2156 {
2157 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
2158 }
2159 else
2160 {
2161 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
2162 }
2163
2164 // see
2165 // https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgels.htm
2166 Assert(row_block_size == column_block_size,
2167 ExcMessage(
2168 "Use identical block sizes for rows and columns of matrix A"));
2169 Assert(B.row_block_size == B.column_block_size,
2170 ExcMessage(
2171 "Use identical block sizes for rows and columns of matrix B"));
2172 Assert(row_block_size == B.row_block_size,
2173 ExcMessage(
2174 "Use identical block-cyclic distribution for matrices A and B"));
2175
2176 std::lock_guard<std::mutex> lock(mutex);
2177
2178 if (grid->mpi_process_is_active)
2179 {
2180 char trans = transpose ? 'T' : 'N';
2181 NumberType *A_loc = this->values.data();
2182 NumberType *B_loc = B.values.data();
2183 int info = 0;
2184 /*
2185 * by setting lwork to -1 a workspace query for optimal length of work is
2186 * performed
2187 */
2188 int lwork = -1;
2189 work.resize(1);
2190
2191 pgels(&trans,
2192 &n_rows,
2193 &n_columns,
2194 &B.n_columns,
2195 A_loc,
2196 &submatrix_row,
2197 &submatrix_column,
2198 descriptor,
2199 B_loc,
2200 &B.submatrix_row,
2201 &B.submatrix_column,
2202 B.descriptor,
2203 work.data(),
2204 &lwork,
2205 &info);
2206 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2207
2208 lwork = static_cast<int>(work[0]);
2209 work.resize(lwork);
2210
2211 pgels(&trans,
2212 &n_rows,
2213 &n_columns,
2214 &B.n_columns,
2215 A_loc,
2216 &submatrix_row,
2217 &submatrix_column,
2218 descriptor,
2219 B_loc,
2220 &B.submatrix_row,
2221 &B.submatrix_column,
2222 B.descriptor,
2223 work.data(),
2224 &lwork,
2225 &info);
2226 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2227 }
2228 state = LAPACKSupport::State::unusable;
2229}
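// A note on the p?gels semantics used above (illustrative only): for an
// overdetermined system with n_rows >= n_columns and transpose == false,
// the call solves min ||B - A*X|| in the least-squares sense and overwrites
// the first n_columns rows of B with the solution X; the other cases are
// described in the p?gels documentation linked above.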
2230
2231
2232
2233template <typename NumberType>
2234unsigned int
2235ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
2236{
2237 Assert(state == LAPACKSupport::matrix,
2238 ExcMessage(
2239 "Matrix has to be in Matrix state before calling this function."));
2240 Assert(row_block_size == column_block_size,
2241 ExcMessage(
2242 "Use identical block sizes for rows and columns of matrix A"));
2243 Assert(
2244 ratio > 0. && ratio < 1.,
2245 ExcMessage(
2246 "input parameter ratio has to be larger than zero and smaller than 1"));
2247
2248 ScaLAPACKMatrix<NumberType> U(n_rows,
2249 n_rows,
2250 grid,
2251 row_block_size,
2252 row_block_size,
2253 LAPACKSupport::Property::general);
2254 ScaLAPACKMatrix<NumberType> VT(n_columns,
2255 n_columns,
2256 grid,
2257 row_block_size,
2258 row_block_size,
2259 LAPACKSupport::Property::general);
2260 std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
2261 AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
2262 ExcMessage("Matrix has rank 0"));
2263
2264 // Get the number of singular values fulfilling sv[i] > sv[0] * ratio.
2265 // Obviously, the 0-th element already satisfies sv[0] > sv[0] * ratio.
2266 // The singular values in sv are ordered by descending value, so we break out
2267 // of the loop if a singular value is smaller than sv[0] * ratio.
2268 unsigned int n_sv = 1;
2269 std::vector<NumberType> inv_sigma;
2270 inv_sigma.push_back(1 / sv[0]);
2271
2272 for (unsigned int i = 1; i < sv.size(); ++i)
2273 if (sv[i] > sv[0] * ratio)
2274 {
2275 ++n_sv;
2276 inv_sigma.push_back(1 / sv[i]);
2277 }
2278 else
2279 break;
2280
2281 // For the matrix multiplication we use only the columns of U and rows of VT
2282 // which are associated with singular values larger than the limit. That saves
2283 // computational time for matrices with rank significantly smaller than
2284 // min(n_rows,n_columns)
2285 ScaLAPACKMatrix<NumberType> U_R(n_rows,
2286 n_sv,
2287 grid,
2288 row_block_size,
2289 row_block_size,
2290 LAPACKSupport::Property::general);
2291 ScaLAPACKMatrix<NumberType> VT_R(n_sv,
2292 n_columns,
2293 grid,
2294 row_block_size,
2295 row_block_size,
2296 LAPACKSupport::Property::general);
2297 U.copy_to(U_R,
2298 std::make_pair(0, 0),
2299 std::make_pair(0, 0),
2300 std::make_pair(n_rows, n_sv));
2301 VT.copy_to(VT_R,
2302 std::make_pair(0, 0),
2303 std::make_pair(0, 0),
2304 std::make_pair(n_sv, n_columns));
2305
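// Scaling the rows of VT_R by 1/sigma_i and multiplying the transposed
// factors below assembles the Moore-Penrose pseudoinverse
// A^+ = V_R * Sigma_R^{-1} * U_R^T, which is stored in *this as an
// n_columns x n_rows matrix.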
2306 VT_R.scale_rows(inv_sigma);
2307 this->reinit(n_columns,
2308 n_rows,
2309 this->grid,
2310 row_block_size,
2311 column_block_size,
2312 LAPACKSupport::Property::general);
2313 VT_R.mult(1, U_R, 0, *this, true, true);
2314 state = LAPACKSupport::State::inverse_svd;
2315 return n_sv;
2316}
2317
2318
2319
2320template <typename NumberType>
2321NumberType
2322ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
2323 const NumberType a_norm) const
2324{
2325 Assert(state == LAPACKSupport::cholesky,
2326 ExcMessage(
2327 "Matrix has to be in Cholesky state before calling this function."));
2328 std::lock_guard<std::mutex> lock(mutex);
2329 NumberType rcond = 0.;
2330
2331 if (grid->mpi_process_is_active)
2332 {
2333 int liwork = n_local_rows;
2334 iwork.resize(liwork);
2335
2336 int info = 0;
2337 const NumberType *A_loc = this->values.data();
2338
2339 // by setting lwork to -1 a workspace query for optimal length of work is
2340 // performed
2341 int lwork = -1;
2342 work.resize(1);
2343 ppocon(&uplo,
2344 &n_columns,
2345 A_loc,
2346 &submatrix_row,
2347 &submatrix_column,
2348 descriptor,
2349 &a_norm,
2350 &rcond,
2351 work.data(),
2352 &lwork,
2353 iwork.data(),
2354 &liwork,
2355 &info);
2356 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2357 lwork = static_cast<int>(std::ceil(work[0]));
2358 work.resize(lwork);
2359
2360 // now the actual run:
2361 ppocon(&uplo,
2362 &n_columns,
2363 A_loc,
2364 &submatrix_row,
2365 &submatrix_column,
2366 descriptor,
2367 &a_norm,
2368 &rcond,
2369 work.data(),
2370 &lwork,
2371 iwork.data(),
2372 &liwork,
2373 &info);
2374 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2375 }
2376 grid->send_to_inactive(&rcond);
2377 return rcond;
2378}
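// A usage sketch (illustrative only): p?pocon estimates the reciprocal
// condition number in the l1-norm from the Cholesky factorization, so the
// norm of the original matrix has to be taken before factorizing:
//
//   const double a_norm = A.l1_norm();
//   A.compute_cholesky_factorization();
//   const double rcond = A.reciprocal_condition_number(a_norm);
//   // condition number estimate: 1. / rcond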
2379
2380
2381
2382template <typename NumberType>
2383NumberType
2384ScaLAPACKMatrix<NumberType>::l1_norm() const
2385{
2386 const char type('O');
2387
2388 if (property == LAPACKSupport::symmetric)
2389 return norm_symmetric(type);
2390 else
2391 return norm_general(type);
2392}
2393
2394
2395
2396template <typename NumberType>
2397NumberType
2398ScaLAPACKMatrix<NumberType>::linfty_norm() const
2399{
2400 const char type('I');
2401
2402 if (property == LAPACKSupport::symmetric)
2403 return norm_symmetric(type);
2404 else
2405 return norm_general(type);
2406}
2407
2408
2409
2410template <typename NumberType>
2411NumberType
2412ScaLAPACKMatrix<NumberType>::frobenius_norm() const
2413{
2414 const char type('F');
2415
2416 if (property == LAPACKSupport::symmetric)
2417 return norm_symmetric(type);
2418 else
2419 return norm_general(type);
2420}
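// The character passed to norm_general()/norm_symmetric() follows the
// p?lange/p?lansy convention: 'O'/'1' selects the one norm (maximum column
// sum), 'I' the infinity norm (maximum row sum), 'F'/'E' the Frobenius norm,
// and 'M' the largest absolute value of any entry.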
2421
2422
2423
2424template <typename NumberType>
2425NumberType
2426ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
2427{
2428 Assert(state == LAPACKSupport::matrix ||
2429 state == LAPACKSupport::inverse_matrix,
2430 ExcMessage("norms can be called in matrix state only."));
2431 std::lock_guard<std::mutex> lock(mutex);
2432 NumberType res = 0.;
2433
2434 if (grid->mpi_process_is_active)
2435 {
2436 const int iarow = indxg2p_(&submatrix_row,
2437 &row_block_size,
2438 &(grid->this_process_row),
2439 &first_process_row,
2440 &(grid->n_process_rows));
2441 const int iacol = indxg2p_(&submatrix_column,
2442 &column_block_size,
2443 &(grid->this_process_column),
2444 &first_process_column,
2445 &(grid->n_process_columns));
2446 const int mp0 = numroc_(&n_rows,
2447 &row_block_size,
2448 &(grid->this_process_row),
2449 &iarow,
2450 &(grid->n_process_rows));
2451 const int nq0 = numroc_(&n_columns,
2452 &column_block_size,
2453 &(grid->this_process_column),
2454 &iacol,
2455 &(grid->n_process_columns));
2456
2457 // type='M': compute largest absolute value
2458 // type='F' || type='E': compute Frobenius norm
2459 // type='O' || type='1': compute one norm; type='I': compute infinity norm
2460 int lwork = 0; // for type == 'M' || type == 'F' || type == 'E'
2461 if (type == 'O' || type == '1')
2462 lwork = nq0;
2463 else if (type == 'I')
2464 lwork = mp0;
2465
2466 work.resize(lwork);
2467 const NumberType *A_loc = this->values.begin();
2468 res = plange(&type,
2469 &n_rows,
2470 &n_columns,
2471 A_loc,
2472 &submatrix_row,
2473 &submatrix_column,
2474 descriptor,
2475 work.data());
2476 }
2477 grid->send_to_inactive(&res);
2478 return res;
2479}
2480
2481
2482
2483template <typename NumberType>
2484NumberType
2485ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
2486{
2487 Assert(state == LAPACKSupport::matrix ||
2488 state == LAPACKSupport::inverse_matrix,
2489 ExcMessage("norms can be called in matrix state only."));
2490 Assert(property == LAPACKSupport::symmetric,
2491 ExcMessage("Matrix has to be symmetric for this operation."));
2492 std::lock_guard<std::mutex> lock(mutex);
2493 NumberType res = 0.;
2494
2495 if (grid->mpi_process_is_active)
2496 {
2497 // int IROFFA = MOD( IA-1, MB_A )
2498 // int ICOFFA = MOD( JA-1, NB_A )
2499 const int lcm =
2500 ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
2501 const int v2 = lcm / (grid->n_process_rows);
2502
2503 const int IAROW = indxg2p_(&submatrix_row,
2504 &row_block_size,
2505 &(grid->this_process_row),
2506 &first_process_row,
2507 &(grid->n_process_rows));
2508 const int IACOL = indxg2p_(&submatrix_column,
2509 &column_block_size,
2510 &(grid->this_process_column),
2511 &first_process_column,
2512 &(grid->n_process_columns));
2513 const int Np0 = numroc_(&n_columns /*+IROFFA*/,
2514 &row_block_size,
2515 &(grid->this_process_row),
2516 &IAROW,
2517 &(grid->n_process_rows));
2518 const int Nq0 = numroc_(&n_columns /*+ICOFFA*/,
2519 &column_block_size,
2520 &(grid->this_process_column),
2521 &IACOL,
2522 &(grid->n_process_columns));
2523
2524 const int v1 = iceil_(&Np0, &row_block_size);
2525 const int ldw = (n_local_rows == n_local_columns) ?
2526 0 :
2527 row_block_size * iceil_(&v1, &v2);
2528
2529 const int lwork =
2530 (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
2531 work.resize(lwork);
2532 const NumberType *A_loc = this->values.begin();
2533 res = plansy(&type,
2534 &uplo,
2535 &n_columns,
2536 A_loc,
2537 &submatrix_row,
2538 &submatrix_column,
2539 descriptor,
2540 work.data());
2541 }
2542 grid->send_to_inactive(&res);
2543 return res;
2544}
2545
2546
2547
2548# ifdef DEAL_II_WITH_HDF5
2549namespace internal
2550{
2551 namespace
2552 {
2553 void
2554 create_HDF5_state_enum_id(hid_t &state_enum_id)
2555 {
2556 // create HDF5 enum type for LAPACKSupport::State
2557 LAPACKSupport::State val;
2558 state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));
2559 val = LAPACKSupport::State::cholesky;
2560 herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
2561 AssertThrow(status >= 0, ExcInternalError());
2562 val = LAPACKSupport::State::eigenvalues;
2563 status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
2564 AssertThrow(status >= 0, ExcInternalError());
2565 val = LAPACKSupport::State::inverse_matrix;
2566 status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
2567 AssertThrow(status >= 0, ExcInternalError());
2568 val = LAPACKSupport::State::inverse_svd;
2569 status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
2570 AssertThrow(status >= 0, ExcInternalError());
2571 val = LAPACKSupport::State::lu;
2572 status = H5Tenum_insert(state_enum_id, "lu", &val);
2573 AssertThrow(status >= 0, ExcInternalError());
2574 val = LAPACKSupport::State::matrix;
2575 status = H5Tenum_insert(state_enum_id, "matrix", &val);
2576 AssertThrow(status >= 0, ExcInternalError());
2577 val = LAPACKSupport::State::svd;
2578 status = H5Tenum_insert(state_enum_id, "svd", &val);
2579 AssertThrow(status >= 0, ExcInternalError());
2580 val = LAPACKSupport::State::unusable;
2581 status = H5Tenum_insert(state_enum_id, "unusable", &val);
2582 AssertThrow(status >= 0, ExcInternalError());
2584
2585 void
2586 create_HDF5_property_enum_id(hid_t &property_enum_id)
2587 {
2588 // create HDF5 enum type for LAPACKSupport::Property
2589 property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));
2590 LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
2591 herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
2592 AssertThrow(status >= 0, ExcInternalError());
2593 prop = LAPACKSupport::Property::general;
2594 status = H5Tenum_insert(property_enum_id, "general", &prop);
2595 AssertThrow(status >= 0, ExcInternalError());
2596 prop = LAPACKSupport::Property::hessenberg;
2597 status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
2598 AssertThrow(status >= 0, ExcInternalError());
2599 prop = LAPACKSupport::Property::lower_triangular;
2600 status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
2601 AssertThrow(status >= 0, ExcInternalError());
2602 prop = LAPACKSupport::Property::symmetric;
2603 status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
2604 AssertThrow(status >= 0, ExcInternalError());
2605 prop = LAPACKSupport::Property::upper_triangular;
2606 status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
2607 AssertThrow(status >= 0, ExcInternalError());
2608 }
2609 } // namespace
2610} // namespace internal
2611# endif
2612
2613
2614
2615template <typename NumberType>
2616void
2617ScaLAPACKMatrix<NumberType>::save(
2618 const std::string &filename,
2619 const std::pair<unsigned int, unsigned int> &chunk_size) const
2620{
2621# ifndef DEAL_II_WITH_HDF5
2622 (void)filename;
2623 (void)chunk_size;
2624 AssertThrow(false, ExcNeedsHDF5());
2625# else
2626
2627 std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
2628
2629 if (chunks_size_.first == numbers::invalid_unsigned_int ||
2630 chunks_size_.second == numbers::invalid_unsigned_int)
2631 {
2632 // default: store the matrix in chunks of columns
2633 chunks_size_.first = n_rows;
2634 chunks_size_.second = 1;
2635 }
2636 Assert(chunks_size_.first > 0,
2637 ExcMessage("The row chunk size must be larger than 0."));
2638 AssertIndexRange(chunks_size_.first, n_rows + 1);
2639 Assert(chunks_size_.second > 0,
2640 ExcMessage("The column chunk size must be larger than 0."));
2641 AssertIndexRange(chunks_size_.second, n_columns + 1);
2642
2643# ifdef H5_HAVE_PARALLEL
2644 // implementation for configurations equipped with a parallel file system
2645 save_parallel(filename, chunks_size_);
2646
2647# else
2648 // implementation for configurations with no parallel file system
2649 save_serial(filename, chunks_size_);
2650
2651# endif
2652# endif
2653}
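// A usage sketch (illustrative only): the matrix is written to the datasets
// "/matrix", "/state" and "/property" of the given HDF5 file; by default each
// HDF5 chunk holds one complete column:
//
//   A.save("A.h5");                            // default chunking
//   A.save("A.h5", std::make_pair(64u, 64u));  // 64x64 chunks, if A is large enough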
2654
2655
2656
2657template <typename NumberType>
2658void
2659ScaLAPACKMatrix<NumberType>::save_serial(
2660 const std::string &filename,
2661 const std::pair<unsigned int, unsigned int> &chunk_size) const
2662{
2663# ifndef DEAL_II_WITH_HDF5
2664 (void)filename;
2665 (void)chunk_size;
2666 AssertThrow(false, ExcNeedsHDF5());
2667# else
2668
2669 /*
2670 * The content of the distributed matrix is copied to a matrix using a 1x1
2671 * process grid. Therefore, one process has all the data and can write it to a
2672 * file.
2673 *
2674 * Create a 1x1 column grid which will be used to initialize
2675 * an effectively serial ScaLAPACK matrix to gather the contents from the
2676 * current object
2677 */
2678 const auto column_grid =
2679 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2680 1,
2681 1);
2682
2683 const int MB = n_rows, NB = n_columns;
2684 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2685 copy_to(tmp);
2686
2687 // the 1x1 grid has only one process and this one writes
2688 // the content of the matrix to the HDF5 file
2689 if (tmp.grid->mpi_process_is_active)
2690 {
2691 herr_t status;
2692
2693 // create a new file using default properties
2694 hid_t file_id =
2695 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
2696
2697 // modify dataset creation properties, i.e. enable chunking
2698 hsize_t chunk_dims[2];
2699 // revert order of rows and columns as ScaLAPACK uses column-major
2700 // ordering
2701 chunk_dims[0] = chunk_size.second;
2702 chunk_dims[1] = chunk_size.first;
2703 hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
2704 status = H5Pset_chunk(data_property, 2, chunk_dims);
2705 AssertThrow(status >= 0, ExcIO());
2706
2707 // create the data space for the dataset
2708 hsize_t dims[2];
2709 // change order of rows and columns as ScaLAPACKMatrix uses column major
2710 // ordering
2711 dims[0] = n_columns;
2712 dims[1] = n_rows;
2713 hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
2714
2715 // create the dataset within the file using chunk creation properties
2716 hid_t type_id = hdf5_type_id(tmp.values.data());
2717 hid_t dataset_id = H5Dcreate2(file_id,
2718 "/matrix",
2719 type_id,
2720 dataspace_id,
2721 H5P_DEFAULT,
2722 data_property,
2723 H5P_DEFAULT);
2724
2725 // write the dataset
2726 status = H5Dwrite(
2727 dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
2728 AssertThrow(status >= 0, ExcIO());
2729
2730 // create HDF5 enum type for LAPACKSupport::State and
2731 // LAPACKSupport::Property
2732 hid_t state_enum_id, property_enum_id;
2733 internal::create_HDF5_state_enum_id(state_enum_id);
2734 internal::create_HDF5_property_enum_id(property_enum_id);
2735
2736 // create the data space for the state enum
2737 hsize_t dims_state[1];
2738 dims_state[0] = 1;
2739 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2740 // create the dataset for the state enum
2741 hid_t state_enum_dataset = H5Dcreate2(file_id,
2742 "/state",
2743 state_enum_id,
2744 state_enum_dataspace,
2745 H5P_DEFAULT,
2746 H5P_DEFAULT,
2747 H5P_DEFAULT);
2748 // write the dataset for the state enum
2749 status = H5Dwrite(state_enum_dataset,
2750 state_enum_id,
2751 H5S_ALL,
2752 H5S_ALL,
2753 H5P_DEFAULT,
2754 &state);
2755 AssertThrow(status >= 0, ExcIO());
2756
2757 // create the data space for the property enum
2758 hsize_t dims_property[1];
2759 dims_property[0] = 1;
2760 hid_t property_enum_dataspace =
2761 H5Screate_simple(1, dims_property, nullptr);
2762 // create the dataset for the property enum
2763 hid_t property_enum_dataset = H5Dcreate2(file_id,
2764 "/property",
2765 property_enum_id,
2766 property_enum_dataspace,
2767 H5P_DEFAULT,
2768 H5P_DEFAULT,
2769 H5P_DEFAULT);
2770 // write the dataset for the property enum
2771 status = H5Dwrite(property_enum_dataset,
2772 property_enum_id,
2773 H5S_ALL,
2774 H5S_ALL,
2775 H5P_DEFAULT,
2776 &property);
2777 AssertThrow(status >= 0, ExcIO());
2778
2779 // end access to the datasets and release resources used by them
2780 status = H5Dclose(dataset_id);
2781 AssertThrow(status >= 0, ExcIO());
2782 status = H5Dclose(state_enum_dataset);
2783 AssertThrow(status >= 0, ExcIO());
2784 status = H5Dclose(property_enum_dataset);
2785 AssertThrow(status >= 0, ExcIO());
2786
2787 // terminate access to the data spaces
2788 status = H5Sclose(dataspace_id);
2789 AssertThrow(status >= 0, ExcIO());
2790 status = H5Sclose(state_enum_dataspace);
2791 AssertThrow(status >= 0, ExcIO());
2792 status = H5Sclose(property_enum_dataspace);
2793 AssertThrow(status >= 0, ExcIO());
2794
2795 // release enum data types
2796 status = H5Tclose(state_enum_id);
2797 AssertThrow(status >= 0, ExcIO());
2798 status = H5Tclose(property_enum_id);
2799 AssertThrow(status >= 0, ExcIO());
2800
2801 // release the creation property
2802 status = H5Pclose(data_property);
2803 AssertThrow(status >= 0, ExcIO());
2804
2805 // close the file.
2806 status = H5Fclose(file_id);
2807 AssertThrow(status >= 0, ExcIO());
2808 }
2809# endif
2810}
2811
2812
2813
2814template <typename NumberType>
2815void
2816ScaLAPACKMatrix<NumberType>::save_parallel(
2817 const std::string &filename,
2818 const std::pair<unsigned int, unsigned int> &chunk_size) const
2819{
2820# ifndef DEAL_II_WITH_HDF5
2821 (void)filename;
2822 (void)chunk_size;
2823 AssertThrow(false, ExcNeedsHDF5());
2824# else
2825
2826 const unsigned int n_mpi_processes(
2827 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
2828 MPI_Info info = MPI_INFO_NULL;
2829 /*
2830 * The content of the distributed matrix is copied to a matrix using a
2831 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
2832 * of the matrix, which they can write to the file
2833 *
2834 * Create a 1xn_processes column grid
2835 */
2836 const auto column_grid =
2837 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2838 1,
2839 n_mpi_processes);
2840
2841 const int MB = n_rows;
2842 /*
2843 * If the ratio n_columns/n_mpi_processes is smaller than the column block
2844 * size of the original matrix, the redistribution and saving of the matrix
2845 * requires a significant amount of MPI communication. Therefore, it is better
2846 * to set a minimum value for the block size NB, so that only
2847 * ceil(n_columns/NB) processes are actively involved in saving the matrix.
2848 * Example: A 2*10^9 x 400 matrix is distributed on a 80 x 5 process grid
2849 * using block size 32. Instead of distributing the matrix on a 1 x 400
2850 * process grid with a row block size of 2*10^9 and a column block size of 1,
2851 * the minimum value for NB yields that only ceil(400/32)=13 processes will be
2852 * writing the matrix to disk.
2853 */
2854 const int NB = std::max(static_cast<int>(std::ceil(
2855 static_cast<double>(n_columns) / n_mpi_processes)),
2856 column_block_size);
2857
2858 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2859 copy_to(tmp);
2860
2861 // get pointer to data held by the process
2862 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
2863
2864 herr_t status;
2865 // dataset dimensions
2866 hsize_t dims[2];
2867
2868 // set up file access property list with parallel I/O access
2869 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
2870 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2871 AssertThrow(status >= 0, ExcIO());
2872
2873 // create a new file collectively and release property list identifier
2874 hid_t file_id =
2875 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
2876 status = H5Pclose(plist_id);
2877 AssertThrow(status >= 0, ExcIO());
2878
2879 // As ScaLAPACK, and therefore the class ScaLAPACKMatrix, uses column-major
2880 // ordering but HDF5 row-major ordering, we have to reverse entries related to
2881 // columns and rows in the following. create the dataspace for the dataset
2882 dims[0] = tmp.n_columns;
2883 dims[1] = tmp.n_rows;
2884
2885 hid_t filespace = H5Screate_simple(2, dims, nullptr);
2886
2887 // create the chunked dataset with default properties and close filespace
2888 hsize_t chunk_dims[2];
2889 // revert order of rows and columns as ScaLAPACK uses column-major ordering
2890 chunk_dims[0] = chunk_size.second;
2891 chunk_dims[1] = chunk_size.first;
2892 plist_id = H5Pcreate(H5P_DATASET_CREATE);
2893 H5Pset_chunk(plist_id, 2, chunk_dims);
2894 hid_t type_id = hdf5_type_id(data);
2895 hid_t dset_id = H5Dcreate2(
2896 file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
2897
2898 status = H5Sclose(filespace);
2899 AssertThrow(status >= 0, ExcIO());
2900
2901 status = H5Pclose(plist_id);
2902 AssertThrow(status >= 0, ExcIO());
2903
2904 // gather the number of local rows and columns from all processes
2905 std::vector<int> proc_n_local_rows(n_mpi_processes),
2906 proc_n_local_columns(n_mpi_processes);
2907 int ierr = MPI_Allgather(&tmp.n_local_rows,
2908 1,
2909 MPI_INT,
2910 proc_n_local_rows.data(),
2911 1,
2912 MPI_INT,
2913 tmp.grid->mpi_communicator);
2914 AssertThrowMPI(ierr);
2915 ierr = MPI_Allgather(&tmp.n_local_columns,
2916 1,
2917 MPI_INT,
2918 proc_n_local_columns.data(),
2919 1,
2920 MPI_INT,
2921 tmp.grid->mpi_communicator);
2922 AssertThrowMPI(ierr);
2923
2924 const unsigned int my_rank(
2925 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
2926
2927 // hyperslab selection parameters
2928 // each process defines dataset in memory and writes it to the hyperslab in
2929 // the file
2930 hsize_t count[2];
2931 count[0] = tmp.n_local_columns;
2932 count[1] = tmp.n_rows;
2933 hid_t memspace = H5Screate_simple(2, count, nullptr);
2934
2935 hsize_t offset[2] = {0};
2936 for (unsigned int i = 0; i < my_rank; ++i)
2937 offset[0] += proc_n_local_columns[i];
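// Example (illustrative only): if three processes hold 4, 4 and 2 local
// columns, the process with rank 2 starts writing at column offset
// 4 + 4 = 8 of the file dataset.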
2938
2939 // select hyperslab in the file.
2940 filespace = H5Dget_space(dset_id);
2941 status = H5Sselect_hyperslab(
2942 filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2943 AssertThrow(status >= 0, ExcIO());
2944
2945 // create property list for independent dataset write
2946 plist_id = H5Pcreate(H5P_DATASET_XFER);
2947 status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
2948 AssertThrow(status >= 0, ExcIO());
2949
2950 // process with no data will not participate in writing to the file
2951 if (tmp.values.size() > 0)
2952 {
2953 status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
2954 AssertThrow(status >= 0, ExcIO());
2955 }
2956 // close/release sources
2957 status = H5Dclose(dset_id);
2958 AssertThrow(status >= 0, ExcIO());
2959 status = H5Sclose(filespace);
2960 AssertThrow(status >= 0, ExcIO());
2961 status = H5Sclose(memspace);
2962 AssertThrow(status >= 0, ExcIO());
2963 status = H5Pclose(plist_id);
2964 AssertThrow(status >= 0, ExcIO());
2965 status = H5Fclose(file_id);
2966 AssertThrow(status >= 0, ExcIO());
2967
2968 // before writing the state and property to file wait for
2969 // all processes to finish writing the matrix content to the file
2970 ierr = MPI_Barrier(tmp.grid->mpi_communicator);
2971 AssertThrowMPI(ierr);
2972
2973 // only root process will write state and property to the file
2974 if (tmp.grid->this_mpi_process == 0)
2975 {
2976 // open file using default properties
2977 hid_t file_id_reopen =
2978 H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
2979
2980 // create HDF5 enum type for LAPACKSupport::State and
2981 // LAPACKSupport::Property
2982 hid_t state_enum_id, property_enum_id;
2983 internal::create_HDF5_state_enum_id(state_enum_id);
2984 internal::create_HDF5_property_enum_id(property_enum_id);
2985
2986 // create the data space for the state enum
2987 hsize_t dims_state[1];
2988 dims_state[0] = 1;
2989 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2990 // create the dataset for the state enum
2991 hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
2992 "/state",
2993 state_enum_id,
2994 state_enum_dataspace,
2995 H5P_DEFAULT,
2996 H5P_DEFAULT,
2997 H5P_DEFAULT);
2998 // write the dataset for the state enum
2999 status = H5Dwrite(state_enum_dataset,
3000 state_enum_id,
3001 H5S_ALL,
3002 H5S_ALL,
3003 H5P_DEFAULT,
3004 &state);
3005 AssertThrow(status >= 0, ExcIO());
3006
3007 // create the data space for the property enum
3008 hsize_t dims_property[1];
3009 dims_property[0] = 1;
3010 hid_t property_enum_dataspace =
3011 H5Screate_simple(1, dims_property, nullptr);
3012 // create the dataset for the property enum
3013 hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
3014 "/property",
3015 property_enum_id,
3016 property_enum_dataspace,
3017 H5P_DEFAULT,
3018 H5P_DEFAULT,
3019 H5P_DEFAULT);
3020 // write the dataset for the property enum
3021 status = H5Dwrite(property_enum_dataset,
3022 property_enum_id,
3023 H5S_ALL,
3024 H5S_ALL,
3025 H5P_DEFAULT,
3026 &property);
3027 AssertThrow(status >= 0, ExcIO());
3028
3029 status = H5Dclose(state_enum_dataset);
3030 AssertThrow(status >= 0, ExcIO());
3031 status = H5Dclose(property_enum_dataset);
3032 AssertThrow(status >= 0, ExcIO());
3033 status = H5Sclose(state_enum_dataspace);
3034 AssertThrow(status >= 0, ExcIO());
3035 status = H5Sclose(property_enum_dataspace);
3036 AssertThrow(status >= 0, ExcIO());
3037 status = H5Tclose(state_enum_id);
3038 AssertThrow(status >= 0, ExcIO());
3039 status = H5Tclose(property_enum_id);
3040 AssertThrow(status >= 0, ExcIO());
3041 status = H5Fclose(file_id_reopen);
3042 AssertThrow(status >= 0, ExcIO());
3043 }
3044
3045# endif
3046}
3047
3048
3049
3050template <typename NumberType>
3051void
3052ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
3053{
3054# ifndef DEAL_II_WITH_HDF5
3055 (void)filename;
3056 AssertThrow(false, ExcNeedsHDF5());
3057# else
3058# ifdef H5_HAVE_PARALLEL
3059 // implementation for configurations equipped with a parallel file system
3060 load_parallel(filename);
3061
3062# else
3063 // implementation for configurations with no parallel file system
3064 load_serial(filename);
3065# endif
3066# endif
3067}
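// A usage sketch (illustrative only): the matrix to be filled has to be
// constructed with the dimensions stored in the file before calling load();
// its block-cyclic distribution may differ from the one used when saving:
//
//   ScaLAPACKMatrix<double> B(m, n, grid, block_size, block_size);
//   B.load("A.h5");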
3068
3069
3070
3071template <typename NumberType>
3072void
3073ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
3074{
3075# ifndef DEAL_II_WITH_HDF5
3076 (void)filename;
3077 AssertThrow(false, ExcNeedsHDF5());
3078# else
3079
3080 /*
3081 * The content of the matrix is read into a matrix using a 1x1 process
3082 * grid. Therefore, one process reads all the data from the file before it
3083 * is redistributed to the current object.
3084 */
3085 // create a 1x1 process grid
3086 const auto one_grid =
3087 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3088 1,
3089 1);
3090
3091 const int MB = n_rows, NB = n_columns;
3092 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);
3093
3094 int state_int = -1;
3095 int property_int = -1;
3096
3097 // the 1x1 grid has only one process and this one reads
3098 // the content of the matrix from the HDF5 file
3099 if (tmp.grid->mpi_process_is_active)
3100 {
3101 herr_t status;
3102
3103 // open the file in read-only mode
3104 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
3105
3106 // open the dataset in the file
3107 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3108
3109 // check the datatype of the data in the file
3110 // datatype of source and destination must have the same class
3111 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3112 // Selection
3113 hid_t datatype = H5Dget_type(dataset_id);
3114 H5T_class_t t_class_in = H5Tget_class(datatype);
3115 H5T_class_t t_class = H5Tget_class(hdf5_type_id(tmp.values.data()));
3116 AssertThrow(
3117 t_class_in == t_class,
3118 ExcMessage(
3119 "The data type of the matrix to be read does not match the archive"));
3120
3121 // get dataspace handle
3122 hid_t dataspace_id = H5Dget_space(dataset_id);
3123 // get number of dimensions
3124 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3125 AssertThrow(ndims == 2, ExcIO());
3126 // get every dimension
3127 hsize_t dims[2];
3128 H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3129 AssertThrow(
3130 static_cast<int>(dims[0]) == n_columns,
3131 ExcMessage(
3132 "The number of columns of the matrix does not match the content of the archive"));
3133 AssertThrow(
3134 static_cast<int>(dims[1]) == n_rows,
3135 ExcMessage(
3136 "The number of rows of the matrix does not match the content of the archive"));
3137
3138 // read data
3139 status = H5Dread(dataset_id,
3140 hdf5_type_id(tmp.values.data()),
3141 H5S_ALL,
3142 H5S_ALL,
3143 H5P_DEFAULT,
3144 tmp.values.data());
3145 AssertThrow(status >= 0, ExcIO());
3146
3147 // create HDF5 enum type for LAPACKSupport::State and
3148 // LAPACKSupport::Property
3149 hid_t state_enum_id, property_enum_id;
3150 internal::create_HDF5_state_enum_id(state_enum_id);
3151 internal::create_HDF5_property_enum_id(property_enum_id);
3152
3153 // open the datasets for the state and property enum in the file
3154 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3155 hid_t datatype_state = H5Dget_type(dataset_state_id);
3156 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3157 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3158
3159 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3160 hid_t datatype_property = H5Dget_type(dataset_property_id);
3161 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3162 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3163
3164 // get dataspace handles
3165 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3166 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3167 // get number of dimensions
3168 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3169 AssertThrow(ndims_state == 1, ExcIO());
3170 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3171 AssertThrow(ndims_property == 1, ExcIO());
3172 // get every dimension
3173 hsize_t dims_state[1];
3174 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3175 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3176 hsize_t dims_property[1];
3177 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3178 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3179
3180 // read data
3181 status = H5Dread(dataset_state_id,
3182 state_enum_id,
3183 H5S_ALL,
3184 H5S_ALL,
3185 H5P_DEFAULT,
3186 &tmp.state);
3187 AssertThrow(status >= 0, ExcIO());
3188 // To send the state from the root process to the other processes
3189 // the state enum is cast to an integer, which will be broadcast and
3190 // subsequently cast back to the enum type
3191 state_int = static_cast<int>(tmp.state);
3192
3193 status = H5Dread(dataset_property_id,
3194 property_enum_id,
3195 H5S_ALL,
3196 H5S_ALL,
3197 H5P_DEFAULT,
3198 &tmp.property);
3199 AssertThrow(status >= 0, ExcIO());
3200 // To send the property from the root process to the other processes
3201 // the property enum is cast to an integer, which will be broadcast and
3202 // subsequently cast back to the enum type
3203 property_int = static_cast<int>(tmp.property);
3204
3205 // terminate access to the data spaces
3206 status = H5Sclose(dataspace_id);
3207 AssertThrow(status >= 0, ExcIO());
3208 status = H5Sclose(dataspace_state);
3209 AssertThrow(status >= 0, ExcIO());
3210 status = H5Sclose(dataspace_property);
3211 AssertThrow(status >= 0, ExcIO());
3212
3213 // release data type handles
3214 status = H5Tclose(datatype);
3215 AssertThrow(status >= 0, ExcIO());
3216 status = H5Tclose(state_enum_id);
3217 AssertThrow(status >= 0, ExcIO());
3218 status = H5Tclose(property_enum_id);
3219 AssertThrow(status >= 0, ExcIO());
3220
3221 // end access to the data sets and release resources used by them
3222 status = H5Dclose(dataset_state_id);
3223 AssertThrow(status >= 0, ExcIO());
3224 status = H5Dclose(dataset_id);
3225 AssertThrow(status >= 0, ExcIO());
3226 status = H5Dclose(dataset_property_id);
3227 AssertThrow(status >= 0, ExcIO());
3228
3229 // close the file.
3230 status = H5Fclose(file_id);
3231 AssertThrow(status >= 0, ExcIO());
3232 }
3233 // so far only the root process has the correct state integer --> broadcasting
3234 tmp.grid->send_to_inactive(&state_int, 1);
3235 // so far only the root process has the correct property integer -->
3236 // broadcasting
3237 tmp.grid->send_to_inactive(&property_int, 1);
3238
3239 tmp.state = static_cast<LAPACKSupport::State>(state_int);
3240 tmp.property = static_cast<LAPACKSupport::Property>(property_int);
3241
3242 tmp.copy_to(*this);
3243
3244# endif // DEAL_II_WITH_HDF5
3245}
3246
3247
3248
3249template <typename NumberType>
3250void
3251ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
3252{
3253# ifndef DEAL_II_WITH_HDF5
3254 (void)filename;
3255 AssertThrow(false, ExcNeedsHDF5());
3256# else
3257# ifndef H5_HAVE_PARALLEL
3259# else
3260
3261 const unsigned int n_mpi_processes(
3262 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
3263 MPI_Info info = MPI_INFO_NULL;
3264 /*
3265 * The content of the distributed matrix is copied to a matrix using a
3266 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
3267 * of the matrix, which they can read from the file
3268 */
3269 // create a 1xP column grid with P being the number of MPI processes
3270 const auto column_grid =
3271 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3272 1,
3273 n_mpi_processes);
3274
3275 const int MB = n_rows;
3276 // for the choice of NB see explanation in save_parallel()
3277 const int NB = std::max(static_cast<int>(std::ceil(
3278 static_cast<double>(n_columns) / n_mpi_processes)),
3279 column_block_size);
3280
3281 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
3282
3283 // get pointer to data held by the process
3284 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
3285
3286 herr_t status;
3287
3288 // set up file access property list with parallel I/O access
3289 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
3290 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
3291 AssertThrow(status >= 0, ExcIO());
3292
3293 // open file collectively in read-only mode and release property list
3294 // identifier
3295 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
3296 status = H5Pclose(plist_id);
3297 AssertThrow(status >= 0, ExcIO());
3298
3299 // open the dataset in the file collectively
3300 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3301
3302 // check the datatype of the dataset in the file
3303 // if the classes of type of the dataset and the matrix do not match abort
3304 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3305 // Selection
3306 hid_t datatype = hdf5_type_id(data);
3307 hid_t datatype_inp = H5Dget_type(dataset_id);
3308 H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
3309 H5T_class_t t_class = H5Tget_class(datatype);
3310 AssertThrow(
3311 t_class_inp == t_class,
3312 ExcMessage(
3313 "The data type of the matrix to be read does not match the archive"));
3314
3315 // get the dimensions of the matrix stored in the file
3316 // get dataspace handle
3317 hid_t dataspace_id = H5Dget_space(dataset_id);
3318 // get number of dimensions
3319 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3320 AssertThrow(ndims == 2, ExcIO());
3321 // get every dimension
3322 hsize_t dims[2];
3323 status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3324 AssertThrow(status >= 0, ExcIO());
3325 AssertThrow(
3326 static_cast<int>(dims[0]) == n_columns,
3327 ExcMessage(
3328 "The number of columns of the matrix does not match the content of the archive"));
3329 AssertThrow(
3330 static_cast<int>(dims[1]) == n_rows,
3331 ExcMessage(
3332 "The number of rows of the matrix does not match the content of the archive"));
3333
3334 // gather the number of local rows and columns from all processes
3335 std::vector<int> proc_n_local_rows(n_mpi_processes),
3336 proc_n_local_columns(n_mpi_processes);
3337 int ierr = MPI_Allgather(&tmp.n_local_rows,
3338 1,
3339 MPI_INT,
3340 proc_n_local_rows.data(),
3341 1,
3342 MPI_INT,
3343 tmp.grid->mpi_communicator);
3344 AssertThrowMPI(ierr);
3345 ierr = MPI_Allgather(&tmp.n_local_columns,
3346 1,
3347 MPI_INT,
3348 proc_n_local_columns.data(),
3349 1,
3350 MPI_INT,
3351 tmp.grid->mpi_communicator);
3352 AssertThrowMPI(ierr);
3353
3354 const unsigned int my_rank(
3355 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
3356
3357 // hyperslab selection parameters
3358 // each process defines dataset in memory and writes it to the hyperslab in
3359 // the file
3360 hsize_t count[2];
3361 count[0] = tmp.n_local_columns;
3362 count[1] = tmp.n_local_rows;
3363
3364 hsize_t offset[2] = {0};
3365 for (unsigned int i = 0; i < my_rank; ++i)
3366 offset[0] += proc_n_local_columns[i];
3367
3368 // select hyperslab in the file
3369 status = H5Sselect_hyperslab(
3370 dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
3371 AssertThrow(status >= 0, ExcIO());
3372
3373 // create a memory dataspace independently
3374 hid_t memspace = H5Screate_simple(2, count, nullptr);
3375
3376 // read data independently
3377 status =
3378 H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
3379 AssertThrow(status >= 0, ExcIO());
3380
3381 // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
3382 hid_t state_enum_id, property_enum_id;
3383 internal::create_HDF5_state_enum_id(state_enum_id);
3384 internal::create_HDF5_property_enum_id(property_enum_id);
3385
3386 // open the datasets for the state and property enum in the file
3387 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3388 hid_t datatype_state = H5Dget_type(dataset_state_id);
3389 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3390 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3391
3392 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3393 hid_t datatype_property = H5Dget_type(dataset_property_id);
3394 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3395 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3396
3397 // get dataspace handles
3398 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3399 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3400 // get number of dimensions
3401 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3402 AssertThrow(ndims_state == 1, ExcIO());
3403 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3404 AssertThrow(ndims_property == 1, ExcIO());
3405 // get every dimension
3406 hsize_t dims_state[1];
3407 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3408 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3409 hsize_t dims_property[1];
3410 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3411 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3412
3413 // read data
3414 status = H5Dread(
3415 dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
3416 AssertThrow(status >= 0, ExcIO());
3417
3418 status = H5Dread(dataset_property_id,
3419 property_enum_id,
3420 H5S_ALL,
3421 H5S_ALL,
3422 H5P_DEFAULT,
3423 &tmp.property);
3424 AssertThrow(status >= 0, ExcIO());
3425
3426 // close/release sources
3427 status = H5Sclose(memspace);
3428 AssertThrow(status >= 0, ExcIO());
3429 status = H5Dclose(dataset_id);
3430 AssertThrow(status >= 0, ExcIO());
3431 status = H5Dclose(dataset_state_id);
3432 AssertThrow(status >= 0, ExcIO());
3433 status = H5Dclose(dataset_property_id);
3434 AssertThrow(status >= 0, ExcIO());
3435 status = H5Sclose(dataspace_id);
3436 AssertThrow(status >= 0, ExcIO());
3437 status = H5Sclose(dataspace_state);
3438 AssertThrow(status >= 0, ExcIO());
3439 status = H5Sclose(dataspace_property);
3440 AssertThrow(status >= 0, ExcIO());
3441 // status = H5Tclose(datatype);
3442 // AssertThrow(status >= 0, ExcIO());
3443 status = H5Tclose(state_enum_id);
3444 AssertThrow(status >= 0, ExcIO());
3445 status = H5Tclose(property_enum_id);
3446 AssertThrow(status >= 0, ExcIO());
3447 status = H5Fclose(file_id);
3448 AssertThrow(status >= 0, ExcIO());
3449
3450 // copying the distributed matrices
3451 tmp.copy_to(*this);
3452
3453# endif // H5_HAVE_PARALLEL
3454# endif // DEAL_II_WITH_HDF5
3455}
3456
3457
3458
3459namespace internal
3460{
3461 namespace
3462 {
3463 template <typename NumberType>
3464 void
3465 scale_columns(ScaLAPACKMatrix<NumberType> &matrix,
3466 const ArrayView<const NumberType> &factors)
3467 {
3468 Assert(matrix.n() == factors.size(),
3469 ExcDimensionMismatch(matrix.n(), factors.size()));
3470
3471 for (unsigned int i = 0; i < matrix.local_n(); ++i)
3472 {
3473 const NumberType s = factors[matrix.global_column(i)];
3474
3475 for (unsigned int j = 0; j < matrix.local_m(); ++j)
3476 matrix.local_el(j, i) *= s;
3477 }
3478 }
3479
3480 template <typename NumberType>
3481 void
3482 scale_rows(ScaLAPACKMatrix<NumberType> &matrix,
3483 const ArrayView<const NumberType> &factors)
3484 {
3485 Assert(matrix.m() == factors.size(),
3486 ExcDimensionMismatch(matrix.m(), factors.size()));
3487
3488 for (unsigned int i = 0; i < matrix.local_m(); ++i)
3489 {
3490 const NumberType s = factors[matrix.global_row(i)];
3491
3492 for (unsigned int j = 0; j < matrix.local_n(); ++j)
3493 matrix.local_el(i, j) *= s;
3494 }
3495 }
3496
3497 } // namespace
3498} // namespace internal
3499
3500
3501
3502template <typename NumberType>
3503template <class InputVector>
3504void
3505ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
3506{
3507 if (this->grid->mpi_process_is_active)
3508 internal::scale_columns(*this, make_array_view(factors));
3509}
3510
3511
3512
3513template <typename NumberType>
3514template <class InputVector>
3515void
3516ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
3517{
3518 if (this->grid->mpi_process_is_active)
3519 internal::scale_rows(*this, make_array_view(factors));
3520}
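// scale_columns(f) multiplies the j-th column of the matrix by f[j] and
// scale_rows(f) multiplies the i-th row by f[i]; this is equivalent to
// multiplying by a diagonal matrix from the right or left, respectively,
// as used, e.g., with inv_sigma in pseudoinverse() above.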
3521
3522
3523
3524// instantiations
3525# include "scalapack.inst"
3526
3527
3528DEAL_II_NAMESPACE_CLOSE
3529
3530#endif // DEAL_II_WITH_SCALAPACK