scalapack.cc (deal.II version 9.4.1 reference documentation)
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2017 - 2022 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
17#include <deal.II/lac/scalapack.h>
18
19#ifdef DEAL_II_WITH_SCALAPACK
20
22# include <deal.II/base/mpi.h>
23# include <deal.II/base/mpi.templates.h>
24
25# include <deal.II/lac/scalapack.templates.h>
26
27# ifdef DEAL_II_WITH_HDF5
28# include <hdf5.h>
29# endif
30
31# include <memory>
32
33DEAL_II_NAMESPACE_OPEN
34
35# ifdef DEAL_II_WITH_HDF5
36
37template <typename number>
38inline hid_t
39hdf5_type_id(const number *)
40{
41 Assert(false, ExcNotImplemented());
42 // don't know what to put here; it does not matter
43 return -1;
44}
45
46inline hid_t
47hdf5_type_id(const double *)
48{
49 return H5T_NATIVE_DOUBLE;
50}
51
52inline hid_t
53hdf5_type_id(const float *)
54{
55 return H5T_NATIVE_FLOAT;
56}
57
58inline hid_t
59hdf5_type_id(const int *)
60{
61 return H5T_NATIVE_INT;
62}
63
64inline hid_t
65hdf5_type_id(const unsigned int *)
66{
67 return H5T_NATIVE_UINT;
68}
69
70inline hid_t
71hdf5_type_id(const char *)
72{
73 return H5T_NATIVE_CHAR;
74}
75# endif // DEAL_II_WITH_HDF5
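// Illustrative note (not part of the original sources): the overloads above let
// the HDF5 save/load code further down pick the matching native HDF5 datatype
// from the matrix number type at compile time, e.g.
//
//   double d = 0.;
//   const hid_t t = hdf5_type_id(&d); // resolves to H5T_NATIVE_DOUBLE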
76
77
78
79template <typename NumberType>
80ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
81 const size_type n_rows_,
82 const size_type n_columns_,
83 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
84 const size_type row_block_size_,
85 const size_type column_block_size_,
86 const LAPACKSupport::Property property_)
87 : uplo('L')
88 , // for non-symmetric matrices this is not needed
89 first_process_row(0)
90 , first_process_column(0)
91 , submatrix_row(1)
92 , submatrix_column(1)
93{
94 reinit(n_rows_,
95 n_columns_,
96 process_grid,
97 row_block_size_,
98 column_block_size_,
99 property_);
100}
101
102
103
104template <typename NumberType>
105ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
106 const size_type size,
107 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
108 const size_type block_size,
109 const LAPACKSupport::Property property)
110 : ScaLAPACKMatrix<NumberType>(size,
111 size,
112 process_grid,
113 block_size,
114 block_size,
115 property)
116{}
117
118
119
120template <typename NumberType>
121ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
122 const std::string & filename,
123 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
124 const size_type row_block_size,
125 const size_type column_block_size)
126 : uplo('L')
127 , // for non-symmetric matrices this is not needed
128 first_process_row(0)
129 , first_process_column(0)
130 , submatrix_row(1)
131 , submatrix_column(1)
132{
133# ifndef DEAL_II_WITH_HDF5
134 (void)filename;
135 (void)process_grid;
136 (void)row_block_size;
137 (void)column_block_size;
138 Assert(
139 false,
140 ExcMessage(
141 "This function is only available when deal.II is configured with HDF5"));
142# else
143
144 const unsigned int this_mpi_process(
145 Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));
146
147 // Before reading the content from disk the root process determines the
148 // dimensions of the matrix. Subsequently, memory is allocated by a call to
149 // reinit() and the matrix is loaded by a call to load().
150 if (this_mpi_process == 0)
151 {
152 herr_t status = 0;
153
154 // open file in read-only mode
155 hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
156 AssertThrow(file >= 0, ExcIO());
157
158 // get data set in file
159 hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);
160 AssertThrow(dataset >= 0, ExcIO());
161
162 // determine file space
163 hid_t filespace = H5Dget_space(dataset);
164
165 // get number of dimensions in data set
166 int rank = H5Sget_simple_extent_ndims(filespace);
167 AssertThrow(rank == 2, ExcIO());
168 hsize_t dims[2];
169 status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
170 AssertThrow(status >= 0, ExcIO());
171
172 // due to ScaLAPACK's column-major memory layout the dimensions are
173 // swapped
174 n_rows = dims[1];
175 n_columns = dims[0];
176
177 // close/release resources
178 status = H5Sclose(filespace);
179 AssertThrow(status >= 0, ExcIO());
180 status = H5Dclose(dataset);
181 AssertThrow(status >= 0, ExcIO());
182 status = H5Fclose(file);
183 AssertThrow(status >= 0, ExcIO());
184 }
185 int ierr = MPI_Bcast(&n_rows,
186 1,
187 Utilities::MPI::mpi_type_id_for_type<decltype(n_rows)>,
188 0 /*from root*/,
189 process_grid->mpi_communicator);
190 AssertThrowMPI(ierr);
191
192 ierr = MPI_Bcast(&n_columns,
193 1,
194 Utilities::MPI::mpi_type_id_for_type<decltype(n_columns)>,
195 0 /*from root*/,
196 process_grid->mpi_communicator);
197 AssertThrowMPI(ierr);
198
199 // the property will be overwritten by the subsequent call to load()
200 reinit(n_rows,
201 n_columns,
202 process_grid,
203 row_block_size,
204 column_block_size,
205 LAPACKSupport::Property::general);
206
207 load(filename.c_str());
208
209# endif // DEAL_II_WITH_HDF5
210}
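// Usage sketch (illustrative, not part of the original sources): assuming a
// file "matrix.h5" that was previously written by ScaLAPACKMatrix::save(),
// the constructor above restores the matrix on every process of the grid:
//
//   const auto grid =
//     std::make_shared<Utilities::MPI::ProcessGrid>(MPI_COMM_WORLD, 2, 2);
//   ScaLAPACKMatrix<double> A("matrix.h5", grid, 32, 32);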
211
212
213
214template <typename NumberType>
215void
216ScaLAPACKMatrix<NumberType>::reinit(
217 const size_type n_rows_,
218 const size_type n_columns_,
219 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
220 const size_type row_block_size_,
221 const size_type column_block_size_,
222 const LAPACKSupport::Property property_)
223{
224 Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
225 Assert(column_block_size_ > 0,
226 ExcMessage("Column block size has to be positive."));
227 Assert(
228 row_block_size_ <= n_rows_,
229 ExcMessage(
230 "Row block size can not be greater than the number of rows of the matrix"));
231 Assert(
232 column_block_size_ <= n_columns_,
233 ExcMessage(
234 "Column block size can not be greater than the number of columns of the matrix"));
235
236 state = LAPACKSupport::State::matrix;
237 property = property_;
238 grid = process_grid;
239 n_rows = n_rows_;
240 n_columns = n_columns_;
241 row_block_size = row_block_size_;
242 column_block_size = column_block_size_;
243
244 if (grid->mpi_process_is_active)
245 {
246 // Get local sizes:
247 n_local_rows = numroc_(&n_rows,
248 &row_block_size,
249 &(grid->this_process_row),
250 &first_process_row,
251 &(grid->n_process_rows));
252 n_local_columns = numroc_(&n_columns,
253 &column_block_size,
254 &(grid->this_process_column),
255 &first_process_column,
256 &(grid->n_process_columns));
257
258 // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different
259 // between processes
260 int lda = std::max(1, n_local_rows);
261
262 int info = 0;
263 descinit_(descriptor,
264 &n_rows,
265 &n_columns,
266 &row_block_size,
267 &column_block_size,
268 &first_process_row,
269 &first_process_column,
270 &(grid->blacs_context),
271 &lda,
272 &info);
273 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
274
275 this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
276 }
277 else
278 {
279 // set process-local variables to something telling:
280 n_local_rows = -1;
281 n_local_columns = -1;
282 std::fill(std::begin(descriptor), std::end(descriptor), -1);
283 }
284}
285
286
287
288template <typename NumberType>
289void
290ScaLAPACKMatrix<NumberType>::reinit(
291 const size_type size,
292 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
293 const size_type block_size,
294 const LAPACKSupport::Property property)
295{
296 reinit(size, size, process_grid, block_size, block_size, property);
297}
298
299
300
301template <typename NumberType>
302void
303ScaLAPACKMatrix<NumberType>::set_property(
304 const LAPACKSupport::Property property_)
305{
306 property = property_;
307}
308
309
310
311template <typename NumberType>
312LAPACKSupport::Property
313ScaLAPACKMatrix<NumberType>::get_property() const
314{
315 return property;
316}
317
318
319
320template <typename NumberType>
321LAPACKSupport::State
322ScaLAPACKMatrix<NumberType>::get_state() const
323{
324 return state;
325}
326
327
328
329template <typename NumberType>
330ScaLAPACKMatrix<NumberType> &
331ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
332{
333 // FIXME: another way to copy is to use pdgeadd_ PBLAS routine.
334 // This routine computes the sum of two matrices B := a*A + b*B.
335 // Matrices can have different distributions; in particular, matrix A can
336 // be owned by only one process, so we can set a=1 and b=0 to copy
337 // non-distributed matrix A into distributed matrix B.
338 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
339 Assert(n_columns == int(matrix.n()),
340 ExcDimensionMismatch(n_columns, matrix.n()));
341
342 if (grid->mpi_process_is_active)
343 {
344 for (int i = 0; i < n_local_rows; ++i)
345 {
346 const int glob_i = global_row(i);
347 for (int j = 0; j < n_local_columns; ++j)
348 {
349 const int glob_j = global_column(j);
350 local_el(i, j) = matrix(glob_i, glob_j);
351 }
352 }
353 }
354 state = LAPACKSupport::matrix;
355 return *this;
356}
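// Usage sketch (illustrative, not part of the original sources): given a serial
// FullMatrix holding the same entries on every process, the assignment operator
// above distributes it into the 2D block-cyclic layout:
//
//   const unsigned int n = 1000, nb = 32;
//   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//     MPI_COMM_WORLD, n, n, nb, nb);
//   ScaLAPACKMatrix<double> A(n, n, grid, nb, nb);
//   FullMatrix<double> serial(n, n);
//   // ... fill 'serial' identically on all processes ...
//   A = serial;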
357
358
359
360template <typename NumberType>
361void
362ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
363 const unsigned int rank)
364{
365 if (n_rows * n_columns == 0)
366 return;
367
368 const unsigned int this_mpi_process(
369 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
370
371# ifdef DEBUG
372 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
373 ExcMessage("All processes have to call routine with identical rank"));
374 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
375 ExcMessage("All processes have to call routine with identical rank"));
376# endif
377
378 // root process has to be active in the grid of A
379 if (this_mpi_process == rank)
380 {
381 Assert(grid->mpi_process_is_active, ExcInternalError());
382 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
383 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
384 }
385 // Create 1x1 grid for matrix B.
386 // The underlying grid for matrix B only contains the process #rank.
387 // This grid will be used to copy the serial matrix B to the distributed
388 // matrix using the ScaLAPACK routine pgemr2d.
389 MPI_Group group_A;
390 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
391 const int n = 1;
392 const std::vector<int> ranks(n, rank);
393 MPI_Group group_B;
394 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
395 MPI_Comm communicator_B;
396
397 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_from;
398 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
399 group_B,
400 mpi_tag,
401 &communicator_B);
402 AssertThrowMPI(ierr);
403 int n_proc_rows_B = 1, n_proc_cols_B = 1;
404 int this_process_row_B = -1, this_process_column_B = -1;
405 int blacs_context_B = -1;
406 if (MPI_COMM_NULL != communicator_B)
407 {
408 // Initialize the BLACS context from the provided communicator
409 blacs_context_B = Csys2blacs_handle(communicator_B);
410 const char *order = "Col";
411 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
412 Cblacs_gridinfo(blacs_context_B,
413 &n_proc_rows_B,
414 &n_proc_cols_B,
415 &this_process_row_B,
416 &this_process_column_B);
417 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
418 // the active process of grid B has to be process #rank of the
419 // communicator attached to A
420 Assert(this_mpi_process == rank, ExcInternalError());
421 }
422 const bool mpi_process_is_active_B =
423 (this_process_row_B >= 0 && this_process_column_B >= 0);
424
425 // create descriptor for matrix B
426 std::vector<int> descriptor_B(9, -1);
427 const int first_process_row_B = 0, first_process_col_B = 0;
428
429 if (mpi_process_is_active_B)
430 {
431 // Get local sizes:
432 int n_local_rows_B = numroc_(&n_rows,
433 &n_rows,
434 &this_process_row_B,
435 &first_process_row_B,
436 &n_proc_rows_B);
437 int n_local_cols_B = numroc_(&n_columns,
438 &n_columns,
439 &this_process_column_B,
440 &first_process_col_B,
441 &n_proc_cols_B);
442 Assert(n_local_rows_B == n_rows, ExcInternalError());
443 Assert(n_local_cols_B == n_columns, ExcInternalError());
444 (void)n_local_cols_B;
445
446 int lda = std::max(1, n_local_rows_B);
447 int info = 0;
448 descinit_(descriptor_B.data(),
449 &n_rows,
450 &n_columns,
451 &n_rows,
452 &n_columns,
453 &first_process_row_B,
454 &first_process_col_B,
455 &blacs_context_B,
456 &lda,
457 &info);
458 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
459 }
460 if (this->grid->mpi_process_is_active)
461 {
462 const int ii = 1;
463 NumberType *loc_vals_A =
464 this->values.size() > 0 ? this->values.data() : nullptr;
465 const NumberType *loc_vals_B =
466 mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
467
468 // pgemr2d has to be called only for processes active on grid attached to
469 // matrix A
470 pgemr2d(&n_rows,
471 &n_columns,
472 loc_vals_B,
473 &ii,
474 &ii,
475 descriptor_B.data(),
476 loc_vals_A,
477 &ii,
478 &ii,
479 this->descriptor,
480 &(this->grid->blacs_context));
481 }
482 if (mpi_process_is_active_B)
483 Cblacs_gridexit(blacs_context_B);
484
485 MPI_Group_free(&group_A);
486 MPI_Group_free(&group_B);
487 if (MPI_COMM_NULL != communicator_B)
488 Utilities::MPI::free_communicator(communicator_B);
489
490 state = LAPACKSupport::matrix;
491}
492
493
494
495template <typename NumberType>
496unsigned int
497ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
498{
499 Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
500 ExcIndexRange(loc_row, 0, n_local_rows));
501 const int i = loc_row + 1;
502 return indxl2g_(&i,
503 &row_block_size,
504 &(grid->this_process_row),
505 &first_process_row,
506 &(grid->n_process_rows)) -
507 1;
508}
509
510
511
512template <typename NumberType>
513unsigned int
514ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
515{
516 Assert(n_local_columns >= 0 &&
517 loc_column < static_cast<unsigned int>(n_local_columns),
518 ExcIndexRange(loc_column, 0, n_local_columns));
519 const int j = loc_column + 1;
520 return indxl2g_(&j,
521 &column_block_size,
522 &(grid->this_process_column),
523 &first_process_column,
524 &(grid->n_process_columns)) -
525 1;
526}
527
528
529
530template <typename NumberType>
531void
532ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
533 const unsigned int rank) const
534{
535 if (n_rows * n_columns == 0)
536 return;
537
538 const unsigned int this_mpi_process(
539 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
540
541# ifdef DEBUG
542 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
543 ExcMessage("All processes have to call routine with identical rank"));
544 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
545 ExcMessage("All processes have to call routine with identical rank"));
546# endif
547
548 if (this_mpi_process == rank)
549 {
550 // the process which gets the serial copy has to be in the process grid
551 Assert(this->grid->is_process_active(), ExcInternalError());
552 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
553 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
554 }
555
556 // Create 1x1 grid for matrix B.
557 // The underlying grid for matrix B only contains the process #rank.
558 // This grid will be used to copy the distributed matrix to the serial
559 // matrix B using the ScaLAPACK routine pgemr2d.
560 MPI_Group group_A;
561 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
562 const int n = 1;
563 const std::vector<int> ranks(n, rank);
564 MPI_Group group_B;
565 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
566 MPI_Comm communicator_B;
567
568 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to;
569 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
570 group_B,
571 mpi_tag,
572 &communicator_B);
573 AssertThrowMPI(ierr);
574 int n_proc_rows_B = 1, n_proc_cols_B = 1;
575 int this_process_row_B = -1, this_process_column_B = -1;
576 int blacs_context_B = -1;
577 if (MPI_COMM_NULL != communicator_B)
578 {
579 // Initialize the BLACS context from the provided communicator
580 blacs_context_B = Csys2blacs_handle(communicator_B);
581 const char *order = "Col";
582 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
583 Cblacs_gridinfo(blacs_context_B,
584 &n_proc_rows_B,
585 &n_proc_cols_B,
586 &this_process_row_B,
587 &this_process_column_B);
588 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
589 // the active process of grid B has to be process #rank of the
590 // communicator attached to A
591 Assert(this_mpi_process == rank, ExcInternalError());
592 }
593 const bool mpi_process_is_active_B =
594 (this_process_row_B >= 0 && this_process_column_B >= 0);
595
596 // create descriptor for matrix B
597 std::vector<int> descriptor_B(9, -1);
598 const int first_process_row_B = 0, first_process_col_B = 0;
599
600 if (mpi_process_is_active_B)
601 {
602 // Get local sizes:
603 int n_local_rows_B = numroc_(&n_rows,
604 &n_rows,
605 &this_process_row_B,
606 &first_process_row_B,
607 &n_proc_rows_B);
608 int n_local_cols_B = numroc_(&n_columns,
609 &n_columns,
610 &this_process_column_B,
611 &first_process_col_B,
612 &n_proc_cols_B);
613 Assert(n_local_rows_B == n_rows, ExcInternalError());
614 Assert(n_local_cols_B == n_columns, ExcInternalError());
615 (void)n_local_cols_B;
616
617 int lda = std::max(1, n_local_rows_B);
618 int info = 0;
619 // fill descriptor for matrix B
620 descinit_(descriptor_B.data(),
621 &n_rows,
622 &n_columns,
623 &n_rows,
624 &n_columns,
625 &first_process_row_B,
626 &first_process_col_B,
627 &blacs_context_B,
628 &lda,
629 &info);
630 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
631 }
632 // pgemr2d has to be called only for processes active on grid attached to
633 // matrix A
634 if (this->grid->mpi_process_is_active)
635 {
636 const int ii = 1;
637 const NumberType *loc_vals_A =
638 this->values.size() > 0 ? this->values.data() : nullptr;
639 NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
640
641 pgemr2d(&n_rows,
642 &n_columns,
643 loc_vals_A,
644 &ii,
645 &ii,
646 this->descriptor,
647 loc_vals_B,
648 &ii,
649 &ii,
650 descriptor_B.data(),
651 &(this->grid->blacs_context));
652 }
653 if (mpi_process_is_active_B)
654 Cblacs_gridexit(blacs_context_B);
655
656 MPI_Group_free(&group_A);
657 MPI_Group_free(&group_B);
658 if (MPI_COMM_NULL != communicator_B)
659 Utilities::MPI::free_communicator(communicator_B);
660}
661
662
663
664template <typename NumberType>
665void
666ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
667{
668 // FIXME: use PDGEMR2D for copying?
669 // PDGEMR2D copies a submatrix of A on a submatrix of B.
670 // A and B can have different distributions
671 // see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
672 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
673 Assert(n_columns == int(matrix.n()),
674 ExcDimensionMismatch(n_columns, matrix.n()));
675
676 matrix = 0.;
677 if (grid->mpi_process_is_active)
678 {
679 for (int i = 0; i < n_local_rows; ++i)
680 {
681 const int glob_i = global_row(i);
682 for (int j = 0; j < n_local_columns; ++j)
683 {
684 const int glob_j = global_column(j);
685 matrix(glob_i, glob_j) = local_el(i, j);
686 }
687 }
688 }
689 Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
690
691 // we could move the following lines under the main loop above,
692 // but they would be dependent on glob_i and glob_j, which
693 // won't make it much prettier
694 if (state == LAPACKSupport::cholesky)
695 {
696 if (property == LAPACKSupport::lower_triangular)
697 for (unsigned int i = 0; i < matrix.n(); ++i)
698 for (unsigned int j = i + 1; j < matrix.m(); ++j)
699 matrix(i, j) = 0.;
700 else if (property == LAPACKSupport::upper_triangular)
701 for (unsigned int i = 0; i < matrix.n(); ++i)
702 for (unsigned int j = 0; j < i; ++j)
703 matrix(i, j) = 0.;
704 }
705 else if (property == LAPACKSupport::symmetric &&
706 state == LAPACKSupport::inverse_matrix)
707 {
708 if (uplo == 'L')
709 for (unsigned int i = 0; i < matrix.n(); ++i)
710 for (unsigned int j = i + 1; j < matrix.m(); ++j)
711 matrix(i, j) = matrix(j, i);
712 else if (uplo == 'U')
713 for (unsigned int i = 0; i < matrix.n(); ++i)
714 for (unsigned int j = 0; j < i; ++j)
715 matrix(i, j) = matrix(j, i);
716 }
717}
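// Usage sketch (illustrative, not part of the original sources): the function
// above gathers the distributed matrix into a serial FullMatrix, so that every
// process ends up with a complete copy:
//
//   FullMatrix<double> gathered(A.m(), A.n());
//   A.copy_to(gathered);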
718
719
720
721template <typename NumberType>
722void
723ScaLAPACKMatrix<NumberType>::copy_to(
724 ScaLAPACKMatrix<NumberType> &B,
725 const std::pair<unsigned int, unsigned int> &offset_A,
726 const std::pair<unsigned int, unsigned int> &offset_B,
727 const std::pair<unsigned int, unsigned int> &submatrix_size) const
728{
729 // submatrix is empty
730 if (submatrix_size.first == 0 || submatrix_size.second == 0)
731 return;
732
733 // range checking for matrix A
734 AssertIndexRange(offset_A.first, n_rows - submatrix_size.first + 1);
735 AssertIndexRange(offset_A.second, n_columns - submatrix_size.second + 1);
736
737 // range checking for matrix B
738 AssertIndexRange(offset_B.first, B.n_rows - submatrix_size.first + 1);
739 AssertIndexRange(offset_B.second, B.n_columns - submatrix_size.second + 1);
740
741 // Currently, copying of matrices will only be supported if A and B share the
742 // same MPI communicator
743 int ierr, comparison;
744 ierr = MPI_Comm_compare(grid->mpi_communicator,
745 B.grid->mpi_communicator,
746 &comparison);
747 AssertThrowMPI(ierr);
748 Assert(comparison == MPI_IDENT,
749 ExcMessage("Matrix A and B must have a common MPI Communicator"));
750
751 /*
752 * The routine pgemr2d requires a BLACS context resembling at least the union
753 * of process grids described by the BLACS contexts held by the ProcessGrids
754 * of matrix A and B. As A and B share the same MPI communicator, there is no
755 * need to create a union MPI communicator to initialise the BLACS context
756 */
757 int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
758 const char *order = "Col";
759 int union_n_process_rows =
760 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
761 int union_n_process_columns = 1;
762 Cblacs_gridinit(&union_blacs_context,
763 order,
764 union_n_process_rows,
765 union_n_process_columns);
766
767 int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
768 Cblacs_gridinfo(this->grid->blacs_context,
769 &n_grid_rows_A,
770 &n_grid_columns_A,
771 &my_row_A,
772 &my_column_A);
773
774 // check whether process is in the BLACS context of matrix A
775 const bool in_context_A =
776 (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
777 (my_column_A >= 0 && my_column_A < n_grid_columns_A);
778
779 int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
780 Cblacs_gridinfo(B.grid->blacs_context,
781 &n_grid_rows_B,
782 &n_grid_columns_B,
783 &my_row_B,
784 &my_column_B);
785
786 // check whether process is in the BLACS context of matrix B
787 const bool in_context_B =
788 (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
789 (my_column_B >= 0 && my_column_B < n_grid_columns_B);
790
791 const int n_rows_submatrix = submatrix_size.first;
792 const int n_columns_submatrix = submatrix_size.second;
793
794 // due to Fortran indexing one has to be added
795 int ia = offset_A.first + 1, ja = offset_A.second + 1;
796 int ib = offset_B.first + 1, jb = offset_B.second + 1;
797
798 std::array<int, 9> desc_A, desc_B;
799
800 const NumberType *loc_vals_A = nullptr;
801 NumberType * loc_vals_B = nullptr;
802
803 // Note: the function pgemr2d has to be called for all processes in the union
804 // BLACS context. If the calling process is not part of the BLACS context of A,
805 // desc_A[1] has to be -1 and all other parameters do not have to be set. If
806 // the calling process is not part of the BLACS context of B, desc_B[1] has to
807 // be -1 and all other parameters do not have to be set.
808 if (in_context_A)
809 {
810 if (this->values.size() != 0)
811 loc_vals_A = this->values.data();
812
813 for (unsigned int i = 0; i < desc_A.size(); ++i)
814 desc_A[i] = this->descriptor[i];
815 }
816 else
817 desc_A[1] = -1;
818
819 if (in_context_B)
820 {
821 if (B.values.size() != 0)
822 loc_vals_B = B.values.data();
823
824 for (unsigned int i = 0; i < desc_B.size(); ++i)
825 desc_B[i] = B.descriptor[i];
826 }
827 else
828 desc_B[1] = -1;
829
830 pgemr2d(&n_rows_submatrix,
831 &n_columns_submatrix,
832 loc_vals_A,
833 &ia,
834 &ja,
835 desc_A.data(),
836 loc_vals_B,
837 &ib,
838 &jb,
839 desc_B.data(),
840 &union_blacs_context);
841
842 B.state = LAPACKSupport::matrix;
843
844 // releasing the union BLACS context
845 Cblacs_gridexit(union_blacs_context);
846}
847
848
849
850template <typename NumberType>
851void
852ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
853{
854 Assert(n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
855 Assert(n_columns == dest.n_columns,
856 ExcDimensionMismatch(n_columns, dest.n_columns));
857
858 if (this->grid->mpi_process_is_active)
859 AssertThrow(
860 this->descriptor[0] == 1,
861 ExcMessage(
862 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
863 if (dest.grid->mpi_process_is_active)
864 AssertThrow(
865 dest.descriptor[0] == 1,
866 ExcMessage(
867 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
868
869 /*
870 * just in case of different process grids or block-cyclic distributions
871 * inter-process communication is necessary
872 * if distributed matrices have the same process grid and block sizes, local
873 * copying is enough
874 */
875 if ((this->grid != dest.grid) || (row_block_size != dest.row_block_size) ||
876 (column_block_size != dest.column_block_size))
877 {
878 /*
879 * get the MPI communicator, which is the union of the source and
880 * destination MPI communicator
881 */
882 int ierr = 0;
883 MPI_Group group_source, group_dest, group_union;
884 ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
885 AssertThrowMPI(ierr);
886 ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
887 AssertThrowMPI(ierr);
888 ierr = MPI_Group_union(group_source, group_dest, &group_union);
889 AssertThrowMPI(ierr);
890 MPI_Comm mpi_communicator_union;
891
892 // to create a communicator representing the union of the source
893 // and destination MPI communicator we need a communicator that
894 // is guaranteed to contain all desired processes -- i.e.,
895 // MPI_COMM_WORLD. on the other hand, as documented in the MPI
896 // standard, MPI_Comm_create_group is not collective on all
897 // processes in the first argument, but instead is collective on
898 // only those processes listed in the group. in other words,
899 // there is really no harm in passing MPI_COMM_WORLD as the
900 // first argument, even if the program we are currently running
901 // and that is calling this function only works on a subset of
902 // processes. the same holds for the wrapper/fallback we are using here.
903
904 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to2;
905 ierr = MPI_Comm_create_group(MPI_COMM_WORLD,
906 group_union,
907 mpi_tag,
908 &mpi_communicator_union);
909 AssertThrowMPI(ierr);
910
911 /*
912 * The routine pgemr2d requires a BLACS context resembling at least the
913 * union of process grids described by the BLACS contexts of matrix A and
914 * B
915 */
916 int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
917 const char *order = "Col";
918 int union_n_process_rows =
919 Utilities::MPI::n_mpi_processes(mpi_communicator_union);
920 int union_n_process_columns = 1;
921 Cblacs_gridinit(&union_blacs_context,
922 order,
923 union_n_process_rows,
924 union_n_process_columns);
925
926 const NumberType *loc_vals_source = nullptr;
927 NumberType * loc_vals_dest = nullptr;
928
929 if (this->grid->mpi_process_is_active && (this->values.size() > 0))
930 {
931 AssertThrow(this->values.size() > 0,
932 ExcMessage(
933 "source: process is active but local matrix empty"));
934 loc_vals_source = this->values.data();
935 }
936 if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
937 {
938 AssertThrow(
939 dest.values.size() > 0,
940 ExcMessage(
941 "destination: process is active but local matrix empty"));
942 loc_vals_dest = dest.values.data();
943 }
944 pgemr2d(&n_rows,
945 &n_columns,
946 loc_vals_source,
947 &submatrix_row,
948 &submatrix_column,
949 descriptor,
950 loc_vals_dest,
951 &dest.submatrix_row,
952 &dest.submatrix_column,
953 dest.descriptor,
954 &union_blacs_context);
955
956 Cblacs_gridexit(union_blacs_context);
957
958 if (mpi_communicator_union != MPI_COMM_NULL)
959 Utilities::MPI::free_communicator(mpi_communicator_union);
960 ierr = MPI_Group_free(&group_source);
961 AssertThrowMPI(ierr);
962 ierr = MPI_Group_free(&group_dest);
963 AssertThrowMPI(ierr);
964 ierr = MPI_Group_free(&group_union);
965 AssertThrowMPI(ierr);
966 }
967 else
968 // process is active in the process grid
969 if (this->grid->mpi_process_is_active)
970 dest.values = this->values;
971
972 dest.state = state;
973 dest.property = property;
974}
975
976
977
978template <typename NumberType>
979void
980ScaLAPACKMatrix<NumberType>::copy_transposed(
981 const ScaLAPACKMatrix<NumberType> &B)
982{
983 add(B, 0, 1, true);
984}
985
986
987
988template <typename NumberType>
989void
990ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
991 const NumberType alpha,
992 const NumberType beta,
993 const bool transpose_B)
994{
995 if (transpose_B)
996 {
997 Assert(n_rows == B.n_columns, ExcDimensionMismatch(n_rows, B.n_columns));
998 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
999 Assert(column_block_size == B.row_block_size,
1000 ExcDimensionMismatch(column_block_size, B.row_block_size));
1001 Assert(row_block_size == B.column_block_size,
1002 ExcDimensionMismatch(row_block_size, B.column_block_size));
1003 }
1004 else
1005 {
1006 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
1007 Assert(n_columns == B.n_columns,
1008 ExcDimensionMismatch(n_columns, B.n_columns));
1009 Assert(column_block_size == B.column_block_size,
1010 ExcDimensionMismatch(column_block_size, B.column_block_size));
1011 Assert(row_block_size == B.row_block_size,
1012 ExcDimensionMismatch(row_block_size, B.row_block_size));
1013 }
1014 Assert(this->grid == B.grid,
1015 ExcMessage("The matrices A and B need to have the same process grid"));
1016
1017 if (this->grid->mpi_process_is_active)
1018 {
1019 char trans_b = transpose_B ? 'T' : 'N';
1020 NumberType *A_loc =
1021 (this->values.size() > 0) ? this->values.data() : nullptr;
1022 const NumberType *B_loc =
1023 (B.values.size() > 0) ? B.values.data() : nullptr;
1024
1025 pgeadd(&trans_b,
1026 &n_rows,
1027 &n_columns,
1028 &beta,
1029 B_loc,
1030 &B.submatrix_row,
1031 &B.submatrix_column,
1032 B.descriptor,
1033 &alpha,
1034 A_loc,
1035 &submatrix_row,
1036 &submatrix_column,
1037 descriptor);
1038 }
1039 state = LAPACKSupport::matrix;
1040}
1041
1042
1043
1044template <typename NumberType>
1045void
1046ScaLAPACKMatrix<NumberType>::add(const NumberType a,
1047 const ScaLAPACKMatrix<NumberType> &B)
1048{
1049 add(B, 1, a, false);
1050}
1051
1052
1053
1054template <typename NumberType>
1055void
1056ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
1057 const ScaLAPACKMatrix<NumberType> &B)
1058{
1059 add(B, 1, a, true);
1060}
1061
1062
1063
1064template <typename NumberType>
1065void
1066ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
1067 const ScaLAPACKMatrix<NumberType> &B,
1068 const NumberType c,
1069 ScaLAPACKMatrix<NumberType> &C,
1070 const bool transpose_A,
1071 const bool transpose_B) const
1072{
1073 Assert(this->grid == B.grid,
1074 ExcMessage("The matrices A and B need to have the same process grid"));
1075 Assert(C.grid == B.grid,
1076 ExcMessage("The matrices B and C need to have the same process grid"));
1077
1078 // see for further info:
1079 // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgemm.htm
1080 if (!transpose_A && !transpose_B)
1081 {
1082 Assert(this->n_columns == B.n_rows,
1083 ExcDimensionMismatch(this->n_columns, B.n_rows));
1084 Assert(this->n_rows == C.n_rows,
1085 ExcDimensionMismatch(this->n_rows, C.n_rows));
1086 Assert(B.n_columns == C.n_columns,
1087 ExcDimensionMismatch(B.n_columns, C.n_columns));
1088 Assert(this->row_block_size == C.row_block_size,
1089 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1090 Assert(this->column_block_size == B.row_block_size,
1091 ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1092 Assert(B.column_block_size == C.column_block_size,
1093 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1094 }
1095 else if (transpose_A && !transpose_B)
1096 {
1097 Assert(this->n_rows == B.n_rows,
1098 ExcDimensionMismatch(this->n_rows, B.n_rows));
1099 Assert(this->n_columns == C.n_rows,
1100 ExcDimensionMismatch(this->n_columns, C.n_rows));
1101 Assert(B.n_columns == C.n_columns,
1102 ExcDimensionMismatch(B.n_columns, C.n_columns));
1103 Assert(this->column_block_size == C.row_block_size,
1104 ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1105 Assert(this->row_block_size == B.row_block_size,
1106 ExcDimensionMismatch(this->row_block_size, B.row_block_size));
1107 Assert(B.column_block_size == C.column_block_size,
1108 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1109 }
1110 else if (!transpose_A && transpose_B)
1111 {
1112 Assert(this->n_columns == B.n_columns,
1113 ExcDimensionMismatch(this->n_columns, B.n_columns));
1114 Assert(this->n_rows == C.n_rows,
1115 ExcDimensionMismatch(this->n_rows, C.n_rows));
1116 Assert(B.n_rows == C.n_columns,
1117 ExcDimensionMismatch(B.n_rows, C.n_columns));
1118 Assert(this->row_block_size == C.row_block_size,
1119 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1120 Assert(this->column_block_size == B.column_block_size,
1121 ExcDimensionMismatch(this->column_block_size,
1122 B.column_block_size));
1123 Assert(B.row_block_size == C.column_block_size,
1124 ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1125 }
1126 else // if (transpose_A && transpose_B)
1127 {
1128 Assert(this->n_rows == B.n_columns,
1129 ExcDimensionMismatch(this->n_rows, B.n_columns));
1130 Assert(this->n_columns == C.n_rows,
1131 ExcDimensionMismatch(this->n_columns, C.n_rows));
1132 Assert(B.n_rows == C.n_columns,
1133 ExcDimensionMismatch(B.n_rows, C.n_columns));
1134 Assert(this->column_block_size == C.row_block_size,
1135 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1136 Assert(this->row_block_size == B.column_block_size,
1137 ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1138 Assert(B.row_block_size == C.column_block_size,
1139 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1140 }
1141
1142 if (this->grid->mpi_process_is_active)
1143 {
1144 char trans_a = transpose_A ? 'T' : 'N';
1145 char trans_b = transpose_B ? 'T' : 'N';
1146
1147 const NumberType *A_loc =
1148 (this->values.size() > 0) ? this->values.data() : nullptr;
1149 const NumberType *B_loc =
1150 (B.values.size() > 0) ? B.values.data() : nullptr;
1151 NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
1152 int m = C.n_rows;
1153 int n = C.n_columns;
1154 int k = transpose_A ? this->n_rows : this->n_columns;
1155
1156 pgemm(&trans_a,
1157 &trans_b,
1158 &m,
1159 &n,
1160 &k,
1161 &b,
1162 A_loc,
1163 &(this->submatrix_row),
1164 &(this->submatrix_column),
1165 this->descriptor,
1166 B_loc,
1167 &B.submatrix_row,
1168 &B.submatrix_column,
1169 B.descriptor,
1170 &c,
1171 C_loc,
1172 &C.submatrix_row,
1173 &C.submatrix_column,
1174 C.descriptor);
1175 }
1176 C.state = LAPACKSupport::matrix;
1177}
1178
1179
1180
1181template <typename NumberType>
1182void
1183ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> &C,
1184 const ScaLAPACKMatrix<NumberType> &B,
1185 const bool adding) const
1186{
1187 if (adding)
1188 mult(1., B, 1., C, false, false);
1189 else
1190 mult(1., B, 0, C, false, false);
1191}
1192
1193
1194
1195template <typename NumberType>
1196void
1197ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> &C,
1198 const ScaLAPACKMatrix<NumberType> &B,
1199 const bool adding) const
1200{
1201 if (adding)
1202 mult(1., B, 1., C, true, false);
1203 else
1204 mult(1., B, 0, C, true, false);
1205}
1206
1207
1208
1209template <typename NumberType>
1210void
1211ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> &C,
1212 const ScaLAPACKMatrix<NumberType> &B,
1213 const bool adding) const
1214{
1215 if (adding)
1216 mult(1., B, 1., C, false, true);
1217 else
1218 mult(1., B, 0, C, false, true);
1219}
1220
1221
1222
1223template <typename NumberType>
1224void
1225ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> &C,
1226 const ScaLAPACKMatrix<NumberType> &B,
1227 const bool adding) const
1228{
1229 if (adding)
1230 mult(1., B, 1., C, true, true);
1231 else
1232 mult(1., B, 0, C, true, true);
1233}
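// Usage sketch (illustrative, not part of the original sources; m, k, n, nb and
// grid are placeholders): with a common process grid and matching block sizes,
// the wrappers above compute distributed products via p?gemm:
//
//   ScaLAPACKMatrix<double> A(m, k, grid, nb, nb), B(k, n, grid, nb, nb),
//     C(m, n, grid, nb, nb), D(k, k, grid, nb, nb);
//   A.mmult(C, B);  // C = A * B
//   A.Tmmult(D, A); // D = A^T * A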
1234
1235
1236
1237template <typename NumberType>
1238void
1239ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
1240{
1241 Assert(
1242 n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
1243 ExcMessage(
1244 "Cholesky factorization can be applied to symmetric matrices only."));
1245 Assert(state == LAPACKSupport::matrix,
1246 ExcMessage(
1247 "Matrix has to be in Matrix state before calling this function."));
1248
1249 if (grid->mpi_process_is_active)
1250 {
1251 int info = 0;
1252 NumberType *A_loc = this->values.data();
1253 // pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
1254 ppotrf(&uplo,
1255 &n_columns,
1256 A_loc,
1257 &submatrix_row,
1258 &submatrix_column,
1259 descriptor,
1260 &info);
1261 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
1262 }
1263 state = LAPACKSupport::cholesky;
1264 property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
1265 LAPACKSupport::upper_triangular);
1266}
1267
1268
1269
1270template <typename NumberType>
1271void
1272ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
1273{
1274 Assert(state == LAPACKSupport::matrix,
1275 ExcMessage(
1276 "Matrix has to be in Matrix state before calling this function."));
1277
1278 if (grid->mpi_process_is_active)
1279 {
1280 int info = 0;
1281 NumberType *A_loc = this->values.data();
1282
1283 const int iarow = indxg2p_(&submatrix_row,
1284 &row_block_size,
1285 &(grid->this_process_row),
1286 &first_process_row,
1287 &(grid->n_process_rows));
1288 const int mp = numroc_(&n_rows,
1289 &row_block_size,
1290 &(grid->this_process_row),
1291 &iarow,
1292 &(grid->n_process_rows));
1293 ipiv.resize(mp + row_block_size);
1294
1295 pgetrf(&n_rows,
1296 &n_columns,
1297 A_loc,
1298 &submatrix_row,
1299 &submatrix_column,
1300 descriptor,
1301 ipiv.data(),
1302 &info);
1303 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
1304 }
1305 state = LAPACKSupport::State::lu;
1306 property = LAPACKSupport::Property::general;
1307}
1308
1309
1310
1311template <typename NumberType>
1312void
1313ScaLAPACKMatrix<NumberType>::invert()
1314{
1315 // Check whether matrix is symmetric and save flag.
1316 // If a Cholesky factorization has been applied previously,
1317 // the original matrix was symmetric.
1318 const bool is_symmetric = (property == LAPACKSupport::symmetric ||
1319 state == LAPACKSupport::State::cholesky);
1320
1321 // Check whether matrix is triangular and is in an unfactorized state.
1322 const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
1323 property == LAPACKSupport::lower_triangular) &&
1324 (state == LAPACKSupport::State::matrix ||
1325 state == LAPACKSupport::State::inverse_matrix);
1326
1327 if (is_triangular)
1328 {
1329 if (grid->mpi_process_is_active)
1330 {
1331 const char uploTriangular =
1332 property == LAPACKSupport::upper_triangular ? 'U' : 'L';
1333 const char diag = 'N';
1334 int info = 0;
1335 NumberType *A_loc = this->values.data();
1336 ptrtri(&uploTriangular,
1337 &diag,
1338 &n_columns,
1339 A_loc,
1340 &submatrix_row,
1341 &submatrix_column,
1342 descriptor,
1343 &info);
1344 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
1345 // The inversion is stored in the same part as the triangular matrix,
1346 // so we don't need to re-set the property here.
1347 }
1348 }
1349 else
1350 {
1351 // Matrix is neither in Cholesky nor LU state.
1352 // Compute the required factorizations based on the property of the
1353 // matrix.
1354 if (!(state == LAPACKSupport::State::lu ||
1355 state == LAPACKSupport::State::cholesky))
1356 {
1357 if (is_symmetric)
1358 compute_cholesky_factorization();
1359 else
1360 compute_lu_factorization();
1361 }
1362 if (grid->mpi_process_is_active)
1363 {
1364 int info = 0;
1365 NumberType *A_loc = this->values.data();
1366
1367 if (is_symmetric)
1368 {
1369 ppotri(&uplo,
1370 &n_columns,
1371 A_loc,
1372 &submatrix_row,
1373 &submatrix_column,
1374 descriptor,
1375 &info);
1376 AssertThrow(info == 0,
1377 LAPACKSupport::ExcErrorCode("ppotri", info));
1378 property = LAPACKSupport::Property::symmetric;
1379 }
1380 else
1381 {
1382 int lwork = -1, liwork = -1;
1383 work.resize(1);
1384 iwork.resize(1);
1385
1386 pgetri(&n_columns,
1387 A_loc,
1388 &submatrix_row,
1389 &submatrix_column,
1390 descriptor,
1391 ipiv.data(),
1392 work.data(),
1393 &lwork,
1394 iwork.data(),
1395 &liwork,
1396 &info);
1397
1398 AssertThrow(info == 0,
1399 LAPACKSupport::ExcErrorCode("pgetri", info));
1400 lwork = static_cast<int>(work[0]);
1401 liwork = iwork[0];
1402 work.resize(lwork);
1403 iwork.resize(liwork);
1404
1405 pgetri(&n_columns,
1406 A_loc,
1407 &submatrix_row,
1408 &submatrix_column,
1409 descriptor,
1410 ipiv.data(),
1411 work.data(),
1412 &lwork,
1413 iwork.data(),
1414 &liwork,
1415 &info);
1416
1417 AssertThrow(info == 0,
1418 LAPACKSupport::ExcErrorCode("pgetri", info));
1419 }
1420 }
1421 }
1422 state = LAPACKSupport::State::inverse_matrix;
1423}
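// Usage sketch (illustrative, not part of the original sources): for a
// symmetric positive definite matrix the inversion above goes through the
// Cholesky path (ppotrf followed by ppotri); for a general matrix an LU
// factorization (pgetrf) followed by pgetri is used:
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   A.invert(); // A now stores its inverse, state == inverse_matrix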
1424
1425
1426
1427template <typename NumberType>
1428std::vector<NumberType>
1429ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
1430 const std::pair<unsigned int, unsigned int> &index_limits,
1431 const bool compute_eigenvectors)
1432{
1433 // check validity of index limits
1434 AssertIndexRange(index_limits.first, n_rows);
1435 AssertIndexRange(index_limits.second, n_rows);
1436
1437 std::pair<unsigned int, unsigned int> idx =
1438 std::make_pair(std::min(index_limits.first, index_limits.second),
1439 std::max(index_limits.first, index_limits.second));
1440
1441 // compute all eigenvalues/eigenvectors
1442 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1443 return eigenpairs_symmetric(compute_eigenvectors);
1444 else
1445 return eigenpairs_symmetric(compute_eigenvectors, idx);
1446}
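// Usage sketch (illustrative, not part of the original sources): computing all
// eigenpairs of a symmetric matrix; the eigenvalues are returned in ascending
// order and, on exit, the columns of A hold the corresponding eigenvectors:
//
//   A.set_property(LAPACKSupport::Property::symmetric);
//   const std::vector<double> eigenvalues =
//     A.eigenpairs_symmetric_by_index(std::make_pair(0u, A.n() - 1), true);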
1447
1448
1449
1450template <typename NumberType>
1451std::vector<NumberType>
1452ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
1453 const std::pair<NumberType, NumberType> &value_limits,
1454 const bool compute_eigenvectors)
1455{
1456 Assert(!std::isnan(value_limits.first),
1457 ExcMessage("value_limits.first is NaN"));
1458 Assert(!std::isnan(value_limits.second),
1459 ExcMessage("value_limits.second is NaN"));
1460
1461 std::pair<unsigned int, unsigned int> indices =
1462 std::make_pair(numbers::invalid_unsigned_int,
1463 numbers::invalid_unsigned_int);
1464
1465 return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
1466}
1467
1468
1469
1470template <typename NumberType>
1471std::vector<NumberType>
1472ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
1473 const bool compute_eigenvectors,
1474 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1475 const std::pair<NumberType, NumberType> & eigenvalue_limits)
1476{
1477 Assert(state == LAPACKSupport::matrix,
1478 ExcMessage(
1479 "Matrix has to be in Matrix state before calling this function."));
1480 Assert(property == LAPACKSupport::symmetric,
1481 ExcMessage("Matrix has to be symmetric for this operation."));
1482
1483 std::lock_guard<std::mutex> lock(mutex);
1484
1485 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1486 std::isnan(eigenvalue_limits.second)) ?
1487 false :
1488 true;
1489 const bool use_indices =
1490 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1491 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1492 false :
1493 true;
1494
1495 Assert(
1496 !(use_values && use_indices),
1497 ExcMessage(
1498 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1499
1500 // if computation of eigenvectors is not required use a sufficiently small
1501 // distributed matrix
1502 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1503 compute_eigenvectors ?
1504 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1505 grid,
1506 row_block_size) :
1507 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1508 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1509
1510 eigenvectors->property = property;
1511 // number of eigenvalues to be returned from psyevx; upon successful exit ev
1512 // contains the m selected eigenvalues in ascending order; set to all
1513 // eigenvalues in case we will be using psyev.
1514 int m = n_rows;
1515 std::vector<NumberType> ev(n_rows);
1516
1517 if (grid->mpi_process_is_active)
1518 {
1519 int info = 0;
1520 /*
1521 * for jobz==N only eigenvalues are computed, for jobz='V' also the
1522 * eigenvectors of the matrix are computed
1523 */
1524 char jobz = compute_eigenvectors ? 'V' : 'N';
1525 char range = 'A';
1526 // default value is to compute all eigenvalues and optionally eigenvectors
1527 bool all_eigenpairs = true;
1528 NumberType vl = NumberType(), vu = NumberType();
1529 int il = 1, iu = 1;
1530 // number of eigenvectors to be returned;
1531 // upon successful exit the first m=nz columns contain the selected
1532 // eigenvectors (only if jobz=='V')
1533 int nz = 0;
1534 NumberType abstol = NumberType();
1535
1536 // orfac decides which eigenvectors should be reorthogonalized
1537 // see
1538 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1539 // for explanation; to keep it simple, no reorthogonalization will be
1540 // done by setting orfac to 0
1541 NumberType orfac = 0;
1542 // contains the indices of eigenvectors that failed to converge
1543 std::vector<int> ifail;
1544 // This array contains indices of eigenvectors corresponding to
1545 // a cluster of eigenvalues that could not be reorthogonalized
1546 // due to insufficient workspace
1547 // see
1548 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1549 // for explanation
1550 std::vector<int> iclustr;
1551 // This array contains the gap between eigenvalues whose
1552 // eigenvectors could not be reorthogonalized.
1553 // see
1554 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1555 // for explanation
1556 std::vector<NumberType> gap(n_local_rows * n_local_columns);
1557
1558 // index range for eigenvalues is not specified
1559 if (!use_indices)
1560 {
1561 // interval for eigenvalues is not specified and consequently all
1562 // eigenvalues/eigenpairs will be computed
1563 if (!use_values)
1564 {
1565 range = 'A';
1566 all_eigenpairs = true;
1567 }
1568 else
1569 {
1570 range = 'V';
1571 all_eigenpairs = false;
1572 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1573 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1574 }
1575 }
1576 else
1577 {
1578 range = 'I';
1579 all_eigenpairs = false;
1580 // as Fortran starts counting/indexing from 1 unlike C/C++, where it
1581 // starts from 0
1582 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1583 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1584 }
1585 NumberType *A_loc = this->values.data();
1586 /*
1587 * by setting lwork to -1 a workspace query for optimal length of work is
1588 * performed
1589 */
1590 int lwork = -1;
1591 int liwork = -1;
1592 NumberType *eigenvectors_loc =
1593 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1594 work.resize(1);
1595 iwork.resize(1);
1596
1597 if (all_eigenpairs)
1598 {
1599 psyev(&jobz,
1600 &uplo,
1601 &n_rows,
1602 A_loc,
1603 &submatrix_row,
1604 &submatrix_column,
1605 descriptor,
1606 ev.data(),
1607 eigenvectors_loc,
1608 &eigenvectors->submatrix_row,
1609 &eigenvectors->submatrix_column,
1610 eigenvectors->descriptor,
1611 work.data(),
1612 &lwork,
1613 &info);
1614 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1615 }
1616 else
1617 {
1618 char cmach = compute_eigenvectors ? 'U' : 'S';
1619 plamch(&(this->grid->blacs_context), &cmach, abstol);
1620 abstol *= 2;
1621 ifail.resize(n_rows);
1622 iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
1623 gap.resize(grid->n_process_rows * grid->n_process_columns);
1624
1625 psyevx(&jobz,
1626 &range,
1627 &uplo,
1628 &n_rows,
1629 A_loc,
1630 &submatrix_row,
1631 &submatrix_column,
1632 descriptor,
1633 &vl,
1634 &vu,
1635 &il,
1636 &iu,
1637 &abstol,
1638 &m,
1639 &nz,
1640 ev.data(),
1641 &orfac,
1642 eigenvectors_loc,
1643 &eigenvectors->submatrix_row,
1644 &eigenvectors->submatrix_column,
1645 eigenvectors->descriptor,
1646 work.data(),
1647 &lwork,
1648 iwork.data(),
1649 &liwork,
1650 ifail.data(),
1651 iclustr.data(),
1652 gap.data(),
1653 &info);
1654 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1655 }
1656 lwork = static_cast<int>(work[0]);
1657 work.resize(lwork);
1658
1659 if (all_eigenpairs)
1660 {
1661 psyev(&jobz,
1662 &uplo,
1663 &n_rows,
1664 A_loc,
1665 &submatrix_row,
1666 &submatrix_column,
1667 descriptor,
1668 ev.data(),
1669 eigenvectors_loc,
1670 &eigenvectors->submatrix_row,
1671 &eigenvectors->submatrix_column,
1672 eigenvectors->descriptor,
1673 work.data(),
1674 &lwork,
1675 &info);
1676
1677 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1678 }
1679 else
1680 {
1681 liwork = iwork[0];
1682 AssertThrow(liwork > 0, ExcInternalError());
1683 iwork.resize(liwork);
1684
1685 psyevx(&jobz,
1686 &range,
1687 &uplo,
1688 &n_rows,
1689 A_loc,
1690 &submatrix_row,
1691 &submatrix_column,
1692 descriptor,
1693 &vl,
1694 &vu,
1695 &il,
1696 &iu,
1697 &abstol,
1698 &m,
1699 &nz,
1700 ev.data(),
1701 &orfac,
1702 eigenvectors_loc,
1703 &eigenvectors->submatrix_row,
1704 &eigenvectors->submatrix_column,
1705 eigenvectors->descriptor,
1706 work.data(),
1707 &lwork,
1708 iwork.data(),
1709 &liwork,
1710 ifail.data(),
1711 iclustr.data(),
1712 gap.data(),
1713 &info);
1714
1715 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1716 }
1717 // if eigenvectors are queried copy eigenvectors to original matrix
1718 // as the temporary matrix eigenvectors has identical dimensions and
1719 // block-cyclic distribution we simply swap the local array
1720 if (compute_eigenvectors)
1721 this->values.swap(eigenvectors->values);
1722
1723 // adapt the size of ev to fit m upon return
1724 while (ev.size() > static_cast<size_type>(m))
1725 ev.pop_back();
1726 }
1727 /*
1728 * send number of computed eigenvalues to inactive processes
1729 */
1730 grid->send_to_inactive(&m, 1);
1731
1732 /*
1733 * inactive processes have to resize array of eigenvalues
1734 */
1735 if (!grid->mpi_process_is_active)
1736 ev.resize(m);
1737 /*
1738 * send the eigenvalues to processors not being part of the process grid
1739 */
1740 grid->send_to_inactive(ev.data(), ev.size());
1741
1742 /*
1743 * if only eigenvalues are queried the content of the matrix will be destroyed
1744 * if the eigenpairs are queried matrix A on exit stores the eigenvectors in
1745 * the columns
1746 */
1747 if (compute_eigenvectors)
1748 {
1749 property = LAPACKSupport::Property::general;
1750 state = LAPACKSupport::eigenvalues;
1751 }
1752 else
1753 state = LAPACKSupport::unusable;
1754
1755 return ev;
1756}
1757
1758
1759
1760template <typename NumberType>
1761std::vector<NumberType>
1762ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
1763 const std::pair<unsigned int, unsigned int> &index_limits,
1764 const bool compute_eigenvectors)
1765{
1766 // Check validity of index limits.
1767 AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
1768 AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));
1769
1770 const std::pair<unsigned int, unsigned int> idx =
1771 std::make_pair(std::min(index_limits.first, index_limits.second),
1772 std::max(index_limits.first, index_limits.second));
1773
1774 // Compute all eigenvalues/eigenvectors.
1775 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1776 return eigenpairs_symmetric_MRRR(compute_eigenvectors);
1777 else
1778 return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
1779}
1780
1781
1782
1783template <typename NumberType>
1784std::vector<NumberType>
1785ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
1786 const std::pair<NumberType, NumberType> &value_limits,
1787 const bool compute_eigenvectors)
1788{
1789 AssertIsFinite(value_limits.first);
1790 AssertIsFinite(value_limits.second);
1791
1792 const std::pair<unsigned int, unsigned int> indices =
1793 std::make_pair(numbers::invalid_unsigned_int,
1794 numbers::invalid_unsigned_int);
1795
1796 return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
1797}
1798
1799
1800
1801template <typename NumberType>
1802std::vector<NumberType>
1803ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
1804 const bool compute_eigenvectors,
1805 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1806 const std::pair<NumberType, NumberType> & eigenvalue_limits)
1807{
1808 Assert(state == LAPACKSupport::matrix,
1809 ExcMessage(
1810 "Matrix has to be in Matrix state before calling this function."));
1811 Assert(property == LAPACKSupport::symmetric,
1812 ExcMessage("Matrix has to be symmetric for this operation."));
1813
1814 std::lock_guard<std::mutex> lock(mutex);
1815
1816 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1817 std::isnan(eigenvalue_limits.second)) ?
1818 false :
1819 true;
1820 const bool use_indices =
1821 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1822 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1823 false :
1824 true;
1825
1826 Assert(
1827 !(use_values && use_indices),
1828 ExcMessage(
1829 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1830
1831 // If computation of eigenvectors is not required, use a sufficiently small
1832 // distributed matrix.
1833 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1834 compute_eigenvectors ?
1835 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1836 grid,
1837 row_block_size) :
1838 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1839 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1840
1841 eigenvectors->property = property;
1842 // Number of eigenvalues to be returned from psyevr; upon successful exit ev
1843 // contains the m selected eigenvalues in ascending order.
1844 int m = n_rows;
1845 std::vector<NumberType> ev(n_rows);
1846
1847 // Number of eigenvectors to be returned;
1848 // Upon successful exit the first m=nz columns contain the selected
1849 // eigenvectors (only if jobz=='V').
1850 int nz = 0;
1851
1852 if (grid->mpi_process_is_active)
1853 {
1854 int info = 0;
1855 /*
1856 * For jobz==N only eigenvalues are computed, for jobz='V' also the
1857 * eigenvectors of the matrix are computed.
1858 */
1859 char jobz = compute_eigenvectors ? 'V' : 'N';
1860 // Default value is to compute all eigenvalues and optionally
1861 // eigenvectors.
1862 char range = 'A';
1863 NumberType vl = NumberType(), vu = NumberType();
1864 int il = 1, iu = 1;
1865
1866 // Index range for eigenvalues is not specified.
1867 if (!use_indices)
1868 {
1869 // Interval for eigenvalues is not specified and consequently all
1870 // eigenvalues/eigenpairs will be computed.
1871 if (!use_values)
1872 {
1873 range = 'A';
1874 }
1875 else
1876 {
1877 range = 'V';
1878 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1879 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1880 }
1881 }
1882 else
1883 {
1884 range = 'I';
1885 // As Fortran starts counting/indexing from 1 unlike C/C++, where it
1886 // starts from 0.
1887 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1888 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1889 }
1890 NumberType *A_loc = this->values.data();
1891
1892 /*
1893 * By setting lwork to -1 a workspace query for optimal length of work is
1894 * performed.
1895 */
1896 int lwork = -1;
1897 int liwork = -1;
1898 NumberType *eigenvectors_loc =
1899 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1900 work.resize(1);
1901 iwork.resize(1);
1902
1903 psyevr(&jobz,
1904 &range,
1905 &uplo,
1906 &n_rows,
1907 A_loc,
1908 &submatrix_row,
1909 &submatrix_column,
1910 descriptor,
1911 &vl,
1912 &vu,
1913 &il,
1914 &iu,
1915 &m,
1916 &nz,
1917 ev.data(),
1918 eigenvectors_loc,
1919 &eigenvectors->submatrix_row,
1920 &eigenvectors->submatrix_column,
1921 eigenvectors->descriptor,
1922 work.data(),
1923 &lwork,
1924 iwork.data(),
1925 &liwork,
1926 &info);
1927
1928 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1929
1930 lwork = static_cast<int>(work[0]);
1931 work.resize(lwork);
1932 liwork = iwork[0];
1933 iwork.resize(liwork);
1934
1935 psyevr(&jobz,
1936 &range,
1937 &uplo,
1938 &n_rows,
1939 A_loc,
1940 &submatrix_row,
1941 &submatrix_column,
1942 descriptor,
1943 &vl,
1944 &vu,
1945 &il,
1946 &iu,
1947 &m,
1948 &nz,
1949 ev.data(),
1950 eigenvectors_loc,
1951 &eigenvectors->submatrix_row,
1952 &eigenvectors->submatrix_column,
1953 eigenvectors->descriptor,
1954 work.data(),
1955 &lwork,
1956 iwork.data(),
1957 &liwork,
1958 &info);
1959
1960 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1961
1962 if (compute_eigenvectors)
1963 AssertThrow(
1964 m == nz,
1965 ExcMessage(
1966 "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
1967
1968 // If eigenvectors are queried, copy eigenvectors to original matrix.
1969 // As the temporary matrix eigenvectors has identical dimensions and
1970 // block-cyclic distribution we simply swap the local array.
1971 if (compute_eigenvectors)
1972 this->values.swap(eigenvectors->values);
1973
1974 // Adapt the size of ev to fit m upon return.
1975 while (ev.size() > static_cast<size_type>(m))
1976 ev.pop_back();
1977 }
1978 /*
1979 * Send number of computed eigenvalues to inactive processes.
1980 */
1981 grid->send_to_inactive(&m, 1);
1982
1983 /*
1984 * Inactive processes have to resize array of eigenvalues.
1985 */
1986 if (!grid->mpi_process_is_active)
1987 ev.resize(m);
1988 /*
1989 * Send the eigenvalues to processors not being part of the process grid.
1990 */
1991 grid->send_to_inactive(ev.data(), ev.size());
1992
1993 /*
1994 * If only eigenvalues are queried, the content of the matrix will be
1995 * destroyed. If the eigenpairs are queried, matrix A on exit stores the
1996 * eigenvectors in the columns.
1997 */
1998 if (compute_eigenvectors)
1999 {
2000 property = LAPACKSupport::Property::general;
2001 state = LAPACKSupport::eigenvalues;
2002 }
2003 else
2004 state = LAPACKSupport::unusable;
2005
2006 return ev;
2007}
2008
2009
2010
2011template <typename NumberType>
2012std::vector<NumberType>
2013ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
2014 ScaLAPACKMatrix<NumberType> *VT)
2015{
2016 Assert(state == LAPACKSupport::matrix,
2017 ExcMessage(
2018 "Matrix has to be in Matrix state before calling this function."));
2019 Assert(row_block_size == column_block_size,
2020 ExcDimensionMismatch(row_block_size, column_block_size));
2021
2022 const bool left_singluar_vectors = (U != nullptr) ? true : false;
2023 const bool right_singluar_vectors = (VT != nullptr) ? true : false;
2024
2025 if (left_singluar_vectors)
2026 {
2027 Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
2028 Assert(U->n_rows == U->n_columns,
2029 ExcDimensionMismatch(U->n_rows, U->n_columns));
2030 Assert(row_block_size == U->row_block_size,
2031 ExcDimensionMismatch(row_block_size, U->row_block_size));
2032 Assert(column_block_size == U->column_block_size,
2033 ExcDimensionMismatch(column_block_size, U->column_block_size));
2034 Assert(grid->blacs_context == U->grid->blacs_context,
2035 ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
2036 }
2037 if (right_singular_vectors)
2038 {
2039 Assert(n_columns == VT->n_rows,
2040 ExcDimensionMismatch(n_columns, VT->n_rows));
2041 Assert(VT->n_rows == VT->n_columns,
2042 ExcDimensionMismatch(VT->n_rows, VT->n_columns));
2043 Assert(row_block_size == VT->row_block_size,
2044 ExcDimensionMismatch(row_block_size, VT->row_block_size));
2045 Assert(column_block_size == VT->column_block_size,
2046 ExcDimensionMismatch(column_block_size, VT->column_block_size));
2047 Assert(grid->blacs_context == VT->grid->blacs_context,
2048 ExcDimensionMismatch(grid->blacs_context,
2049 VT->grid->blacs_context));
2050 }
2051 std::lock_guard<std::mutex> lock(mutex);
2052
2053 std::vector<NumberType> sv(std::min(n_rows, n_columns));
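 // The ScaLAPACK p?gesvd drivers return min(n_rows, n_columns) singular
 // values, sorted in descending order; U and VT are only referenced when the
 // corresponding job flag below is 'V'.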
2054
2055 if (grid->mpi_process_is_active)
2056 {
2057 char jobu = left_singular_vectors ? 'V' : 'N';
2058 char jobvt = right_singular_vectors ? 'V' : 'N';
2059 NumberType *A_loc = this->values.data();
2060 NumberType *U_loc = left_singular_vectors ? U->values.data() : nullptr;
2061 NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;
2062 int info = 0;
2063 /*
2064 * by setting lwork to -1 a workspace query for optimal length of work is
2065 * performed
2066 */
2067 int lwork = -1;
2068 work.resize(1);
2069
2070 pgesvd(&jobu,
2071 &jobvt,
2072 &n_rows,
2073 &n_columns,
2074 A_loc,
2075 &submatrix_row,
2076 &submatrix_column,
2077 descriptor,
2078 &*sv.begin(),
2079 U_loc,
2080 &U->submatrix_row,
2081 &U->submatrix_column,
2082 U->descriptor,
2083 VT_loc,
2084 &VT->submatrix_row,
2085 &VT->submatrix_column,
2086 VT->descriptor,
2087 work.data(),
2088 &lwork,
2089 &info);
2090 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2091
2092 lwork = static_cast<int>(work[0]);
2093 work.resize(lwork);
2094
2095 pgesvd(&jobu,
2096 &jobvt,
2097 &n_rows,
2098 &n_columns,
2099 A_loc,
2100 &submatrix_row,
2101 &submatrix_column,
2102 descriptor,
2103 &*sv.begin(),
2104 U_loc,
2105 &U->submatrix_row,
2106 &U->submatrix_column,
2107 U->descriptor,
2108 VT_loc,
2109 &VT->submatrix_row,
2110 &VT->submatrix_column,
2111 VT->descriptor,
2112 work.data(),
2113 &lwork,
2114 &info);
2115 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2116 }
2117
2118 /*
2119 * send the singular values to processors not being part of the process grid
2120 */
2121 grid->send_to_inactive(sv.data(), sv.size());
2122
2122
2123 property = LAPACKSupport::Property::general;
2124 state = LAPACKSupport::State::unusable;
2125
2126 return sv;
2127}
2128
2129
2130
2131template <typename NumberType>
2132void
2133 ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
2134 const bool transpose)
2135{
2136 Assert(grid == B.grid,
2137 ExcMessage("The matrices A and B need to have the same process grid"));
2138 Assert(state == LAPACKSupport::matrix,
2139 ExcMessage(
2140 "Matrix has to be in Matrix state before calling this function."));
2141 Assert(B.state == LAPACKSupport::matrix,
2142 ExcMessage(
2143 "Matrix B has to be in Matrix state before calling this function."));
2144
2145 if (transpose)
2146 {
2147 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
2148 }
2149 else
2150 {
2151 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
2152 }
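 // p?gels solves the over- or underdetermined system op(A) X = B in the
 // least-squares sense, where op(A) is A or A^T depending on the transpose
 // flag; on exit the solution overwrites (the leading part of) B.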
2153
2154 // see
2155 // https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgels.htm
2156 Assert(row_block_size == column_block_size,
2157 ExcMessage(
2158 "Use identical block sizes for rows and columns of matrix A"));
2159 Assert(B.row_block_size == B.column_block_size,
2160 ExcMessage(
2161 "Use identical block sizes for rows and columns of matrix B"));
2162 Assert(row_block_size == B.row_block_size,
2163 ExcMessage(
2164 "Use identical block-cyclic distribution for matrices A and B"));
2165
2166 std::lock_guard<std::mutex> lock(mutex);
2167
2168 if (grid->mpi_process_is_active)
2169 {
2170 char trans = transpose ? 'T' : 'N';
2171 NumberType *A_loc = this->values.data();
2172 NumberType *B_loc = B.values.data();
2173 int info = 0;
2174 /*
2175 * by setting lwork to -1 a workspace query for optimal length of work is
2176 * performed
2177 */
2178 int lwork = -1;
2179 work.resize(1);
2180
2181 pgels(&trans,
2182 &n_rows,
2183 &n_columns,
2184 &B.n_columns,
2185 A_loc,
2186 &submatrix_row,
2187 &submatrix_column,
2188 descriptor,
2189 B_loc,
2190 &B.submatrix_row,
2191 &B.submatrix_column,
2192 B.descriptor,
2193 work.data(),
2194 &lwork,
2195 &info);
2196 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2197
2198 lwork = static_cast<int>(work[0]);
2199 work.resize(lwork);
2200
2201 pgels(&trans,
2202 &n_rows,
2203 &n_columns,
2204 &B.n_columns,
2205 A_loc,
2206 &submatrix_row,
2207 &submatrix_column,
2208 descriptor,
2209 B_loc,
2210 &B.submatrix_row,
2211 &B.submatrix_column,
2212 B.descriptor,
2213 work.data(),
2214 &lwork,
2215 &info);
2216 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2217 }
2218 state = LAPACKSupport::State::unusable;
2219}
2220
2221
2222
2223template <typename NumberType>
2224unsigned int
2226{
2228 ExcMessage(
2229 "Matrix has to be in Matrix state before calling this function."));
2230 Assert(row_block_size == column_block_size,
2231 ExcMessage(
2232 "Use identical block sizes for rows and columns of matrix A"));
2233 Assert(
2234 ratio > 0. && ratio < 1.,
2235 ExcMessage(
2236 "input parameter ratio has to be larger than zero and smaller than 1"));
2237
2238 ScaLAPACKMatrix<NumberType> U(n_rows,
2239 n_rows,
2240 grid,
2241 row_block_size,
2242 row_block_size,
2243 LAPACKSupport::Property::general);
2244 ScaLAPACKMatrix<NumberType> VT(n_columns,
2245 n_columns,
2246 grid,
2247 row_block_size,
2248 row_block_size,
2249 LAPACKSupport::Property::general);
2250 std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
2251 AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
2252 ExcMessage("Matrix has rank 0"));
2253
2254 // Get the number of singular values fulfilling the following: sv[i] > sv[0] *
2255 // ratio. Obviously, the 0-th element already satisfies sv[0] > sv[0] * ratio.
2256 // The singular values in sv are ordered by descending value, so we break out
2257 // of the loop if a singular value is smaller than sv[0] * ratio.
2258 unsigned int n_sv = 1;
2259 std::vector<NumberType> inv_sigma;
2260 inv_sigma.push_back(1 / sv[0]);
2261
2262 for (unsigned int i = 1; i < sv.size(); ++i)
2263 if (sv[i] > sv[0] * ratio)
2264 {
2265 ++n_sv;
2266 inv_sigma.push_back(1 / sv[i]);
2267 }
2268 else
2269 break;
2270
2271 // For the matrix multiplication we use only the columns of U and rows of VT
2272 // which are associated with singular values larger than the limit. That saves
2273 // computational time for matrices with rank significantly smaller than
2274 // min(n_rows,n_columns)
2275 ScaLAPACKMatrix<NumberType> U_R(n_rows,
2276 n_sv,
2277 grid,
2278 row_block_size,
2279 row_block_size,
2280 LAPACKSupport::Property::general);
2281 ScaLAPACKMatrix<NumberType> VT_R(n_sv,
2282 n_columns,
2283 grid,
2284 row_block_size,
2285 row_block_size,
2286 LAPACKSupport::Property::general);
2287 U.copy_to(U_R,
2288 std::make_pair(0, 0),
2289 std::make_pair(0, 0),
2290 std::make_pair(n_rows, n_sv));
2291 VT.copy_to(VT_R,
2292 std::make_pair(0, 0),
2293 std::make_pair(0, 0),
2294 std::make_pair(n_sv, n_columns));
2295
2296 VT_R.scale_rows(inv_sigma);
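 // With the rows of VT_R scaled by 1/sigma_i, the transposed product below
 // forms V_R * Sigma_R^{-1} * U_R^T, i.e. the pseudoinverse restricted to the
 // retained singular values; it is stored in *this, which is therefore
 // reinitialized with the transposed dimensions n_columns x n_rows.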
2297 this->reinit(n_columns,
2298 n_rows,
2299 this->grid,
2300 row_block_size,
2301 column_block_size,
2302 LAPACKSupport::Property::general);
2303 VT_R.mult(1, U_R, 0, *this, true, true);
2304 state = LAPACKSupport::State::inverse_svd;
2305 return n_sv;
2306}
2307
2308
2309
2310template <typename NumberType>
2311NumberType
2312 ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
2313 const NumberType a_norm) const
2314 {
2315 Assert(state == LAPACKSupport::cholesky,
2316 ExcMessage(
2317 "Matrix has to be in Cholesky state before calling this function."));
2318 std::lock_guard<std::mutex> lock(mutex);
2319 NumberType rcond = 0.;
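 // p?pocon estimates the reciprocal condition number
 // rcond = 1 / (||A|| * ||A^-1||) from the Cholesky factor stored in this
 // matrix and the norm of the original matrix passed in as a_norm.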
2320
2321 if (grid->mpi_process_is_active)
2322 {
2323 int liwork = n_local_rows;
2324 iwork.resize(liwork);
2325
2326 int info = 0;
2327 const NumberType *A_loc = this->values.data();
2328
2329 // by setting lwork to -1 a workspace query for optimal length of work is
2330 // performed
2331 int lwork = -1;
2332 work.resize(1);
2333 ppocon(&uplo,
2334 &n_columns,
2335 A_loc,
2336 &submatrix_row,
2337 &submatrix_column,
2338 descriptor,
2339 &a_norm,
2340 &rcond,
2341 work.data(),
2342 &lwork,
2343 iwork.data(),
2344 &liwork,
2345 &info);
2346 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2347 lwork = static_cast<int>(std::ceil(work[0]));
2348 work.resize(lwork);
2349
2350 // now the actual run:
2351 ppocon(&uplo,
2352 &n_columns,
2353 A_loc,
2354 &submatrix_row,
2355 &submatrix_column,
2356 descriptor,
2357 &a_norm,
2358 &rcond,
2359 work.data(),
2360 &lwork,
2361 iwork.data(),
2362 &liwork,
2363 &info);
2364 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2365 }
2366 grid->send_to_inactive(&rcond);
2367 return rcond;
2368}
2369
2370
2371
2372template <typename NumberType>
2373NumberType
2375{
2376 const char type('O');
2377
2378 if (property == LAPACKSupport::symmetric)
2379 return norm_symmetric(type);
2380 else
2381 return norm_general(type);
2382}
2383
2384
2385
2386template <typename NumberType>
2387NumberType
2389{
2390 const char type('I');
2391
2392 if (property == LAPACKSupport::symmetric)
2393 return norm_symmetric(type);
2394 else
2395 return norm_general(type);
2396}
2397
2398
2399
2400template <typename NumberType>
2401NumberType
2403{
2404 const char type('F');
2405
2406 if (property == LAPACKSupport::symmetric)
2407 return norm_symmetric(type);
2408 else
2409 return norm_general(type);
2410}
2411
2412
2413
2414template <typename NumberType>
2415NumberType
2417{
2418 Assert(state == LAPACKSupport::matrix ||
2420 ExcMessage("norms can be called in matrix state only."));
2421 std::lock_guard<std::mutex> lock(mutex);
2422 NumberType res = 0.;
2423
2424 if (grid->mpi_process_is_active)
2425 {
2426 const int iarow = indxg2p_(&submatrix_row,
2427 &row_block_size,
2428 &(grid->this_process_row),
2429 &first_process_row,
2430 &(grid->n_process_rows));
2431 const int iacol = indxg2p_(&submatrix_column,
2432 &column_block_size,
2433 &(grid->this_process_column),
2434 &first_process_column,
2435 &(grid->n_process_columns));
2436 const int mp0 = numroc_(&n_rows,
2437 &row_block_size,
2438 &(grid->this_process_row),
2439 &iarow,
2440 &(grid->n_process_rows));
2441 const int nq0 = numroc_(&n_columns,
2442 &column_block_size,
2443 &(grid->this_process_column),
2444 &iacol,
2445 &(grid->n_process_columns));
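 // mp0 and nq0 are the numbers of rows and columns of the distributed matrix
 // stored locally on this process, as computed by numroc_; they determine the
 // workspace size plange needs for the row/column sums.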
2446
2447 // type='M': compute largest absolute value
2448 // type='F' || type='E': compute Frobenius norm
2449 // type='O' || type='1': compute one norm; type='I': compute infinity norm
2450 int lwork = 0; // for type == 'M' || type == 'F' || type == 'E'
2451 if (type == 'O' || type == '1')
2452 lwork = nq0;
2453 else if (type == 'I')
2454 lwork = mp0;
2455
2456 work.resize(lwork);
2457 const NumberType *A_loc = this->values.begin();
2458 res = plange(&type,
2459 &n_rows,
2460 &n_columns,
2461 A_loc,
2462 &submatrix_row,
2463 &submatrix_column,
2464 descriptor,
2465 work.data());
2466 }
2467 grid->send_to_inactive(&res);
2468 return res;
2469}
2470
2471
2472
2473template <typename NumberType>
2474NumberType
2476{
2477 Assert(state == LAPACKSupport::matrix ||
2479 ExcMessage("norms can be called in matrix state only."));
2480 Assert(property == LAPACKSupport::symmetric,
2481 ExcMessage("Matrix has to be symmetric for this operation."));
2482 std::lock_guard<std::mutex> lock(mutex);
2483 NumberType res = 0.;
2484
2485 if (grid->mpi_process_is_active)
2486 {
2487 // int IROFFA = MOD( IA-1, MB_A )
2488 // int ICOFFA = MOD( JA-1, NB_A )
2489 const int lcm =
2490 ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
2491 const int v2 = lcm / (grid->n_process_rows);
2492
2493 const int IAROW = indxg2p_(&submatrix_row,
2494 &row_block_size,
2495 &(grid->this_process_row),
2496 &first_process_row,
2497 &(grid->n_process_rows));
2498 const int IACOL = indxg2p_(&submatrix_column,
2499 &column_block_size,
2500 &(grid->this_process_column),
2501 &first_process_column,
2502 &(grid->n_process_columns));
2503 const int Np0 = numroc_(&n_columns /*+IROFFA*/,
2504 &row_block_size,
2505 &(grid->this_process_row),
2506 &IAROW,
2507 &(grid->n_process_rows));
2508 const int Nq0 = numroc_(&n_columns /*+ICOFFA*/,
2509 &column_block_size,
2510 &(grid->this_process_column),
2511 &IACOL,
2512 &(grid->n_process_columns));
2513
2514 const int v1 = iceil_(&Np0, &row_block_size);
2515 const int ldw = (n_local_rows == n_local_columns) ?
2516 0 :
2517 row_block_size * iceil_(&v1, &v2);
2518
2519 const int lwork =
2520 (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
2521 work.resize(lwork);
2522 const NumberType *A_loc = this->values.begin();
2523 res = plansy(&type,
2524 &uplo,
2525 &n_columns,
2526 A_loc,
2527 &submatrix_row,
2528 &submatrix_column,
2529 descriptor,
2530 work.data());
2531 }
2532 grid->send_to_inactive(&res);
2533 return res;
2534}
2535
2536
2537
2538# ifdef DEAL_II_WITH_HDF5
2539namespace internal
2540{
2541 namespace
2542 {
2543 void
2544 create_HDF5_state_enum_id(hid_t &state_enum_id)
2545 {
2546 // create HDF5 enum type for LAPACKSupport::State
2547 LAPACKSupport::State val;
2548 state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));
2549 val = LAPACKSupport::State::cholesky;
2550 herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
2551 AssertThrow(status >= 0, ExcInternalError());
2552 val = LAPACKSupport::State::eigenvalues;
2553 status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
2554 AssertThrow(status >= 0, ExcInternalError());
2555 val = LAPACKSupport::State::inverse_matrix;
2556 status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
2557 AssertThrow(status >= 0, ExcInternalError());
2558 val = LAPACKSupport::State::inverse_svd;
2559 status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
2560 AssertThrow(status >= 0, ExcInternalError());
2561 val = LAPACKSupport::State::lu;
2562 status = H5Tenum_insert(state_enum_id, "lu", &val);
2563 AssertThrow(status >= 0, ExcInternalError());
2564 val = LAPACKSupport::State::matrix;
2565 status = H5Tenum_insert(state_enum_id, "matrix", &val);
2566 AssertThrow(status >= 0, ExcInternalError());
2567 val = LAPACKSupport::State::svd;
2568 status = H5Tenum_insert(state_enum_id, "svd", &val);
2569 AssertThrow(status >= 0, ExcInternalError());
2570 val = LAPACKSupport::State::unusable;
2571 status = H5Tenum_insert(state_enum_id, "unusable", &val);
2572 AssertThrow(status >= 0, ExcInternalError());
2573 }
2574
2575 void
2576 create_HDF5_property_enum_id(hid_t &property_enum_id)
2577 {
2578 // create HDF5 enum type for LAPACKSupport::Property
2579 property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));
2580 LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
2581 herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
2582 AssertThrow(status >= 0, ExcInternalError());
2583 prop = LAPACKSupport::Property::general;
2584 status = H5Tenum_insert(property_enum_id, "general", &prop);
2585 AssertThrow(status >= 0, ExcInternalError());
2586 prop = LAPACKSupport::Property::hessenberg;
2587 status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
2588 AssertThrow(status >= 0, ExcInternalError());
2589 prop = LAPACKSupport::Property::lower_triangular;
2590 status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
2591 AssertThrow(status >= 0, ExcInternalError());
2592 prop = LAPACKSupport::Property::symmetric;
2593 status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
2594 AssertThrow(status >= 0, ExcInternalError());
2595 prop = LAPACKSupport::Property::upper_triangular;
2596 status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
2597 AssertThrow(status >= 0, ExcInternalError());
2598 }
2599 } // namespace
2600} // namespace internal
2601# endif
2602
2603
2604
2605template <typename NumberType>
2606void
2607 ScaLAPACKMatrix<NumberType>::save(
2608 const std::string & filename,
2609 const std::pair<unsigned int, unsigned int> &chunk_size) const
2610{
2611# ifndef DEAL_II_WITH_HDF5
2612 (void)filename;
2613 (void)chunk_size;
2614 AssertThrow(false, ExcMessage("HDF5 support is disabled."));
2615# else
2616
2617 std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
2618
2619 if (chunks_size_.first == numbers::invalid_unsigned_int ||
2620 chunks_size_.second == numbers::invalid_unsigned_int)
2621 {
2622 // default: store the matrix in chunks of columns
2623 chunks_size_.first = n_rows;
2624 chunks_size_.second = 1;
2625 }
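 // For example, for a 1000 x 50 matrix the default chunk size is 1000 x 1,
 // i.e. the matrix is written to the HDF5 file one full column per chunk.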
2626 Assert(chunks_size_.first > 0,
2627 ExcMessage("The row chunk size must be larger than 0."));
2628 AssertIndexRange(chunks_size_.first, n_rows + 1);
2629 Assert(chunks_size_.second > 0,
2630 ExcMessage("The column chunk size must be larger than 0."));
2631 AssertIndexRange(chunks_size_.second, n_columns + 1);
2632
2633# ifdef H5_HAVE_PARALLEL
2634 // implementation for configurations equipped with a parallel file system
2635 save_parallel(filename, chunks_size_);
2636
2637# else
2638 // implementation for configurations with no parallel file system
2639 save_serial(filename, chunks_size_);
2640
2641# endif
2642# endif
2643}
2644
2645
2646
2647template <typename NumberType>
2648void
2649 ScaLAPACKMatrix<NumberType>::save_serial(
2650 const std::string & filename,
2651 const std::pair<unsigned int, unsigned int> &chunk_size) const
2652{
2653# ifndef DEAL_II_WITH_HDF5
2654 (void)filename;
2655 (void)chunk_size;
2656 Assert(false, ExcInternalError());
2657# else
2658
2659 /*
2660 * The content of the distributed matrix is copied to a matrix using a 1x1
2661 * process grid. Therefore, one process has all the data and can write it to a
2662 * file.
2663 *
2664 * Create a 1x1 column grid which will be used to initialize
2665 * an effectively serial ScaLAPACK matrix to gather the contents from the
2666 * current object
2667 */
2668 const auto column_grid =
2669 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2670 1,
2671 1);
2672
2673 const int MB = n_rows, NB = n_columns;
2674 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2675 copy_to(tmp);
2676
2677 // the 1x1 grid has only one process and this one writes
2678 // the content of the matrix to the HDF5 file
2679 if (tmp.grid->mpi_process_is_active)
2680 {
2681 herr_t status;
2682
2683 // create a new file using default properties
2684 hid_t file_id =
2685 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
2686
2687 // modify dataset creation properties, i.e. enable chunking
2688 hsize_t chunk_dims[2];
2689 // reverse order of rows and columns as ScaLAPACK uses column-major
2690 // ordering
2691 chunk_dims[0] = chunk_size.second;
2692 chunk_dims[1] = chunk_size.first;
2693 hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
2694 status = H5Pset_chunk(data_property, 2, chunk_dims);
2695 AssertThrow(status >= 0, ExcIO());
2696
2697 // create the data space for the dataset
2698 hsize_t dims[2];
2699 // change order of rows and columns as ScaLAPACKMatrix uses column major
2700 // ordering
2701 dims[0] = n_columns;
2702 dims[1] = n_rows;
2703 hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
2704
2705 // create the dataset within the file using chunk creation properties
2706 hid_t type_id = hdf5_type_id(tmp.values.data());
2707 hid_t dataset_id = H5Dcreate2(file_id,
2708 "/matrix",
2709 type_id,
2710 dataspace_id,
2711 H5P_DEFAULT,
2712 data_property,
2713 H5P_DEFAULT);
2714
2715 // write the dataset
2716 status = H5Dwrite(
2717 dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
2718 AssertThrow(status >= 0, ExcIO());
2719
2720 // create HDF5 enum type for LAPACKSupport::State and
2721 // LAPACKSupport::Property
2722 hid_t state_enum_id, property_enum_id;
2723 internal::create_HDF5_state_enum_id(state_enum_id);
2724 internal::create_HDF5_property_enum_id(property_enum_id);
2725
2726 // create the data space for the state enum
2727 hsize_t dims_state[1];
2728 dims_state[0] = 1;
2729 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2730 // create the dataset for the state enum
2731 hid_t state_enum_dataset = H5Dcreate2(file_id,
2732 "/state",
2733 state_enum_id,
2734 state_enum_dataspace,
2735 H5P_DEFAULT,
2736 H5P_DEFAULT,
2737 H5P_DEFAULT);
2738 // write the dataset for the state enum
2739 status = H5Dwrite(state_enum_dataset,
2740 state_enum_id,
2741 H5S_ALL,
2742 H5S_ALL,
2743 H5P_DEFAULT,
2744 &state);
2745 AssertThrow(status >= 0, ExcIO());
2746
2747 // create the data space for the property enum
2748 hsize_t dims_property[1];
2749 dims_property[0] = 1;
2750 hid_t property_enum_dataspace =
2751 H5Screate_simple(1, dims_property, nullptr);
2752 // create the dataset for the property enum
2753 hid_t property_enum_dataset = H5Dcreate2(file_id,
2754 "/property",
2755 property_enum_id,
2756 property_enum_dataspace,
2757 H5P_DEFAULT,
2758 H5P_DEFAULT,
2759 H5P_DEFAULT);
2760 // write the dataset for the property enum
2761 status = H5Dwrite(property_enum_dataset,
2762 property_enum_id,
2763 H5S_ALL,
2764 H5S_ALL,
2765 H5P_DEFAULT,
2766 &property);
2767 AssertThrow(status >= 0, ExcIO());
2768
2769 // end access to the datasets and release resources used by them
2770 status = H5Dclose(dataset_id);
2771 AssertThrow(status >= 0, ExcIO());
2772 status = H5Dclose(state_enum_dataset);
2773 AssertThrow(status >= 0, ExcIO());
2774 status = H5Dclose(property_enum_dataset);
2775 AssertThrow(status >= 0, ExcIO());
2776
2777 // terminate access to the data spaces
2778 status = H5Sclose(dataspace_id);
2779 AssertThrow(status >= 0, ExcIO());
2780 status = H5Sclose(state_enum_dataspace);
2781 AssertThrow(status >= 0, ExcIO());
2782 status = H5Sclose(property_enum_dataspace);
2783 AssertThrow(status >= 0, ExcIO());
2784
2785 // release enum data types
2786 status = H5Tclose(state_enum_id);
2787 AssertThrow(status >= 0, ExcIO());
2788 status = H5Tclose(property_enum_id);
2789 AssertThrow(status >= 0, ExcIO());
2790
2791 // release the creation property
2792 status = H5Pclose(data_property);
2793 AssertThrow(status >= 0, ExcIO());
2794
2795 // close the file.
2796 status = H5Fclose(file_id);
2797 AssertThrow(status >= 0, ExcIO());
2798 }
2799# endif
2800}
2801
2802
2803
2804template <typename NumberType>
2805void
2806 ScaLAPACKMatrix<NumberType>::save_parallel(
2807 const std::string & filename,
2808 const std::pair<unsigned int, unsigned int> &chunk_size) const
2809{
2810# ifndef DEAL_II_WITH_HDF5
2811 (void)filename;
2812 (void)chunk_size;
2813 Assert(false, ExcInternalError());
2814# else
2815
2816 const unsigned int n_mpi_processes(
2817 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
2818 MPI_Info info = MPI_INFO_NULL;
2819 /*
2820 * The content of the distributed matrix is copied to a matrix using a
2821 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
2822 * of the matrix, which they can write to the file
2823 *
2824 * Create a 1xn_processes column grid
2825 */
2826 const auto column_grid =
2827 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2828 1,
2829 n_mpi_processes);
2830
2831 const int MB = n_rows;
2832 /*
2833 * If the ratio n_columns/n_mpi_processes is smaller than the column block
2834 * size of the original matrix, the redistribution and saving of the matrix
2835 * requires a significant amount of MPI communication. Therefore, it is better
2836 * to set a minimum value for the block size NB, causing only
2837 * ceil(n_columns/NB) processes being actively involved in saving the matrix.
2838 * Example: A 2*10^9 x 400 matrix is distributed on a 80 x 5 process grid
2839 * using block size 32. Instead of distributing the matrix on a 1 x 400
2840 * process grid with a row block size of 2*10^9 and a column block size of 1,
2841 * the minimum value for NB yields that only ceil(400/32)=13 processes will be
2842 * writing the matrix to disk.
2843 */
2844 const int NB = std::max(static_cast<int>(std::ceil(
2845 static_cast<double>(n_columns) / n_mpi_processes)),
2846 column_block_size);
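 // With the example from the comment above (n_columns = 400, 400 MPI
 // processes, column_block_size = 32): NB = max(ceil(400/400), 32) = 32, so
 // only ceil(400/32) = 13 processes own columns of tmp and hence write data.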
2847
2848 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2849 copy_to(tmp);
2850
2851 // get pointer to data held by the process
2852 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
2853
2854 herr_t status;
2855 // dataset dimensions
2856 hsize_t dims[2];
2857
2858 // set up file access property list with parallel I/O access
2859 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
2860 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2861 AssertThrow(status >= 0, ExcIO());
2862
2863 // create a new file collectively and release property list identifier
2864 hid_t file_id =
2865 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
2866 status = H5Pclose(plist_id);
2867 AssertThrow(status >= 0, ExcIO());
2868
2869 // As ScaLAPACK, and therefore the class ScaLAPACKMatrix, uses column-major
2870 // ordering but HDF5 row-major ordering, we have to reverse entries related to
2871 // columns and rows in the following. Create the dataspace for the dataset.
2872 dims[0] = tmp.n_columns;
2873 dims[1] = tmp.n_rows;
2874
2875 hid_t filespace = H5Screate_simple(2, dims, nullptr);
2876
2877 // create the chunked dataset with default properties and close filespace
2878 hsize_t chunk_dims[2];
2879 // reverse order of rows and columns as ScaLAPACK uses column-major ordering
2880 chunk_dims[0] = chunk_size.second;
2881 chunk_dims[1] = chunk_size.first;
2882 plist_id = H5Pcreate(H5P_DATASET_CREATE);
2883 H5Pset_chunk(plist_id, 2, chunk_dims);
2884 hid_t type_id = hdf5_type_id(data);
2885 hid_t dset_id = H5Dcreate2(
2886 file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
2887
2888 status = H5Sclose(filespace);
2889 AssertThrow(status >= 0, ExcIO());
2890
2891 status = H5Pclose(plist_id);
2892 AssertThrow(status >= 0, ExcIO());
2893
2894 // gather the number of local rows and columns from all processes
2895 std::vector<int> proc_n_local_rows(n_mpi_processes),
2896 proc_n_local_columns(n_mpi_processes);
2897 int ierr = MPI_Allgather(&tmp.n_local_rows,
2898 1,
2899 MPI_INT,
2900 proc_n_local_rows.data(),
2901 1,
2902 MPI_INT,
2903 tmp.grid->mpi_communicator);
2904 AssertThrowMPI(ierr);
2905 ierr = MPI_Allgather(&tmp.n_local_columns,
2906 1,
2907 MPI_INT,
2908 proc_n_local_columns.data(),
2909 1,
2910 MPI_INT,
2911 tmp.grid->mpi_communicator);
2912 AssertThrowMPI(ierr);
2913
2914 const unsigned int my_rank(
2915 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
2916
2917 // hyperslab selection parameters
2918 // each process defines dataset in memory and writes it to the hyperslab in
2919 // the file
2920 hsize_t count[2];
2921 count[0] = tmp.n_local_columns;
2922 count[1] = tmp.n_rows;
2923 hid_t memspace = H5Screate_simple(2, count, nullptr);
2924
2925 hsize_t offset[2] = {0};
2926 for (unsigned int i = 0; i < my_rank; ++i)
2927 offset[0] += proc_n_local_columns[i];
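 // offset[0] is the index of the first column (in the HDF5 file layout) owned
 // by this process: the sum of the local column counts of all lower-ranked
 // processes on the 1 x n_processes grid.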
2928
2929 // select hyperslab in the file.
2930 filespace = H5Dget_space(dset_id);
2931 status = H5Sselect_hyperslab(
2932 filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2933 AssertThrow(status >= 0, ExcIO());
2934
2935 // create property list for independent dataset write
2936 plist_id = H5Pcreate(H5P_DATASET_XFER);
2937 status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
2938 AssertThrow(status >= 0, ExcIO());
2939
2940 // process with no data will not participate in writing to the file
2941 if (tmp.values.size() > 0)
2942 {
2943 status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
2944 AssertThrow(status >= 0, ExcIO());
2945 }
2946 // close/release sources
2947 status = H5Dclose(dset_id);
2948 AssertThrow(status >= 0, ExcIO());
2949 status = H5Sclose(filespace);
2950 AssertThrow(status >= 0, ExcIO());
2951 status = H5Sclose(memspace);
2952 AssertThrow(status >= 0, ExcIO());
2953 status = H5Pclose(plist_id);
2954 AssertThrow(status >= 0, ExcIO());
2955 status = H5Fclose(file_id);
2956 AssertThrow(status >= 0, ExcIO());
2957
2958 // before writing the state and property to file wait for
2959 // all processes to finish writing the matrix content to the file
2960 ierr = MPI_Barrier(tmp.grid->mpi_communicator);
2961 AssertThrowMPI(ierr);
2962
2963 // only root process will write state and property to the file
2964 if (tmp.grid->this_mpi_process == 0)
2965 {
2966 // open file using default properties
2967 hid_t file_id_reopen =
2968 H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
2969
2970 // create HDF5 enum type for LAPACKSupport::State and
2971 // LAPACKSupport::Property
2972 hid_t state_enum_id, property_enum_id;
2973 internal::create_HDF5_state_enum_id(state_enum_id);
2974 internal::create_HDF5_property_enum_id(property_enum_id);
2975
2976 // create the data space for the state enum
2977 hsize_t dims_state[1];
2978 dims_state[0] = 1;
2979 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2980 // create the dataset for the state enum
2981 hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
2982 "/state",
2983 state_enum_id,
2984 state_enum_dataspace,
2985 H5P_DEFAULT,
2986 H5P_DEFAULT,
2987 H5P_DEFAULT);
2988 // write the dataset for the state enum
2989 status = H5Dwrite(state_enum_dataset,
2990 state_enum_id,
2991 H5S_ALL,
2992 H5S_ALL,
2993 H5P_DEFAULT,
2994 &state);
2995 AssertThrow(status >= 0, ExcIO());
2996
2997 // create the data space for the property enum
2998 hsize_t dims_property[1];
2999 dims_property[0] = 1;
3000 hid_t property_enum_dataspace =
3001 H5Screate_simple(1, dims_property, nullptr);
3002 // create the dataset for the property enum
3003 hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
3004 "/property",
3005 property_enum_id,
3006 property_enum_dataspace,
3007 H5P_DEFAULT,
3008 H5P_DEFAULT,
3009 H5P_DEFAULT);
3010 // write the dataset for the property enum
3011 status = H5Dwrite(property_enum_dataset,
3012 property_enum_id,
3013 H5S_ALL,
3014 H5S_ALL,
3015 H5P_DEFAULT,
3016 &property);
3017 AssertThrow(status >= 0, ExcIO());
3018
3019 status = H5Dclose(state_enum_dataset);
3020 AssertThrow(status >= 0, ExcIO());
3021 status = H5Dclose(property_enum_dataset);
3022 AssertThrow(status >= 0, ExcIO());
3023 status = H5Sclose(state_enum_dataspace);
3024 AssertThrow(status >= 0, ExcIO());
3025 status = H5Sclose(property_enum_dataspace);
3026 AssertThrow(status >= 0, ExcIO());
3027 status = H5Tclose(state_enum_id);
3028 AssertThrow(status >= 0, ExcIO());
3029 status = H5Tclose(property_enum_id);
3030 AssertThrow(status >= 0, ExcIO());
3031 status = H5Fclose(file_id_reopen);
3032 AssertThrow(status >= 0, ExcIO());
3033 }
3034
3035# endif
3036}
3037
3038
3039
3040template <typename NumberType>
3041void
3042ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
3043{
3044# ifndef DEAL_II_WITH_HDF5
3045 (void)filename;
3046 AssertThrow(false, ExcMessage("HDF5 support is disabled."));
3047# else
3048# ifdef H5_HAVE_PARALLEL
3049 // implementation for configurations equipped with a parallel file system
3050 load_parallel(filename);
3051
3052# else
3053 // implementation for configurations with no parallel file system
3054 load_serial(filename);
3055# endif
3056# endif
3057}
3058
3059
3060
3061template <typename NumberType>
3062void
3063 ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
3064 {
3065# ifndef DEAL_II_WITH_HDF5
3066 (void)filename;
3067 Assert(false, ExcInternalError());
3068# else
3069
3070 /*
3071 * The content of the file is read into a matrix using a 1x1
3072 * process grid. Therefore, one process has all the data and can read it from
3073 * the file.
3074 */
3075 // create a 1x1 process grid
3076 const auto one_grid =
3077 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3078 1,
3079 1);
3080
3081 const int MB = n_rows, NB = n_columns;
3082 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);
3083
3084 int state_int = -1;
3085 int property_int = -1;
3086
3087 // the 1x1 grid has only one process and this one reads
3088 // the content of the matrix from the HDF5 file
3089 if (tmp.grid->mpi_process_is_active)
3090 {
3091 herr_t status;
3092
3093 // open the file in read-only mode
3094 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
3095
3096 // open the dataset in the file
3097 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3098
3099 // check the datatype of the data in the file
3100 // datatype of source and destination must have the same class
3101 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3102 // Selection
3103 hid_t datatype = H5Dget_type(dataset_id);
3104 H5T_class_t t_class_in = H5Tget_class(datatype);
3105 H5T_class_t t_class = H5Tget_class(hdf5_type_id(tmp.values.data()));
3106 AssertThrow(
3107 t_class_in == t_class,
3108 ExcMessage(
3109 "The data type of the matrix to be read does not match the archive"));
3110
3111 // get dataspace handle
3112 hid_t dataspace_id = H5Dget_space(dataset_id);
3113 // get number of dimensions
3114 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3115 AssertThrow(ndims == 2, ExcIO());
3116 // get every dimension
3117 hsize_t dims[2];
3118 H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3119 AssertThrow(
3120 static_cast<int>(dims[0]) == n_columns,
3121 ExcMessage(
3122 "The number of columns of the matrix does not match the content of the archive"));
3123 AssertThrow(
3124 static_cast<int>(dims[1]) == n_rows,
3125 ExcMessage(
3126 "The number of rows of the matrix does not match the content of the archive"));
3127
3128 // read data
3129 status = H5Dread(dataset_id,
3130 hdf5_type_id(tmp.values.data()),
3131 H5S_ALL,
3132 H5S_ALL,
3133 H5P_DEFAULT,
3134 tmp.values.data());
3135 AssertThrow(status >= 0, ExcIO());
3136
3137 // create HDF5 enum type for LAPACKSupport::State and
3138 // LAPACKSupport::Property
3139 hid_t state_enum_id, property_enum_id;
3140 internal::create_HDF5_state_enum_id(state_enum_id);
3141 internal::create_HDF5_property_enum_id(property_enum_id);
3142
3143 // open the datasets for the state and property enum in the file
3144 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3145 hid_t datatype_state = H5Dget_type(dataset_state_id);
3146 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3147 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3148
3149 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3150 hid_t datatype_property = H5Dget_type(dataset_property_id);
3151 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3152 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3153
3154 // get dataspace handles
3155 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3156 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3157 // get number of dimensions
3158 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3159 AssertThrow(ndims_state == 1, ExcIO());
3160 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3161 AssertThrow(ndims_property == 1, ExcIO());
3162 // get every dimension
3163 hsize_t dims_state[1];
3164 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3165 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3166 hsize_t dims_property[1];
3167 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3168 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3169
3170 // read data
3171 status = H5Dread(dataset_state_id,
3172 state_enum_id,
3173 H5S_ALL,
3174 H5S_ALL,
3175 H5P_DEFAULT,
3176 &tmp.state);
3177 AssertThrow(status >= 0, ExcIO());
3178 // To send the state from the root process to the other processes,
3179 // the state enum is cast to an integer, which will be broadcast and
3180 // subsequently cast back to the enum type.
3181 state_int = static_cast<int>(tmp.state);
3182
3183 status = H5Dread(dataset_property_id,
3184 property_enum_id,
3185 H5S_ALL,
3186 H5S_ALL,
3187 H5P_DEFAULT,
3188 &tmp.property);
3189 AssertThrow(status >= 0, ExcIO());
3190 // To send the property from the root process to the other processes,
3191 // the property enum is cast to an integer, which will be broadcast and
3192 // subsequently cast back to the enum type.
3193 property_int = static_cast<int>(tmp.property);
3194
3195 // terminate access to the data spaces
3196 status = H5Sclose(dataspace_id);
3197 AssertThrow(status >= 0, ExcIO());
3198 status = H5Sclose(dataspace_state);
3199 AssertThrow(status >= 0, ExcIO());
3200 status = H5Sclose(dataspace_property);
3201 AssertThrow(status >= 0, ExcIO());
3202
3203 // release data type handles
3204 status = H5Tclose(datatype);
3205 AssertThrow(status >= 0, ExcIO());
3206 status = H5Tclose(state_enum_id);
3207 AssertThrow(status >= 0, ExcIO());
3208 status = H5Tclose(property_enum_id);
3209 AssertThrow(status >= 0, ExcIO());
3210
3211 // end access to the data sets and release resources used by them
3212 status = H5Dclose(dataset_state_id);
3213 AssertThrow(status >= 0, ExcIO());
3214 status = H5Dclose(dataset_id);
3215 AssertThrow(status >= 0, ExcIO());
3216 status = H5Dclose(dataset_property_id);
3217 AssertThrow(status >= 0, ExcIO());
3218
3219 // close the file.
3220 status = H5Fclose(file_id);
3221 AssertThrow(status >= 0, ExcIO());
3222 }
3223 // so far only the root process has the correct state integer --> broadcasting
3224 tmp.grid->send_to_inactive(&state_int, 1);
3225 // so far only the root process has the correct property integer -->
3226 // broadcasting
3227 tmp.grid->send_to_inactive(&property_int, 1);
3228
3229 tmp.state = static_cast<LAPACKSupport::State>(state_int);
3230 tmp.property = static_cast<LAPACKSupport::Property>(property_int);
3231
3232 tmp.copy_to(*this);
3233
3234# endif // DEAL_II_WITH_HDF5
3235}
3236
3237
3238
3239template <typename NumberType>
3240void
3241 ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
3242 {
3243# ifndef DEAL_II_WITH_HDF5
3244 (void)filename;
3245 Assert(false, ExcInternalError());
3246# else
3247# ifndef H5_HAVE_PARALLEL
3248 Assert(false, ExcInternalError());
3249# else
3250
3251 const unsigned int n_mpi_processes(
3252 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
3253 MPI_Info info = MPI_INFO_NULL;
3254 /*
3255 * The content of the file is read into a matrix using a
3256 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
3257 * of the matrix, which they can read from the file.
3258 */
3259 // create a 1xP column grid with P being the number of MPI processes
3260 const auto column_grid =
3261 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3262 1,
3263 n_mpi_processes);
3264
3265 const int MB = n_rows;
3266 // for the choice of NB see explanation in save_parallel()
3267 const int NB = std::max(static_cast<int>(std::ceil(
3268 static_cast<double>(n_columns) / n_mpi_processes)),
3269 column_block_size);
3270
3271 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
3272
3273 // get pointer to data held by the process
3274 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
3275
3276 herr_t status;
3277
3278 // set up file access property list with parallel I/O access
3279 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
3280 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
3281 AssertThrow(status >= 0, ExcIO());
3282
3283 // open file collectively in read-only mode and release property list
3284 // identifier
3285 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
3286 status = H5Pclose(plist_id);
3287 AssertThrow(status >= 0, ExcIO());
3288
3289 // open the dataset in the file collectively
3290 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3291
3292 // check the datatype of the dataset in the file
3293 // if the classes of type of the dataset and the matrix do not match abort
3294 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3295 // Selection
3296 hid_t datatype = hdf5_type_id(data);
3297 hid_t datatype_inp = H5Dget_type(dataset_id);
3298 H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
3299 H5T_class_t t_class = H5Tget_class(datatype);
3300 AssertThrow(
3301 t_class_inp == t_class,
3302 ExcMessage(
3303 "The data type of the matrix to be read does not match the archive"));
3304
3305 // get the dimensions of the matrix stored in the file
3306 // get dataspace handle
3307 hid_t dataspace_id = H5Dget_space(dataset_id);
3308 // get number of dimensions
3309 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3310 AssertThrow(ndims == 2, ExcIO());
3311 // get every dimension
3312 hsize_t dims[2];
3313 status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3314 AssertThrow(status >= 0, ExcIO());
3315 AssertThrow(
3316 static_cast<int>(dims[0]) == n_columns,
3317 ExcMessage(
3318 "The number of columns of the matrix does not match the content of the archive"));
3319 AssertThrow(
3320 static_cast<int>(dims[1]) == n_rows,
3321 ExcMessage(
3322 "The number of rows of the matrix does not match the content of the archive"));
3323
3324 // gather the number of local rows and columns from all processes
3325 std::vector<int> proc_n_local_rows(n_mpi_processes),
3326 proc_n_local_columns(n_mpi_processes);
3327 int ierr = MPI_Allgather(&tmp.n_local_rows,
3328 1,
3329 MPI_INT,
3330 proc_n_local_rows.data(),
3331 1,
3332 MPI_INT,
3333 tmp.grid->mpi_communicator);
3334 AssertThrowMPI(ierr);
3335 ierr = MPI_Allgather(&tmp.n_local_columns,
3336 1,
3337 MPI_INT,
3338 proc_n_local_columns.data(),
3339 1,
3340 MPI_INT,
3341 tmp.grid->mpi_communicator);
3342 AssertThrowMPI(ierr);
3343
3344 const unsigned int my_rank(
3345 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
3346
3347 // hyperslab selection parameters
3348 // each process defines dataset in memory and writes it to the hyperslab in
3349 // the file
3350 hsize_t count[2];
3351 count[0] = tmp.n_local_columns;
3352 count[1] = tmp.n_local_rows;
3353
3354 hsize_t offset[2] = {0};
3355 for (unsigned int i = 0; i < my_rank; ++i)
3356 offset[0] += proc_n_local_columns[i];
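 // As in save_parallel(): offset[0] is the first column this process owns,
 // obtained by summing the local column counts of all lower-ranked processes.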
3357
3358 // select hyperslab in the file
3359 status = H5Sselect_hyperslab(
3360 dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
3361 AssertThrow(status >= 0, ExcIO());
3362
3363 // create a memory dataspace independently
3364 hid_t memspace = H5Screate_simple(2, count, nullptr);
3365
3366 // read data independently
3367 status =
3368 H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
3369 AssertThrow(status >= 0, ExcIO());
3370
3371 // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
3372 hid_t state_enum_id, property_enum_id;
3373 internal::create_HDF5_state_enum_id(state_enum_id);
3374 internal::create_HDF5_property_enum_id(property_enum_id);
3375
3376 // open the datasets for the state and property enum in the file
3377 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3378 hid_t datatype_state = H5Dget_type(dataset_state_id);
3379 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3380 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3381
3382 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3383 hid_t datatype_property = H5Dget_type(dataset_property_id);
3384 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3385 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3386
3387 // get dataspace handles
3388 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3389 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3390 // get number of dimensions
3391 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3392 AssertThrow(ndims_state == 1, ExcIO());
3393 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3394 AssertThrow(ndims_property == 1, ExcIO());
3395 // get every dimension
3396 hsize_t dims_state[1];
3397 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3398 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3399 hsize_t dims_property[1];
3400 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3401 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3402
3403 // read data
3404 status = H5Dread(
3405 dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
3406 AssertThrow(status >= 0, ExcIO());
3407
3408 status = H5Dread(dataset_property_id,
3409 property_enum_id,
3410 H5S_ALL,
3411 H5S_ALL,
3412 H5P_DEFAULT,
3413 &tmp.property);
3414 AssertThrow(status >= 0, ExcIO());
3415
3416 // close/release sources
3417 status = H5Sclose(memspace);
3418 AssertThrow(status >= 0, ExcIO());
3419 status = H5Dclose(dataset_id);
3420 AssertThrow(status >= 0, ExcIO());
3421 status = H5Dclose(dataset_state_id);
3422 AssertThrow(status >= 0, ExcIO());
3423 status = H5Dclose(dataset_property_id);
3424 AssertThrow(status >= 0, ExcIO());
3425 status = H5Sclose(dataspace_id);
3426 AssertThrow(status >= 0, ExcIO());
3427 status = H5Sclose(dataspace_state);
3428 AssertThrow(status >= 0, ExcIO());
3429 status = H5Sclose(dataspace_property);
3430 AssertThrow(status >= 0, ExcIO());
3431 // status = H5Tclose(datatype);
3432 // AssertThrow(status >= 0, ExcIO());
3433 status = H5Tclose(state_enum_id);
3434 AssertThrow(status >= 0, ExcIO());
3435 status = H5Tclose(property_enum_id);
3436 AssertThrow(status >= 0, ExcIO());
3437 status = H5Fclose(file_id);
3438 AssertThrow(status >= 0, ExcIO());
3439
3440 // copying the distributed matrices
3441 tmp.copy_to(*this);
3442
3443# endif // H5_HAVE_PARALLEL
3444# endif // DEAL_II_WITH_HDF5
3445}
3446
3447
3448
3449namespace internal
3450{
3451 namespace
3452 {
3453 template <typename NumberType>
3454 void
3455 scale_columns(ScaLAPACKMatrix<NumberType> & matrix,
3456 const ArrayView<const NumberType> &factors)
3457 {
3458 Assert(matrix.n() == factors.size(),
3459 ExcDimensionMismatch(matrix.n(), factors.size()));
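 // For every locally stored column, look up its global column index and
 // multiply all locally stored entries of that column by the matching factor.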
3460
3461 for (unsigned int i = 0; i < matrix.local_n(); ++i)
3462 {
3463 const NumberType s = factors[matrix.global_column(i)];
3464
3465 for (unsigned int j = 0; j < matrix.local_m(); ++j)
3466 matrix.local_el(j, i) *= s;
3467 }
3468 }
3469
3470 template <typename NumberType>
3471 void
3472 scale_rows(ScaLAPACKMatrix<NumberType> & matrix,
3473 const ArrayView<const NumberType> &factors)
3474 {
3475 Assert(matrix.m() == factors.size(),
3476 ExcDimensionMismatch(matrix.m(), factors.size()));
3477
3478 for (unsigned int i = 0; i < matrix.local_m(); ++i)
3479 {
3480 const NumberType s = factors[matrix.global_row(i)];
3481
3482 for (unsigned int j = 0; j < matrix.local_n(); ++j)
3483 matrix.local_el(i, j) *= s;
3484 }
3485 }
3486
3487 } // namespace
3488} // namespace internal
3489
3490
3491
3492template <typename NumberType>
3493template <class InputVector>
3494void
3495 ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
3496 {
3497 if (this->grid->mpi_process_is_active)
3498 internal::scale_columns(*this, make_array_view(factors));
3499}
3500
3501
3502
3503template <typename NumberType>
3504template <class InputVector>
3505void
3506 ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
3507 {
3508 if (this->grid->mpi_process_is_active)
3509 internal::scale_rows(*this, make_array_view(factors));
3510}
3511
3512
3513
3514// instantiations
3515# include "scalapack.inst"
3516
3517
3518 DEAL_II_NAMESPACE_CLOSE
3519
3520#endif // DEAL_II_WITH_SCALAPACK
ArrayView< typename std::remove_reference< typename std::iterator_traits< Iterator >::reference >::type, MemorySpaceType > make_array_view(const Iterator begin, const Iterator end)
Definition: array_view.h:699
std::size_t size() const
Definition: array_view.h:576
size_type m() const
size_type n() const
int descriptor[9]
Definition: scalapack.h:935
std::vector< NumberType > compute_SVD(ScaLAPACKMatrix< NumberType > *U=nullptr, ScaLAPACKMatrix< NumberType > *VT=nullptr)
Definition: scalapack.cc:2013
std::vector< NumberType > eigenpairs_symmetric_by_value(const std::pair< NumberType, NumberType > &value_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1452
NumberType frobenius_norm() const
Definition: scalapack.cc:2402
unsigned int pseudoinverse(const NumberType ratio)
Definition: scalapack.cc:2225
std::vector< NumberType > eigenpairs_symmetric_by_value_MRRR(const std::pair< NumberType, NumberType > &value_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1785
void copy_from(const LAPACKFullMatrix< NumberType > &matrix, const unsigned int rank)
Definition: scalapack.cc:362
void save_parallel(const std::string &filename, const std::pair< unsigned int, unsigned int > &chunk_size) const
Definition: scalapack.cc:2806
void least_squares(ScaLAPACKMatrix< NumberType > &B, const bool transpose=false)
Definition: scalapack.cc:2133
ScaLAPACKMatrix< NumberType > & operator=(const FullMatrix< NumberType > &)
Definition: scalapack.cc:331
void Tadd(const NumberType b, const ScaLAPACKMatrix< NumberType > &B)
Definition: scalapack.cc:1056
void mTmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1211
void add(const ScaLAPACKMatrix< NumberType > &B, const NumberType a=0., const NumberType b=1., const bool transpose_B=false)
Definition: scalapack.cc:990
LAPACKSupport::State get_state() const
Definition: scalapack.cc:322
LAPACKSupport::Property get_property() const
Definition: scalapack.cc:313
int column_block_size
Definition: scalapack.h:920
void Tmmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1197
std::vector< NumberType > eigenpairs_symmetric_MRRR(const bool compute_eigenvectors, const std::pair< unsigned int, unsigned int > &index_limits=std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int), const std::pair< NumberType, NumberType > &value_limits=std::make_pair(std::numeric_limits< NumberType >::quiet_NaN(), std::numeric_limits< NumberType >::quiet_NaN()))
Definition: scalapack.cc:1803
void scale_rows(const InputVector &factors)
Definition: scalapack.cc:3506
std::shared_ptr< const Utilities::MPI::ProcessGrid > grid
Definition: scalapack.h:900
ScaLAPACKMatrix(const size_type n_rows, const size_type n_columns, const std::shared_ptr< const Utilities::MPI::ProcessGrid > &process_grid, const size_type row_block_size=32, const size_type column_block_size=32, const LAPACKSupport::Property property=LAPACKSupport::Property::general)
Definition: scalapack.cc:80
void load(const std::string &filename)
Definition: scalapack.cc:3042
const int submatrix_column
Definition: scalapack.h:981
const int submatrix_row
Definition: scalapack.h:975
void mmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1183
std::vector< NumberType > eigenpairs_symmetric(const bool compute_eigenvectors, const std::pair< unsigned int, unsigned int > &index_limits=std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int), const std::pair< NumberType, NumberType > &value_limits=std::make_pair(std::numeric_limits< NumberType >::quiet_NaN(), std::numeric_limits< NumberType >::quiet_NaN()))
Definition: scalapack.cc:1472
void save_serial(const std::string &filename, const std::pair< unsigned int, unsigned int > &chunk_size) const
Definition: scalapack.cc:2649
NumberType norm_general(const char type) const
Definition: scalapack.cc:2416
void save(const std::string &filename, const std::pair< unsigned int, unsigned int > &chunk_size=std::make_pair(numbers::invalid_unsigned_int, numbers::invalid_unsigned_int)) const
Definition: scalapack.cc:2607
void load_parallel(const std::string &filename)
Definition: scalapack.cc:3241
NumberType l1_norm() const
Definition: scalapack.cc:2374
void compute_lu_factorization()
Definition: scalapack.cc:1272
NumberType norm_symmetric(const char type) const
Definition: scalapack.cc:2475
void mult(const NumberType b, const ScaLAPACKMatrix< NumberType > &B, const NumberType c, ScaLAPACKMatrix< NumberType > &C, const bool transpose_A=false, const bool transpose_B=false) const
Definition: scalapack.cc:1066
LAPACKSupport::Property property
Definition: scalapack.h:893
void set_property(const LAPACKSupport::Property property)
Definition: scalapack.cc:303
void reinit(const size_type n_rows, const size_type n_columns, const std::shared_ptr< const Utilities::MPI::ProcessGrid > &process_grid, const size_type row_block_size=32, const size_type column_block_size=32, const LAPACKSupport::Property property=LAPACKSupport::Property::general)
Definition: scalapack.cc:216
void load_serial(const std::string &filename)
Definition: scalapack.cc:3063
std::vector< NumberType > eigenpairs_symmetric_by_index_MRRR(const std::pair< unsigned int, unsigned int > &index_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1762
NumberType reciprocal_condition_number(const NumberType a_norm) const
Definition: scalapack.cc:2312
LAPACKSupport::State state
Definition: scalapack.h:887
void TmTmult(ScaLAPACKMatrix< NumberType > &C, const ScaLAPACKMatrix< NumberType > &B, const bool adding=false) const
Definition: scalapack.cc:1225
std::vector< NumberType > eigenpairs_symmetric_by_index(const std::pair< unsigned int, unsigned int > &index_limits, const bool compute_eigenvectors)
Definition: scalapack.cc:1429
unsigned int global_column(const unsigned int loc_column) const
Definition: scalapack.cc:514
void copy_to(FullMatrix< NumberType > &matrix) const
Definition: scalapack.cc:666
unsigned int global_row(const unsigned int loc_row) const
Definition: scalapack.cc:497
void compute_cholesky_factorization()
Definition: scalapack.cc:1239
void copy_transposed(const ScaLAPACKMatrix< NumberType > &B)
Definition: scalapack.cc:980
NumberType linfty_norm() const
Definition: scalapack.cc:2388
void scale_columns(const InputVector &factors)
Definition: scalapack.cc:3495
AlignedVector< T > values
Definition: table.h:795
void reinit(const size_type size1, const size_type size2, const bool omit_default_initialization=false)
#define DEAL_II_NAMESPACE_OPEN
Definition: config.h:442
#define DEAL_II_NAMESPACE_CLOSE
Definition: config.h:443
DerivativeForm< 1, spacedim, dim, Number > transpose(const DerivativeForm< 1, dim, spacedim, Number > &DF)
const unsigned int v1
Definition: grid_tools.cc:1000
static ::ExceptionBase & ExcIO()
static ::ExceptionBase & ExcNotImplemented()
static ::ExceptionBase & ExcErrorCode(std::string arg1, types::blas_int arg2)
#define Assert(cond, exc)
Definition: exceptions.h:1473
#define AssertIsFinite(number)
Definition: exceptions.h:1758
#define AssertThrowMPI(error_code)
Definition: exceptions.h:1790
#define AssertIndexRange(index, range)
Definition: exceptions.h:1732
static ::ExceptionBase & ExcInternalError()
static ::ExceptionBase & ExcIndexRange(std::size_t arg1, std::size_t arg2, std::size_t arg3)
static ::ExceptionBase & ExcDimensionMismatch(std::size_t arg1, std::size_t arg2)
static ::ExceptionBase & ExcMessage(std::string arg1)
#define AssertThrow(cond, exc)
Definition: exceptions.h:1583
@ cholesky
Contents is a Cholesky decomposition.
@ lu
Contents is an LU decomposition.
@ matrix
Contents is actually a matrix.
@ unusable
Contents is something useless.
@ inverse_matrix
Contents is the inverse of a matrix.
@ svd
Matrix contains singular value decomposition,.
@ inverse_svd
Matrix is the inverse of a singular value decomposition.
@ eigenvalues
Eigenvalue vector is filled.
@ symmetric
Matrix is symmetric.
@ hessenberg
Matrix is in upper Hessenberg form.
@ diagonal
Matrix is diagonal.
@ upper_triangular
Matrix is upper triangular.
@ lower_triangular
Matrix is lower triangular.
@ general
No special properties.
@ scalapack_copy_from
ScaLAPACKMatrix<NumberType>::copy_from.
Definition: mpi_tags.h:121
@ scalapack_copy_to2
ScaLAPACKMatrix<NumberType>::copy_to.
Definition: mpi_tags.h:119
@ scalapack_copy_to
ScaLAPACKMatrix<NumberType>::copy_to.
Definition: mpi_tags.h:117
void free_communicator(MPI_Comm &mpi_communicator)
Definition: mpi.cc:194
unsigned int this_mpi_process(const MPI_Comm &mpi_communicator)
Definition: mpi.cc:151
T min(const T &t, const MPI_Comm &mpi_communicator)
T sum(const T &t, const MPI_Comm &mpi_communicator)
const MPI_Datatype mpi_type_id_for_type
Definition: mpi.h:1552
unsigned int n_mpi_processes(const MPI_Comm &mpi_communicator)
Definition: mpi.cc:140
T max(const T &t, const MPI_Comm &mpi_communicator)
static const unsigned int invalid_unsigned_int
Definition: types.h:201
::VectorizedArray< Number, width > min(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > max(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)
hid_t hdf5_type_id(const number *)
Definition: scalapack.cc:39
std::array< std::pair< Number, Tensor< 1, dim, Number > >, std::integral_constant< int, dim >::value > eigenvectors(const SymmetricTensor< 2, dim, Number > &T, const SymmetricTensorEigenvectorMethod method=SymmetricTensorEigenvectorMethod::ql_implicit_shifts)