Reference documentation for deal.II version 9.3.3
scalapack.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2017 - 2020 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
17#include <deal.II/lac/scalapack.h>
18
19#ifdef DEAL_II_WITH_SCALAPACK
20
22# include <deal.II/base/mpi.h>
23# include <deal.II/base/mpi.templates.h>
24
25# include <deal.II/lac/scalapack.templates.h>
26
27# ifdef DEAL_II_WITH_HDF5
28# include <hdf5.h>
29# endif
30
31# include <memory>
32
33DEAL_II_NAMESPACE_OPEN
34
35# ifdef DEAL_II_WITH_HDF5
36
37template <typename number>
38inline hid_t
39hdf5_type_id(const number *)
40{
41 Assert(false, ExcNotImplemented());
42 // don't know what to put here; it does not matter
43 return -1;
44}
45
46inline hid_t
47hdf5_type_id(const double *)
48{
49 return H5T_NATIVE_DOUBLE;
50}
51
52inline hid_t
53hdf5_type_id(const float *)
54{
55 return H5T_NATIVE_FLOAT;
56}
57
58inline hid_t
59hdf5_type_id(const int *)
60{
61 return H5T_NATIVE_INT;
62}
63
64inline hid_t
65hdf5_type_id(const unsigned int *)
66{
67 return H5T_NATIVE_UINT;
68}
69
70inline hid_t
71hdf5_type_id(const char *)
72{
73 return H5T_NATIVE_CHAR;
74}
75# endif // DEAL_II_WITH_HDF5
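// Editorial sketch, not part of scalapack.cc: a minimal illustration of how
// the hdf5_type_id() overloads above are meant to be used when writing a
// buffer to an HDF5 data set -- overload resolution on the element type picks
// the matching native HDF5 type id. The function name example_write_buffer
// and the data set name "/example" are invented for this illustration.
# ifdef DEAL_II_WITH_HDF5
inline void
example_write_buffer(const hid_t file, const std::vector<double> &buffer)
{
  const hsize_t dims[1] = {buffer.size()};
  const hid_t   space   = H5Screate_simple(1, dims, nullptr);
  const hid_t   dataset = H5Dcreate2(file,
                                     "/example",
                                     hdf5_type_id(buffer.data()),
                                     space,
                                     H5P_DEFAULT,
                                     H5P_DEFAULT,
                                     H5P_DEFAULT);
  // write the buffer using the same memory type id
  H5Dwrite(dataset,
           hdf5_type_id(buffer.data()),
           H5S_ALL,
           H5S_ALL,
           H5P_DEFAULT,
           buffer.data());
  H5Dclose(dataset);
  H5Sclose(space);
}
# endif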
76
77
78
79template <typename NumberType>
80ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
81 const size_type n_rows_,
82 const size_type n_columns_,
83 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
84 const size_type row_block_size_,
85 const size_type column_block_size_,
86 const LAPACKSupport::Property property_)
87 : uplo('L')
88 , // for non-symmetric matrices this is not needed
89 first_process_row(0)
90 , first_process_column(0)
91 , submatrix_row(1)
92 , submatrix_column(1)
93{
94 reinit(n_rows_,
95 n_columns_,
96 process_grid,
97 row_block_size_,
98 column_block_size_,
99 property_);
100}
101
102
103
104template <typename NumberType>
105ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
106 const size_type size,
107 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
108 const size_type block_size,
109 const LAPACKSupport::Property property)
110 : ScaLAPACKMatrix<NumberType>(size,
111 size,
112 process_grid,
113 block_size,
114 block_size,
115 property)
116{}
117
118
119
120template <typename NumberType>
121ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
122 const std::string & filename,
123 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
124 const size_type row_block_size,
125 const size_type column_block_size)
126 : uplo('L')
127 , // for non-symmetric matrices this is not needed
128 first_process_row(0)
129 , first_process_column(0)
130 , submatrix_row(1)
131 , submatrix_column(1)
132{
133# ifndef DEAL_II_WITH_HDF5
134 (void)filename;
135 (void)process_grid;
136 (void)row_block_size;
137 (void)column_block_size;
138 Assert(
139 false,
141 "This function is only available when deal.II is configured with HDF5"));
142# else
143
144 const unsigned int this_mpi_process(
145 Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));
146
147 // Before reading the content from disk the root process determines the
148 // dimensions of the matrix. Subsequently, memory is allocated by a call to
149 // reinit() and the matrix is loaded by a call to load().
150 if (this_mpi_process == 0)
151 {
152 herr_t status = 0;
153
154 // open file in read-only mode
155 hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
156 AssertThrow(file >= 0, ExcIO());
157
158 // get data set in file
159 hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);
160 AssertThrow(dataset >= 0, ExcIO());
161
162 // determine file space
163 hid_t filespace = H5Dget_space(dataset);
164
165 // get number of dimensions in data set
166 int rank = H5Sget_simple_extent_ndims(filespace);
167 AssertThrow(rank == 2, ExcIO());
168 hsize_t dims[2];
169 status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
170 AssertThrow(status >= 0, ExcIO());
171
172 // due to ScaLAPACK's column-major memory layout the dimensions are
173 // swapped
174 n_rows = dims[1];
175 n_columns = dims[0];
176
177 // close/release resources
178 status = H5Sclose(filespace);
179 AssertThrow(status >= 0, ExcIO());
180 status = H5Dclose(dataset);
181 AssertThrow(status >= 0, ExcIO());
182 status = H5Fclose(file);
183 AssertThrow(status >= 0, ExcIO());
184 }
185 int ierr = MPI_Bcast(&n_rows,
186 1,
187 Utilities::MPI::internal::mpi_type_id(&n_rows),
188 0 /*from root*/,
189 process_grid->mpi_communicator);
190 AssertThrowMPI(ierr);
191
192 ierr = MPI_Bcast(&n_columns,
193 1,
194 Utilities::MPI::internal::mpi_type_id(&n_columns),
195 0 /*from root*/,
196 process_grid->mpi_communicator);
197 AssertThrowMPI(ierr);
198
199 // the property will be overwritten by the subsequent call to load()
200 reinit(n_rows,
201 n_columns,
202 process_grid,
203 row_block_size,
204 column_block_size,
205 LAPACKSupport::Property::general);
206
207 load(filename.c_str());
208
209# endif // DEAL_II_WITH_HDF5
210}
211
212
213
214template <typename NumberType>
215void
216ScaLAPACKMatrix<NumberType>::reinit(
217 const size_type n_rows_,
218 const size_type n_columns_,
219 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
220 const size_type row_block_size_,
221 const size_type column_block_size_,
222 const LAPACKSupport::Property property_)
223{
224 Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
225 Assert(column_block_size_ > 0,
226 ExcMessage("Column block size has to be positive."));
227 Assert(
228 row_block_size_ <= n_rows_,
230 "Row block size can not be greater than the number of rows of the matrix"));
231 Assert(
232 column_block_size_ <= n_columns_,
234 "Column block size can not be greater than the number of columns of the matrix"));
235
236 state = LAPACKSupport::State::matrix;
237 property = property_;
238 grid = process_grid;
239 n_rows = n_rows_;
240 n_columns = n_columns_;
241 row_block_size = row_block_size_;
242 column_block_size = column_block_size_;
243
244 if (grid->mpi_process_is_active)
245 {
246 // Get local sizes:
247 n_local_rows = numroc_(&n_rows,
248 &row_block_size,
249 &(grid->this_process_row),
250 &first_process_row,
251 &(grid->n_process_rows));
252 n_local_columns = numroc_(&n_columns,
253 &column_block_size,
254 &(grid->this_process_column),
255 &first_process_column,
256 &(grid->n_process_columns));
257
258 // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different
259 // between processes
260 int lda = std::max(1, n_local_rows);
261
262 int info = 0;
263 descinit_(descriptor,
264 &n_rows,
265 &n_columns,
266 &row_block_size,
267 &column_block_size,
268 &first_process_row,
269 &first_process_column,
270 &(grid->blacs_context),
271 &lda,
272 &info);
273 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
274
275 this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
276 }
277 else
278 {
279 // set process-local variables to something telling:
280 n_local_rows = -1;
281 n_local_columns = -1;
282 std::fill(std::begin(descriptor), std::end(descriptor), -1);
283 }
284}
285
286
287
288template <typename NumberType>
289void
290ScaLAPACKMatrix<NumberType>::reinit(
291 const size_type size,
292 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
293 const size_type block_size,
294 const LAPACKSupport::Property property)
295{
296 reinit(size, size, process_grid, block_size, block_size, property);
297}
298
299
300
301template <typename NumberType>
302void
303ScaLAPACKMatrix<NumberType>::set_property(
304 const LAPACKSupport::Property property_)
305{
306 property = property_;
307}
308
309
310
311template <typename NumberType>
312LAPACKSupport::Property
313ScaLAPACKMatrix<NumberType>::get_property() const
314{
315 return property;
316}
317
318
319
320template <typename NumberType>
321LAPACKSupport::State
322ScaLAPACKMatrix<NumberType>::get_state() const
323{
324 return state;
325}
326
327
328
329template <typename NumberType>
330ScaLAPACKMatrix<NumberType> &
331ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
332{
333 // FIXME: another way to copy is to use pdgeadd_ PBLAS routine.
334 // This routine computes the sum of two matrices B := a*A + b*B.
335 // Matrices can have different distribution, in particular matrix A can
336 // be owned by only one process, so we can set a=1 and b=0 to copy
337 // non-distributed matrix A into distributed matrix B.
338 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
339 Assert(n_columns == int(matrix.n()),
340 ExcDimensionMismatch(n_columns, matrix.n()));
341
342 if (grid->mpi_process_is_active)
343 {
344 for (int i = 0; i < n_local_rows; ++i)
345 {
346 const int glob_i = global_row(i);
347 for (int j = 0; j < n_local_columns; ++j)
348 {
349 const int glob_j = global_column(j);
350 local_el(i, j) = matrix(glob_i, glob_j);
351 }
352 }
353 }
354 state = LAPACKSupport::matrix;
355 return *this;
356}
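// Editorial usage sketch, not part of scalapack.cc: how the operator= above
// is typically combined with copy_to() (defined further down) to scatter a
// serial FullMatrix into the block-cyclic layout and gather it back. The
// matrix size, block size and all variable names are invented for the example.
inline void
example_distribute_and_gather(const MPI_Comm mpi_communicator)
{
  const unsigned int size = 1000, block_size = 32;
  const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    mpi_communicator, size, size, block_size, block_size);

  FullMatrix<double>      serial(size);
  ScaLAPACKMatrix<double> distributed(size, size, grid, block_size, block_size);

  distributed = serial;        // scatter into the block-cyclic distribution
  distributed.copy_to(serial); // gather; every process ends up with a full copy
}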
357
358
359
360template <typename NumberType>
361void
362ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
363 const unsigned int rank)
364{
365 if (n_rows * n_columns == 0)
366 return;
367
368 const unsigned int this_mpi_process(
369 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
370
371# ifdef DEBUG
372 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
373 ExcMessage("All processes have to call routine with identical rank"));
374 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
375 ExcMessage("All processes have to call routine with identical rank"));
376# endif
377
378 // root process has to be active in the grid of A
379 if (this_mpi_process == rank)
380 {
381 Assert(grid->mpi_process_is_active, ExcInternalError());
382 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
383 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
384 }
385 // Create 1x1 grid for matrix B.
386 // The underlying grid for matrix B only contains the process #rank.
387 // This grid will be used to copy the serial matrix B to the distributed
388 // matrix using the ScaLAPACK routine pgemr2d.
389 MPI_Group group_A;
390 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
391 const int n = 1;
392 const std::vector<int> ranks(n, rank);
393 MPI_Group group_B;
394 MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
395 MPI_Comm communicator_B;
396
397 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_from;
398 Utilities::MPI::create_group(this->grid->mpi_communicator,
399 group_B,
400 mpi_tag,
401 &communicator_B);
402 int n_proc_rows_B = 1, n_proc_cols_B = 1;
403 int this_process_row_B = -1, this_process_column_B = -1;
404 int blacs_context_B = -1;
405 if (MPI_COMM_NULL != communicator_B)
406 {
407 // Initialize BLACS context from the provided communicator
408 blacs_context_B = Csys2blacs_handle(communicator_B);
409 const char *order = "Col";
410 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
411 Cblacs_gridinfo(blacs_context_B,
412 &n_proc_rows_B,
413 &n_proc_cols_B,
414 &this_process_row_B,
415 &this_process_column_B);
416 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
417 // the active process of grid B has to be process #rank of the
418 // communicator attached to A
419 Assert(this_mpi_process == rank, ExcInternalError());
420 }
421 const bool mpi_process_is_active_B =
422 (this_process_row_B >= 0 && this_process_column_B >= 0);
423
424 // create descriptor for matrix B
425 std::vector<int> descriptor_B(9, -1);
426 const int first_process_row_B = 0, first_process_col_B = 0;
427
428 if (mpi_process_is_active_B)
429 {
430 // Get local sizes:
431 int n_local_rows_B = numroc_(&n_rows,
432 &n_rows,
433 &this_process_row_B,
434 &first_process_row_B,
435 &n_proc_rows_B);
436 int n_local_cols_B = numroc_(&n_columns,
437 &n_columns,
438 &this_process_column_B,
439 &first_process_col_B,
440 &n_proc_cols_B);
441 Assert(n_local_rows_B == n_rows, ExcInternalError());
442 Assert(n_local_cols_B == n_columns, ExcInternalError());
443 (void)n_local_cols_B;
444
445 int lda = std::max(1, n_local_rows_B);
446 int info = 0;
447 descinit_(descriptor_B.data(),
448 &n_rows,
449 &n_columns,
450 &n_rows,
451 &n_columns,
452 &first_process_row_B,
453 &first_process_col_B,
454 &blacs_context_B,
455 &lda,
456 &info);
457 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
458 }
459 if (this->grid->mpi_process_is_active)
460 {
461 const int ii = 1;
462 NumberType *loc_vals_A =
463 this->values.size() > 0 ? this->values.data() : nullptr;
464 const NumberType *loc_vals_B =
465 mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
466
467 // pgemr2d has to be called only for processes active on grid attached to
468 // matrix A
469 pgemr2d(&n_rows,
470 &n_columns,
471 loc_vals_B,
472 &ii,
473 &ii,
474 descriptor_B.data(),
475 loc_vals_A,
476 &ii,
477 &ii,
478 this->descriptor,
479 &(this->grid->blacs_context));
480 }
481 if (mpi_process_is_active_B)
482 Cblacs_gridexit(blacs_context_B);
483
484 MPI_Group_free(&group_A);
485 MPI_Group_free(&group_B);
486 if (MPI_COMM_NULL != communicator_B)
487 MPI_Comm_free(&communicator_B);
488
489 state = LAPACKSupport::matrix;
490}
491
492
493
494template <typename NumberType>
495unsigned int
496ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
497{
498 Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
499 ExcIndexRange(loc_row, 0, n_local_rows));
500 const int i = loc_row + 1;
501 return indxl2g_(&i,
502 &row_block_size,
503 &(grid->this_process_row),
504 &first_process_row,
505 &(grid->n_process_rows)) -
506 1;
507}
508
509
510
511template <typename NumberType>
512unsigned int
513ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
514{
515 Assert(n_local_columns >= 0 &&
516 loc_column < static_cast<unsigned int>(n_local_columns),
517 ExcIndexRange(loc_column, 0, n_local_columns));
518 const int j = loc_column + 1;
519 return indxl2g_(&j,
520 &column_block_size,
521 &(grid->this_process_column),
522 &first_process_column,
523 &(grid->n_process_columns)) -
524 1;
525}
526
527
528
529template <typename NumberType>
530void
531ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
532 const unsigned int rank) const
533{
534 if (n_rows * n_columns == 0)
535 return;
536
537 const unsigned int this_mpi_process(
538 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
539
540# ifdef DEBUG
541 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
542 ExcMessage("All processes have to call routine with identical rank"));
543 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
544 ExcMessage("All processes have to call routine with identical rank"));
545# endif
546
547 if (this_mpi_process == rank)
548 {
549 // the process which gets the serial copy has to be in the process grid
550 Assert(this->grid->is_process_active(), ExcInternalError());
551 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
552 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
553 }
554
555 // Create 1x1 grid for matrix B.
556 // The underlying grid for matrix B only contains the process #rank.
557 // This grid will be used to copy the distributed matrix to the serial
558 // matrix B using the ScaLAPACK routine pgemr2d.
559 MPI_Group group_A;
560 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
561 const int n = 1;
562 const std::vector<int> ranks(n, rank);
563 MPI_Group group_B;
564 MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
565 MPI_Comm communicator_B;
566
567 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to;
568 Utilities::MPI::create_group(this->grid->mpi_communicator,
569 group_B,
570 mpi_tag,
571 &communicator_B);
572 int n_proc_rows_B = 1, n_proc_cols_B = 1;
573 int this_process_row_B = -1, this_process_column_B = -1;
574 int blacs_context_B = -1;
575 if (MPI_COMM_NULL != communicator_B)
576 {
577 // Initialize BLACS context from the provided communicator
578 blacs_context_B = Csys2blacs_handle(communicator_B);
579 const char *order = "Col";
580 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
581 Cblacs_gridinfo(blacs_context_B,
582 &n_proc_rows_B,
583 &n_proc_cols_B,
584 &this_process_row_B,
585 &this_process_column_B);
586 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
587 // the active process of grid B has to be process #rank of the
588 // communicator attached to A
589 Assert(this_mpi_process == rank, ExcInternalError());
590 }
591 const bool mpi_process_is_active_B =
592 (this_process_row_B >= 0 && this_process_column_B >= 0);
593
594 // create descriptor for matrix B
595 std::vector<int> descriptor_B(9, -1);
596 const int first_process_row_B = 0, first_process_col_B = 0;
597
598 if (mpi_process_is_active_B)
599 {
600 // Get local sizes:
601 int n_local_rows_B = numroc_(&n_rows,
602 &n_rows,
603 &this_process_row_B,
604 &first_process_row_B,
605 &n_proc_rows_B);
606 int n_local_cols_B = numroc_(&n_columns,
607 &n_columns,
608 &this_process_column_B,
609 &first_process_col_B,
610 &n_proc_cols_B);
611 Assert(n_local_rows_B == n_rows, ExcInternalError());
612 Assert(n_local_cols_B == n_columns, ExcInternalError());
613 (void)n_local_cols_B;
614
615 int lda = std::max(1, n_local_rows_B);
616 int info = 0;
617 // fill descriptor for matrix B
618 descinit_(descriptor_B.data(),
619 &n_rows,
620 &n_columns,
621 &n_rows,
622 &n_columns,
623 &first_process_row_B,
624 &first_process_col_B,
625 &blacs_context_B,
626 &lda,
627 &info);
628 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
629 }
630 // pgemr2d has to be called only for processes active on grid attached to
631 // matrix A
632 if (this->grid->mpi_process_is_active)
633 {
634 const int ii = 1;
635 const NumberType *loc_vals_A =
636 this->values.size() > 0 ? this->values.data() : nullptr;
637 NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
638
639 pgemr2d(&n_rows,
640 &n_columns,
641 loc_vals_A,
642 &ii,
643 &ii,
644 this->descriptor,
645 loc_vals_B,
646 &ii,
647 &ii,
648 descriptor_B.data(),
649 &(this->grid->blacs_context));
650 }
651 if (mpi_process_is_active_B)
652 Cblacs_gridexit(blacs_context_B);
653
654 MPI_Group_free(&group_A);
655 MPI_Group_free(&group_B);
656 if (MPI_COMM_NULL != communicator_B)
657 MPI_Comm_free(&communicator_B);
658}
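// Editorial usage sketch, not part of scalapack.cc: the copy_from()/copy_to()
// overloads above move a serial LAPACKFullMatrix between one designated MPI
// rank and the distributed matrix. The size, block size and the use of rank 0
// are invented for the example.
inline void
example_serial_roundtrip(
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &grid)
{
  const unsigned int       size = 500;
  LAPACKFullMatrix<double> serial(size, size);
  ScaLAPACKMatrix<double>  distributed(size, size, grid, 32, 32);

  distributed.copy_from(serial, 0); // only rank 0 has to provide the entries
  distributed.copy_to(serial, 0);   // only rank 0 receives the gathered matrix
}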
659
660
661
662template <typename NumberType>
663void
664ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
665{
666 // FIXME: use PDGEMR2D for copying?
667 // PDGEMR2D copies a submatrix of A on a submatrix of B.
668 // A and B can have different distributions
669 // see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
670 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
671 Assert(n_columns == int(matrix.n()),
672 ExcDimensionMismatch(n_columns, matrix.n()));
673
674 matrix = 0.;
675 if (grid->mpi_process_is_active)
676 {
677 for (int i = 0; i < n_local_rows; ++i)
678 {
679 const int glob_i = global_row(i);
680 for (int j = 0; j < n_local_columns; ++j)
681 {
682 const int glob_j = global_column(j);
683 matrix(glob_i, glob_j) = local_el(i, j);
684 }
685 }
686 }
687 Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
688
689 // we could move the following lines under the main loop above,
690 // but they would be dependent on glob_i and glob_j, which
691 // won't make it much prettier
692 if (state == LAPACKSupport::cholesky)
693 {
694 if (property == LAPACKSupport::lower_triangular)
695 for (unsigned int i = 0; i < matrix.n(); ++i)
696 for (unsigned int j = i + 1; j < matrix.m(); ++j)
697 matrix(i, j) = 0.;
698 else if (property == LAPACKSupport::upper_triangular)
699 for (unsigned int i = 0; i < matrix.n(); ++i)
700 for (unsigned int j = 0; j < i; ++j)
701 matrix(i, j) = 0.;
702 }
703 else if (property == LAPACKSupport::symmetric &&
704 state == LAPACKSupport::inverse_matrix)
705 {
706 if (uplo == 'L')
707 for (unsigned int i = 0; i < matrix.n(); ++i)
708 for (unsigned int j = i + 1; j < matrix.m(); ++j)
709 matrix(i, j) = matrix(j, i);
710 else if (uplo == 'U')
711 for (unsigned int i = 0; i < matrix.n(); ++i)
712 for (unsigned int j = 0; j < i; ++j)
713 matrix(i, j) = matrix(j, i);
714 }
715}
716
717
718
719template <typename NumberType>
720void
721ScaLAPACKMatrix<NumberType>::copy_to(
722 ScaLAPACKMatrix<NumberType> & B,
723 const std::pair<unsigned int, unsigned int> &offset_A,
724 const std::pair<unsigned int, unsigned int> &offset_B,
725 const std::pair<unsigned int, unsigned int> &submatrix_size) const
726{
727 // submatrix is empty
728 if (submatrix_size.first == 0 || submatrix_size.second == 0)
729 return;
730
731 // range checking for matrix A
732 AssertIndexRange(offset_A.first, n_rows - submatrix_size.first + 1);
733 AssertIndexRange(offset_A.second, n_columns - submatrix_size.second + 1);
734
735 // range checking for matrix B
736 AssertIndexRange(offset_B.first, B.n_rows - submatrix_size.first + 1);
737 AssertIndexRange(offset_B.second, B.n_columns - submatrix_size.second + 1);
738
739 // Currently, copying of matrices will only be supported if A and B share the
740 // same MPI communicator
741 int ierr, comparison;
742 ierr = MPI_Comm_compare(grid->mpi_communicator,
743 B.grid->mpi_communicator,
744 &comparison);
745 AssertThrowMPI(ierr);
746 Assert(comparison == MPI_IDENT,
747 ExcMessage("Matrix A and B must have a common MPI Communicator"));
748
749 /*
750 * The routine pgemr2d requires a BLACS context resembling at least the union
751 * of process grids described by the BLACS contexts held by the ProcessGrids
752 * of matrix A and B. As A and B share the same MPI communicator, there is no
753 * need to create a union MPI communicator to initialise the BLACS context
754 */
755 int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
756 const char *order = "Col";
757 int union_n_process_rows =
758 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
759 int union_n_process_columns = 1;
760 Cblacs_gridinit(&union_blacs_context,
761 order,
762 union_n_process_rows,
763 union_n_process_columns);
764
765 int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
766 Cblacs_gridinfo(this->grid->blacs_context,
767 &n_grid_rows_A,
768 &n_grid_columns_A,
769 &my_row_A,
770 &my_column_A);
771
772 // check whether process is in the BLACS context of matrix A
773 const bool in_context_A =
774 (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
775 (my_column_A >= 0 && my_column_A < n_grid_columns_A);
776
777 int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
778 Cblacs_gridinfo(B.grid->blacs_context,
779 &n_grid_rows_B,
780 &n_grid_columns_B,
781 &my_row_B,
782 &my_column_B);
783
784 // check whether process is in the BLACS context of matrix B
785 const bool in_context_B =
786 (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
787 (my_column_B >= 0 && my_column_B < n_grid_columns_B);
788
789 const int n_rows_submatrix = submatrix_size.first;
790 const int n_columns_submatrix = submatrix_size.second;
791
792 // due to Fortran indexing one has to be added
793 int ia = offset_A.first + 1, ja = offset_A.second + 1;
794 int ib = offset_B.first + 1, jb = offset_B.second + 1;
795
796 std::array<int, 9> desc_A, desc_B;
797
798 const NumberType *loc_vals_A = nullptr;
799 NumberType * loc_vals_B = nullptr;
800
801 // Note: the function pgemr2d has to be called for all processes in the union
802 // BLACS context. If the calling process is not part of the BLACS context of A,
803 // desc_A[1] has to be -1 and all other parameters do not have to be set. If
804 // the calling process is not part of the BLACS context of B, desc_B[1] has to
805 // be -1 and all other parameters do not have to be set.
806 if (in_context_A)
807 {
808 if (this->values.size() != 0)
809 loc_vals_A = this->values.data();
810
811 for (unsigned int i = 0; i < desc_A.size(); ++i)
812 desc_A[i] = this->descriptor[i];
813 }
814 else
815 desc_A[1] = -1;
816
817 if (in_context_B)
818 {
819 if (B.values.size() != 0)
820 loc_vals_B = B.values.data();
821
822 for (unsigned int i = 0; i < desc_B.size(); ++i)
823 desc_B[i] = B.descriptor[i];
824 }
825 else
826 desc_B[1] = -1;
827
828 pgemr2d(&n_rows_submatrix,
829 &n_columns_submatrix,
830 loc_vals_A,
831 &ia,
832 &ja,
833 desc_A.data(),
834 loc_vals_B,
835 &ib,
836 &jb,
837 desc_B.data(),
838 &union_blacs_context);
839
840 B.state = LAPACKSupport::matrix;
841
842 // releasing the union BLACS context
843 Cblacs_gridexit(union_blacs_context);
844}
845
846
847
848template <typename NumberType>
849void
850ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
851{
852 Assert(n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
853 Assert(n_columns == dest.n_columns,
854 ExcDimensionMismatch(n_columns, dest.n_columns));
855
856 if (this->grid->mpi_process_is_active)
857 AssertThrow(
858 this->descriptor[0] == 1,
859 ExcMessage(
860 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
861 if (dest.grid->mpi_process_is_active)
862 AssertThrow(
863 dest.descriptor[0] == 1,
864 ExcMessage(
865 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
866
867 /*
868 * in case of different process grids or block-cyclic distributions,
869 * inter-process communication is necessary;
870 * if the distributed matrices have the same process grid and block sizes,
871 * local copying is enough
872 */
873 if ((this->grid != dest.grid) || (row_block_size != dest.row_block_size) ||
874 (column_block_size != dest.column_block_size))
875 {
876 /*
877 * get the MPI communicator, which is the union of the source and
878 * destination MPI communicator
879 */
880 int ierr = 0;
881 MPI_Group group_source, group_dest, group_union;
882 ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
883 AssertThrowMPI(ierr);
884 ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
885 AssertThrowMPI(ierr);
886 ierr = MPI_Group_union(group_source, group_dest, &group_union);
887 AssertThrowMPI(ierr);
888 MPI_Comm mpi_communicator_union;
889
890 // to create a communicator representing the union of the source
891 // and destination MPI communicator we need a communicator that
892 // is guaranteed to contain all desired processes -- i.e.,
893 // MPI_COMM_WORLD. on the other hand, as documented in the MPI
894 // standard, MPI_Comm_create_group is not collective on all
895 // processes in the first argument, but instead is collective on
896 // only those processes listed in the group. in other words,
897 // there is really no harm in passing MPI_COMM_WORLD as the
898 // first argument, even if the program we are currently running
899 // and that is calling this function only works on a subset of
900 // processes. the same holds for the wrapper/fallback we are using here.
901
902 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to2;
903 ierr = Utilities::MPI::create_group(MPI_COMM_WORLD,
904 group_union,
905 mpi_tag,
906 &mpi_communicator_union);
907 AssertThrowMPI(ierr);
908
909 /*
910 * The routine pgemr2d requires a BLACS context resembling at least the
911 * union of process grids described by the BLACS contexts of matrix A and
912 * B
913 */
914 int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
915 const char *order = "Col";
916 int union_n_process_rows =
917 Utilities::MPI::n_mpi_processes(mpi_communicator_union);
918 int union_n_process_columns = 1;
919 Cblacs_gridinit(&union_blacs_context,
920 order,
921 union_n_process_rows,
922 union_n_process_columns);
923
924 const NumberType *loc_vals_source = nullptr;
925 NumberType * loc_vals_dest = nullptr;
926
927 if (this->grid->mpi_process_is_active && (this->values.size() > 0))
928 {
929 AssertThrow(this->values.size() > 0,
931 "source: process is active but local matrix empty"));
932 loc_vals_source = this->values.data();
933 }
934 if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
935 {
936 AssertThrow(
937 dest.values.size() > 0,
938 ExcMessage(
939 "destination: process is active but local matrix empty"));
940 loc_vals_dest = dest.values.data();
941 }
942 pgemr2d(&n_rows,
943 &n_columns,
944 loc_vals_source,
945 &submatrix_row,
946 &submatrix_column,
947 descriptor,
948 loc_vals_dest,
949 &dest.submatrix_row,
950 &dest.submatrix_column,
951 dest.descriptor,
952 &union_blacs_context);
953
954 Cblacs_gridexit(union_blacs_context);
955
956 if (mpi_communicator_union != MPI_COMM_NULL)
957 {
958 ierr = MPI_Comm_free(&mpi_communicator_union);
959 AssertThrowMPI(ierr);
960 }
961 ierr = MPI_Group_free(&group_source);
962 AssertThrowMPI(ierr);
963 ierr = MPI_Group_free(&group_dest);
964 AssertThrowMPI(ierr);
965 ierr = MPI_Group_free(&group_union);
966 AssertThrowMPI(ierr);
967 }
968 else
969 // process is active in the process grid
970 if (this->grid->mpi_process_is_active)
971 dest.values = this->values;
972
973 dest.state = state;
974 dest.property = property;
975}
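// Editorial sketch, not part of scalapack.cc: the copy_to() overload above can
// also be used to redistribute a matrix between different block-cyclic
// layouts, e.g. from 32x32 to 64x64 blocks on the same process grid; the
// sizes below are invented for the example.
inline void
example_redistribute(
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &grid)
{
  const unsigned int      size = 2000;
  ScaLAPACKMatrix<double> fine_blocks(size, size, grid, 32, 32);
  ScaLAPACKMatrix<double> coarse_blocks(size, size, grid, 64, 64);

  // ... fill fine_blocks ...
  fine_blocks.copy_to(coarse_blocks); // pgemr2d-based redistribution
}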
976
977
978
979template <typename NumberType>
980void
981ScaLAPACKMatrix<NumberType>::copy_transposed(
982 const ScaLAPACKMatrix<NumberType> &B)
983{
984 add(B, 0, 1, true);
985}
986
987
988
989template <typename NumberType>
990void
991ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
992 const NumberType alpha,
993 const NumberType beta,
994 const bool transpose_B)
995{
996 if (transpose_B)
997 {
998 Assert(n_rows == B.n_columns, ExcDimensionMismatch(n_rows, B.n_columns));
999 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
1000 Assert(column_block_size == B.row_block_size,
1001 ExcDimensionMismatch(column_block_size, B.row_block_size));
1002 Assert(row_block_size == B.column_block_size,
1003 ExcDimensionMismatch(row_block_size, B.column_block_size));
1004 }
1005 else
1006 {
1007 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
1008 Assert(n_columns == B.n_columns,
1009 ExcDimensionMismatch(n_columns, B.n_columns));
1010 Assert(column_block_size == B.column_block_size,
1011 ExcDimensionMismatch(column_block_size, B.column_block_size));
1012 Assert(row_block_size == B.row_block_size,
1013 ExcDimensionMismatch(row_block_size, B.row_block_size));
1014 }
1015 Assert(this->grid == B.grid,
1016 ExcMessage("The matrices A and B need to have the same process grid"));
1017
1018 if (this->grid->mpi_process_is_active)
1019 {
1020 char trans_b = transpose_B ? 'T' : 'N';
1021 NumberType *A_loc =
1022 (this->values.size() > 0) ? this->values.data() : nullptr;
1023 const NumberType *B_loc =
1024 (B.values.size() > 0) ? B.values.data() : nullptr;
1025
1026 pgeadd(&trans_b,
1027 &n_rows,
1028 &n_columns,
1029 &beta,
1030 B_loc,
1031 &B.submatrix_row,
1032 &B.submatrix_column,
1033 B.descriptor,
1034 &alpha,
1035 A_loc,
1036 &submatrix_row,
1037 &submatrix_column,
1038 descriptor);
1039 }
1040 state = LAPACKSupport::matrix;
1041}
1042
1043
1044
1045template <typename NumberType>
1046void
1047ScaLAPACKMatrix<NumberType>::add(const NumberType a,
1048 const ScaLAPACKMatrix<NumberType> &B)
1049{
1050 add(B, 1, a, false);
1051}
1052
1053
1054
1055template <typename NumberType>
1056void
1057ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
1058 const ScaLAPACKMatrix<NumberType> &B)
1059{
1060 add(B, 1, a, true);
1061}
1062
1063
1064
1065template <typename NumberType>
1066void
1067ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
1068 const ScaLAPACKMatrix<NumberType> &B,
1069 const NumberType c,
1070 ScaLAPACKMatrix<NumberType> & C,
1071 const bool transpose_A,
1072 const bool transpose_B) const
1073{
1074 Assert(this->grid == B.grid,
1075 ExcMessage("The matrices A and B need to have the same process grid"));
1076 Assert(C.grid == B.grid,
1077 ExcMessage("The matrices B and C need to have the same process grid"));
1078
1079 // see for further info:
1080 // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgemm.htm
1081 if (!transpose_A && !transpose_B)
1082 {
1083 Assert(this->n_columns == B.n_rows,
1084 ExcDimensionMismatch(this->n_columns, B.n_rows));
1085 Assert(this->n_rows == C.n_rows,
1086 ExcDimensionMismatch(this->n_rows, C.n_rows));
1087 Assert(B.n_columns == C.n_columns,
1088 ExcDimensionMismatch(B.n_columns, C.n_columns));
1089 Assert(this->row_block_size == C.row_block_size,
1090 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1091 Assert(this->column_block_size == B.row_block_size,
1092 ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1093 Assert(B.column_block_size == C.column_block_size,
1094 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1095 }
1096 else if (transpose_A && !transpose_B)
1097 {
1098 Assert(this->n_rows == B.n_rows,
1099 ExcDimensionMismatch(this->n_rows, B.n_rows));
1100 Assert(this->n_columns == C.n_rows,
1101 ExcDimensionMismatch(this->n_columns, C.n_rows));
1102 Assert(B.n_columns == C.n_columns,
1103 ExcDimensionMismatch(B.n_columns, C.n_columns));
1104 Assert(this->column_block_size == C.row_block_size,
1105 ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1106 Assert(this->row_block_size == B.row_block_size,
1107 ExcDimensionMismatch(this->row_block_size, B.row_block_size));
1108 Assert(B.column_block_size == C.column_block_size,
1109 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1110 }
1111 else if (!transpose_A && transpose_B)
1112 {
1113 Assert(this->n_columns == B.n_columns,
1114 ExcDimensionMismatch(this->n_columns, B.n_columns));
1115 Assert(this->n_rows == C.n_rows,
1116 ExcDimensionMismatch(this->n_rows, C.n_rows));
1117 Assert(B.n_rows == C.n_columns,
1118 ExcDimensionMismatch(B.n_rows, C.n_columns));
1119 Assert(this->row_block_size == C.row_block_size,
1120 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1121 Assert(this->column_block_size == B.column_block_size,
1122 ExcDimensionMismatch(this->column_block_size,
1123 B.column_block_size));
1124 Assert(B.row_block_size == C.column_block_size,
1125 ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1126 }
1127 else // if (transpose_A && transpose_B)
1128 {
1129 Assert(this->n_rows == B.n_columns,
1130 ExcDimensionMismatch(this->n_rows, B.n_columns));
1131 Assert(this->n_columns == C.n_rows,
1132 ExcDimensionMismatch(this->n_columns, C.n_rows));
1133 Assert(B.n_rows == C.n_columns,
1134 ExcDimensionMismatch(B.n_rows, C.n_columns));
1135 Assert(this->column_block_size == C.row_block_size,
1136 ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1137 Assert(this->row_block_size == B.column_block_size,
1138 ExcDimensionMismatch(this->row_block_size, B.column_block_size));
1139 Assert(B.row_block_size == C.column_block_size,
1140 ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1141 }
1142
1143 if (this->grid->mpi_process_is_active)
1144 {
1145 char trans_a = transpose_A ? 'T' : 'N';
1146 char trans_b = transpose_B ? 'T' : 'N';
1147
1148 const NumberType *A_loc =
1149 (this->values.size() > 0) ? this->values.data() : nullptr;
1150 const NumberType *B_loc =
1151 (B.values.size() > 0) ? B.values.data() : nullptr;
1152 NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
1153 int m = C.n_rows;
1154 int n = C.n_columns;
1155 int k = transpose_A ? this->n_rows : this->n_columns;
1156
1157 pgemm(&trans_a,
1158 &trans_b,
1159 &m,
1160 &n,
1161 &k,
1162 &b,
1163 A_loc,
1164 &(this->submatrix_row),
1165 &(this->submatrix_column),
1166 this->descriptor,
1167 B_loc,
1168 &B.submatrix_row,
1169 &B.submatrix_column,
1170 B.descriptor,
1171 &c,
1172 C_loc,
1173 &C.submatrix_row,
1174 &C.submatrix_column,
1175 C.descriptor);
1176 }
1177 C.state = LAPACKSupport::matrix;
1178}
1179
1180
1181
1182template <typename NumberType>
1183void
1184ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> & C,
1185 const ScaLAPACKMatrix<NumberType> &B,
1186 const bool adding) const
1187{
1188 if (adding)
1189 mult(1., B, 1., C, false, false);
1190 else
1191 mult(1., B, 0, C, false, false);
1192}
1193
1194
1195
1196template <typename NumberType>
1197void
1198ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> & C,
1199 const ScaLAPACKMatrix<NumberType> &B,
1200 const bool adding) const
1201{
1202 if (adding)
1203 mult(1., B, 1., C, true, false);
1204 else
1205 mult(1., B, 0, C, true, false);
1206}
1207
1208
1209
1210template <typename NumberType>
1211void
1212ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> & C,
1213 const ScaLAPACKMatrix<NumberType> &B,
1214 const bool adding) const
1215{
1216 if (adding)
1217 mult(1., B, 1., C, false, true);
1218 else
1219 mult(1., B, 0, C, false, true);
1220}
1221
1222
1223
1224template <typename NumberType>
1225void
1226ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> & C,
1227 const ScaLAPACKMatrix<NumberType> &B,
1228 const bool adding) const
1229{
1230 if (adding)
1231 mult(1., B, 1., C, true, true);
1232 else
1233 mult(1., B, 0, C, true, true);
1234}
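// Editorial usage sketch, not part of scalapack.cc: the mult() routine and the
// mmult()/Tmmult()/mTmult()/TmTmult() wrappers above compute products such as
// C = A*B or C = A^T*B. Dimensions and block sizes below are invented; they
// have to be compatible in the sense asserted in mult().
inline void
example_matrix_product(
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &grid)
{
  ScaLAPACKMatrix<double> A(1000, 400, grid, 32, 32);
  ScaLAPACKMatrix<double> B(400, 300, grid, 32, 32);
  ScaLAPACKMatrix<double> C(1000, 300, grid, 32, 32);

  // ... fill A and B ...
  A.mmult(C, B);       // C  = A * B
  A.mmult(C, B, true); // C += A * B
}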
1235
1236
1237
1238template <typename NumberType>
1239void
1240ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
1241{
1242 Assert(
1243 n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
1244 ExcMessage(
1245 "Cholesky factorization can be applied to symmetric matrices only."));
1246 Assert(state == LAPACKSupport::matrix,
1247 ExcMessage(
1248 "Matrix has to be in Matrix state before calling this function."));
1249
1250 if (grid->mpi_process_is_active)
1251 {
1252 int info = 0;
1253 NumberType *A_loc = this->values.data();
1254 // pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
1255 ppotrf(&uplo,
1256 &n_columns,
1257 A_loc,
1258 &submatrix_row,
1259 &submatrix_column,
1260 descriptor,
1261 &info);
1262 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
1263 }
1264 state = LAPACKSupport::cholesky;
1265 property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
1266 LAPACKSupport::upper_triangular);
1267}
1268
1269
1270
1271template <typename NumberType>
1272void
1273ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
1274{
1275 Assert(state == LAPACKSupport::matrix,
1276 ExcMessage(
1277 "Matrix has to be in Matrix state before calling this function."));
1278
1279 if (grid->mpi_process_is_active)
1280 {
1281 int info = 0;
1282 NumberType *A_loc = this->values.data();
1283
1284 const int iarow = indxg2p_(&submatrix_row,
1285 &row_block_size,
1286 &(grid->this_process_row),
1287 &first_process_row,
1288 &(grid->n_process_rows));
1289 const int mp = numroc_(&n_rows,
1290 &row_block_size,
1291 &(grid->this_process_row),
1292 &iarow,
1293 &(grid->n_process_rows));
1294 ipiv.resize(mp + row_block_size);
1295
1296 pgetrf(&n_rows,
1297 &n_columns,
1298 A_loc,
1299 &submatrix_row,
1300 &submatrix_column,
1301 descriptor,
1302 ipiv.data(),
1303 &info);
1304 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
1305 }
1306 state = LAPACKSupport::State::lu;
1307 property = LAPACKSupport::Property::general;
1308}
1309
1310
1311
1312template <typename NumberType>
1313void
1314ScaLAPACKMatrix<NumberType>::invert()
1315{
1316 // Check whether matrix is symmetric and save flag.
1317 // If a Cholesky factorization has been applied previously,
1318 // the original matrix was symmetric.
1319 const bool is_symmetric = (property == LAPACKSupport::symmetric ||
1320 state == LAPACKSupport::State::cholesky);
1321
1322 // Check whether matrix is triangular and is in an unfactorized state.
1323 const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
1324 property == LAPACKSupport::lower_triangular) &&
1325 (state == LAPACKSupport::State::matrix ||
1326 state == LAPACKSupport::State::inverse_matrix);
1327
1328 if (is_triangular)
1329 {
1330 if (grid->mpi_process_is_active)
1331 {
1332 const char uploTriangular =
1333 property == LAPACKSupport::upper_triangular ? 'U' : 'L';
1334 const char diag = 'N';
1335 int info = 0;
1336 NumberType *A_loc = this->values.data();
1337 ptrtri(&uploTriangular,
1338 &diag,
1339 &n_columns,
1340 A_loc,
1341 &submatrix_row,
1342 &submatrix_column,
1343 descriptor,
1344 &info);
1345 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
1346 // The inversion is stored in the same part as the triangular matrix,
1347 // so we don't need to re-set the property here.
1348 }
1349 }
1350 else
1351 {
1352 // Matrix is neither in Cholesky nor LU state.
1353 // Compute the required factorizations based on the property of the
1354 // matrix.
1355 if (!(state == LAPACKSupport::State::lu ||
1356 state == LAPACKSupport::State::cholesky))
1357 {
1358 if (is_symmetric)
1359 compute_cholesky_factorization();
1360 else
1361 compute_lu_factorization();
1362 }
1363 if (grid->mpi_process_is_active)
1364 {
1365 int info = 0;
1366 NumberType *A_loc = this->values.data();
1367
1368 if (is_symmetric)
1369 {
1370 ppotri(&uplo,
1371 &n_columns,
1372 A_loc,
1373 &submatrix_row,
1374 &submatrix_column,
1375 descriptor,
1376 &info);
1377 AssertThrow(info == 0,
1378 LAPACKSupport::ExcErrorCode("ppotri", info));
1379 property = LAPACKSupport::Property::symmetric;
1380 }
1381 else
1382 {
1383 int lwork = -1, liwork = -1;
1384 work.resize(1);
1385 iwork.resize(1);
1386
1387 pgetri(&n_columns,
1388 A_loc,
1389 &submatrix_row,
1390 &submatrix_column,
1391 descriptor,
1392 ipiv.data(),
1393 work.data(),
1394 &lwork,
1395 iwork.data(),
1396 &liwork,
1397 &info);
1398
1399 AssertThrow(info == 0,
1400 LAPACKSupport::ExcErrorCode("pgetri", info));
1401 lwork = static_cast<int>(work[0]);
1402 liwork = iwork[0];
1403 work.resize(lwork);
1404 iwork.resize(liwork);
1405
1406 pgetri(&n_columns,
1407 A_loc,
1408 &submatrix_row,
1409 &submatrix_column,
1410 descriptor,
1411 ipiv.data(),
1412 work.data(),
1413 &lwork,
1414 iwork.data(),
1415 &liwork,
1416 &info);
1417
1418 AssertThrow(info == 0,
1419 LAPACKSupport::ExcErrorCode("pgetri", info));
1420 }
1421 }
1422 }
1423 state = LAPACKSupport::State::inverse_matrix;
1424}
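// Editorial usage sketch, not part of scalapack.cc: inverting a symmetric
// positive definite matrix with the invert() routine above; because the matrix
// carries the symmetric property flag, invert() takes the Cholesky
// (ppotrf/ppotri) path. Size and block size are invented for the example.
inline void
example_invert_spd(
  const std::shared_ptr<const Utilities::MPI::ProcessGrid> &grid)
{
  ScaLAPACKMatrix<double> A(1000, grid, 32, LAPACKSupport::Property::symmetric);

  // ... fill A with a symmetric positive definite matrix ...
  A.invert(); // factorize and invert in place
}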
1425
1426
1427
1428template <typename NumberType>
1429std::vector<NumberType>
1430ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
1431 const std::pair<unsigned int, unsigned int> &index_limits,
1432 const bool compute_eigenvectors)
1433{
1434 // check validity of index limits
1435 AssertIndexRange(index_limits.first, n_rows);
1436 AssertIndexRange(index_limits.second, n_rows);
1437
1438 std::pair<unsigned int, unsigned int> idx =
1439 std::make_pair(std::min(index_limits.first, index_limits.second),
1440 std::max(index_limits.first, index_limits.second));
1441
1442 // compute all eigenvalues/eigenvectors
1443 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1444 return eigenpairs_symmetric(compute_eigenvectors);
1445 else
1446 return eigenpairs_symmetric(compute_eigenvectors, idx);
1447}
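// Editorial usage sketch, not part of scalapack.cc: requesting only a subset
// of the eigenpairs of a symmetric matrix via the function above. The matrix
// has to carry the LAPACKSupport::Property::symmetric flag; the index range
// (the five smallest eigenvalues) is invented for the example.
inline void
example_smallest_eigenpairs(ScaLAPACKMatrix<double> &A)
{
  // eigenvalues with indices 0..4 in ascending order, including eigenvectors,
  // which are returned in the columns of A
  const std::vector<double> eigenvalues =
    A.eigenpairs_symmetric_by_index(std::make_pair(0, 4), true);
  (void)eigenvalues;
}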
1448
1449
1450
1451template <typename NumberType>
1452std::vector<NumberType>
1453ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
1454 const std::pair<NumberType, NumberType> &value_limits,
1455 const bool compute_eigenvectors)
1456{
1457 Assert(!std::isnan(value_limits.first),
1458 ExcMessage("value_limits.first is NaN"));
1459 Assert(!std::isnan(value_limits.second),
1460 ExcMessage("value_limits.second is NaN"));
1461
1462 std::pair<unsigned int, unsigned int> indices =
1463 std::make_pair(numbers::invalid_unsigned_int,
1464 numbers::invalid_unsigned_int);
1465
1466 return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
1467}
1468
1469
1470
1471template <typename NumberType>
1472std::vector<NumberType>
1473ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
1474 const bool compute_eigenvectors,
1475 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1476 const std::pair<NumberType, NumberType> & eigenvalue_limits)
1477{
1478 Assert(state == LAPACKSupport::matrix,
1479 ExcMessage(
1480 "Matrix has to be in Matrix state before calling this function."));
1481 Assert(property == LAPACKSupport::symmetric,
1482 ExcMessage("Matrix has to be symmetric for this operation."));
1483
1484 std::lock_guard<std::mutex> lock(mutex);
1485
1486 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1487 std::isnan(eigenvalue_limits.second)) ?
1488 false :
1489 true;
1490 const bool use_indices =
1491 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1492 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1493 false :
1494 true;
1495
1496 Assert(
1497 !(use_values && use_indices),
1498 ExcMessage(
1499 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1500
1501 // if computation of eigenvectors is not required use a sufficiently small
1502 // distributed matrix
1503 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1504 compute_eigenvectors ?
1505 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1506 grid,
1507 row_block_size) :
1508 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1509 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1510
1511 eigenvectors->property = property;
1512 // number of eigenvalues to be returned from psyevx; upon successful exit ev
1513 // contains the m selected eigenvalues in ascending order; set to all
1514 // eigenvalues in case we will be using psyev.
1515 int m = n_rows;
1516 std::vector<NumberType> ev(n_rows);
1517
1518 if (grid->mpi_process_is_active)
1519 {
1520 int info = 0;
1521 /*
1522 * for jobz==N only eigenvalues are computed, for jobz='V' also the
1523 * eigenvectors of the matrix are computed
1524 */
1525 char jobz = compute_eigenvectors ? 'V' : 'N';
1526 char range = 'A';
1527 // default value is to compute all eigenvalues and optionally eigenvectors
1528 bool all_eigenpairs = true;
1529 NumberType vl = NumberType(), vu = NumberType();
1530 int il = 1, iu = 1;
1531 // number of eigenvectors to be returned;
1532 // upon successful exit the first m=nz columns contain the selected
1533 // eigenvectors (only if jobz=='V')
1534 int nz = 0;
1535 NumberType abstol = NumberType();
1536
1537 // orfac decides which eigenvectors should be reorthogonalized
1538 // see
1539 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1540 // for explanation; to keep it simple no reorthogonalization will be done by
1541 // setting orfac to 0
1542 NumberType orfac = 0;
1543 // contains the indices of eigenvectors that failed to converge
1544 std::vector<int> ifail;
1545 // This array contains indices of eigenvectors corresponding to
1546 // a cluster of eigenvalues that could not be reorthogonalized
1547 // due to insufficient workspace
1548 // see
1549 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1550 // for explanation
1551 std::vector<int> iclustr;
1552 // This array contains the gap between eigenvalues whose
1553 // eigenvectors could not be reorthogonalized.
1554 // see
1555 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1556 // for explanation
1557 std::vector<NumberType> gap(n_local_rows * n_local_columns);
1558
1559 // index range for eigenvalues is not specified
1560 if (!use_indices)
1561 {
1562 // interval for eigenvalues is not specified and consequently all
1563 // eigenvalues/eigenpairs will be computed
1564 if (!use_values)
1565 {
1566 range = 'A';
1567 all_eigenpairs = true;
1568 }
1569 else
1570 {
1571 range = 'V';
1572 all_eigenpairs = false;
1573 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1574 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1575 }
1576 }
1577 else
1578 {
1579 range = 'I';
1580 all_eigenpairs = false;
1581 // as Fortran starts counting/indexing from 1 unlike C/C++, where it
1582 // starts from 0
1583 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1584 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1585 }
1586 NumberType *A_loc = this->values.data();
1587 /*
1588 * by setting lwork to -1 a workspace query for optimal length of work is
1589 * performed
1590 */
1591 int lwork = -1;
1592 int liwork = -1;
1593 NumberType *eigenvectors_loc =
1594 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1595 work.resize(1);
1596 iwork.resize(1);
1597
1598 if (all_eigenpairs)
1599 {
1600 psyev(&jobz,
1601 &uplo,
1602 &n_rows,
1603 A_loc,
1604 &submatrix_row,
1605 &submatrix_column,
1606 descriptor,
1607 ev.data(),
1608 eigenvectors_loc,
1609 &eigenvectors->submatrix_row,
1610 &eigenvectors->submatrix_column,
1611 eigenvectors->descriptor,
1612 work.data(),
1613 &lwork,
1614 &info);
1615 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1616 }
1617 else
1618 {
1619 char cmach = compute_eigenvectors ? 'U' : 'S';
1620 plamch(&(this->grid->blacs_context), &cmach, abstol);
1621 abstol *= 2;
1622 ifail.resize(n_rows);
1623 iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
1624 gap.resize(grid->n_process_rows * grid->n_process_columns);
1625
1626 psyevx(&jobz,
1627 &range,
1628 &uplo,
1629 &n_rows,
1630 A_loc,
1631 &submatrix_row,
1632 &submatrix_column,
1633 descriptor,
1634 &vl,
1635 &vu,
1636 &il,
1637 &iu,
1638 &abstol,
1639 &m,
1640 &nz,
1641 ev.data(),
1642 &orfac,
1643 eigenvectors_loc,
1644 &eigenvectors->submatrix_row,
1645 &eigenvectors->submatrix_column,
1646 eigenvectors->descriptor,
1647 work.data(),
1648 &lwork,
1649 iwork.data(),
1650 &liwork,
1651 ifail.data(),
1652 iclustr.data(),
1653 gap.data(),
1654 &info);
1655 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1656 }
1657 lwork = static_cast<int>(work[0]);
1658 work.resize(lwork);
1659
1660 if (all_eigenpairs)
1661 {
1662 psyev(&jobz,
1663 &uplo,
1664 &n_rows,
1665 A_loc,
1666 &submatrix_row,
1667 &submatrix_column,
1668 descriptor,
1669 ev.data(),
1670 eigenvectors_loc,
1671 &eigenvectors->submatrix_row,
1672 &eigenvectors->submatrix_column,
1673 eigenvectors->descriptor,
1674 work.data(),
1675 &lwork,
1676 &info);
1677
1678 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1679 }
1680 else
1681 {
1682 liwork = iwork[0];
1683 AssertThrow(liwork > 0, ExcInternalError());
1684 iwork.resize(liwork);
1685
1686 psyevx(&jobz,
1687 &range,
1688 &uplo,
1689 &n_rows,
1690 A_loc,
1691 &submatrix_row,
1692 &submatrix_column,
1693 descriptor,
1694 &vl,
1695 &vu,
1696 &il,
1697 &iu,
1698 &abstol,
1699 &m,
1700 &nz,
1701 ev.data(),
1702 &orfac,
1703 eigenvectors_loc,
1704 &eigenvectors->submatrix_row,
1705 &eigenvectors->submatrix_column,
1706 eigenvectors->descriptor,
1707 work.data(),
1708 &lwork,
1709 iwork.data(),
1710 &liwork,
1711 ifail.data(),
1712 iclustr.data(),
1713 gap.data(),
1714 &info);
1715
1716 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1717 }
1718 // if eigenvectors are queried copy eigenvectors to original matrix
1719 // as the temporary matrix eigenvectors has identical dimensions and
1720 // block-cyclic distribution we simply swap the local array
1721 if (compute_eigenvectors)
1722 this->values.swap(eigenvectors->values);
1723
1724 // adapt the size of ev to fit m upon return
1725 while (ev.size() > static_cast<size_type>(m))
1726 ev.pop_back();
1727 }
1728 /*
1729 * send number of computed eigenvalues to inactive processes
1730 */
1731 grid->send_to_inactive(&m, 1);
1732
1733 /*
1734 * inactive processes have to resize array of eigenvalues
1735 */
1736 if (!grid->mpi_process_is_active)
1737 ev.resize(m);
1738 /*
1739 * send the eigenvalues to processors not being part of the process grid
1740 */
1741 grid->send_to_inactive(ev.data(), ev.size());
1742
1743 /*
1744 * if only eigenvalues are queried the content of the matrix will be destroyed
1745 * if the eigenpairs are queried matrix A on exit stores the eigenvectors in
1746 * the columns
1747 */
1748 if (compute_eigenvectors)
1749 {
1750 property = LAPACKSupport::Property::general;
1751 state = LAPACKSupport::eigenvalues;
1752 }
1753 else
1754 state = LAPACKSupport::unusable;
1755
1756 return ev;
1757}
1758
1759
1760
1761template <typename NumberType>
1762std::vector<NumberType>
1763ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
1764 const std::pair<unsigned int, unsigned int> &index_limits,
1765 const bool compute_eigenvectors)
1766{
1767 // Check validity of index limits.
1768 AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
1769 AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));
1770
1771 const std::pair<unsigned int, unsigned int> idx =
1772 std::make_pair(std::min(index_limits.first, index_limits.second),
1773 std::max(index_limits.first, index_limits.second));
1774
1775 // Compute all eigenvalues/eigenvectors.
1776 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1777 return eigenpairs_symmetric_MRRR(compute_eigenvectors);
1778 else
1779 return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
1780}
1781
1782
1783
1784template <typename NumberType>
1785std::vector<NumberType>
1786ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
1787 const std::pair<NumberType, NumberType> &value_limits,
1788 const bool compute_eigenvectors)
1789{
1790 AssertIsFinite(value_limits.first);
1791 AssertIsFinite(value_limits.second);
1792
1793 const std::pair<unsigned int, unsigned int> indices =
1794 std::make_pair(numbers::invalid_unsigned_int,
1795 numbers::invalid_unsigned_int);
1796
1797 return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
1798}
1799
1800
1801
1802template <typename NumberType>
1803std::vector<NumberType>
1804ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
1805 const bool compute_eigenvectors,
1806 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1807 const std::pair<NumberType, NumberType> & eigenvalue_limits)
1808{
1809 Assert(state == LAPACKSupport::matrix,
1810 ExcMessage(
1811 "Matrix has to be in Matrix state before calling this function."));
1812 Assert(property == LAPACKSupport::symmetric,
1813 ExcMessage("Matrix has to be symmetric for this operation."));
1814
1815 std::lock_guard<std::mutex> lock(mutex);
1816
1817 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1818 std::isnan(eigenvalue_limits.second)) ?
1819 false :
1820 true;
1821 const bool use_indices =
1822 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1823 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1824 false :
1825 true;
1826
1827 Assert(
1828 !(use_values && use_indices),
1829 ExcMessage(
1830 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1831
1832 // If computation of eigenvectors is not required, use a sufficiently small
1833 // distributed matrix.
1834 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1835 compute_eigenvectors ?
1836 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1837 grid,
1838 row_block_size) :
1839 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1840 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1841
1842 eigenvectors->property = property;
1843 // Number of eigenvalues to be returned from psyevr; upon successful exit ev
1844 // contains the m selected eigenvalues in ascending order.
1845 int m = n_rows;
1846 std::vector<NumberType> ev(n_rows);
1847
1848 // Number of eigenvectors to be returned;
1849 // Upon successful exit the first m=nz columns contain the selected
1850 // eigenvectors (only if jobz=='V').
1851 int nz = 0;
1852
1853 if (grid->mpi_process_is_active)
1854 {
1855 int info = 0;
1856 /*
1857 * For jobz==N only eigenvalues are computed, for jobz='V' also the
1858 * eigenvectors of the matrix are computed.
1859 */
1860 char jobz = compute_eigenvectors ? 'V' : 'N';
1861 // Default value is to compute all eigenvalues and optionally
1862 // eigenvectors.
1863 char range = 'A';
1864 NumberType vl = NumberType(), vu = NumberType();
1865 int il = 1, iu = 1;
1866
1867 // Index range for eigenvalues is not specified.
1868 if (!use_indices)
1869 {
1870 // Interval for eigenvalues is not specified and consequently all
1871 // eigenvalues/eigenpairs will be computed.
1872 if (!use_values)
1873 {
1874 range = 'A';
1875 }
1876 else
1877 {
1878 range = 'V';
1879 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1880 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1881 }
1882 }
1883 else
1884 {
1885 range = 'I';
1886 // As Fortran starts counting/indexing from 1 unlike C/C++, where it
1887 // starts from 0.
1888 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1889 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1890 }
1891 NumberType *A_loc = this->values.data();
1892
1893 /*
1894 * By setting lwork to -1 a workspace query for optimal length of work is
1895 * performed.
1896 */
1897 int lwork = -1;
1898 int liwork = -1;
1899 NumberType *eigenvectors_loc =
1900 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1901 work.resize(1);
1902 iwork.resize(1);
1903
1904 psyevr(&jobz,
1905 &range,
1906 &uplo,
1907 &n_rows,
1908 A_loc,
1909 &submatrix_row,
1910 &submatrix_column,
1911 descriptor,
1912 &vl,
1913 &vu,
1914 &il,
1915 &iu,
1916 &m,
1917 &nz,
1918 ev.data(),
1919 eigenvectors_loc,
1920 &eigenvectors->submatrix_row,
1921 &eigenvectors->submatrix_column,
1922 eigenvectors->descriptor,
1923 work.data(),
1924 &lwork,
1925 iwork.data(),
1926 &liwork,
1927 &info);
1928
1929 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1930
1931 lwork = static_cast<int>(work[0]);
1932 work.resize(lwork);
1933 liwork = iwork[0];
1934 iwork.resize(liwork);
1935
1936 psyevr(&jobz,
1937 &range,
1938 &uplo,
1939 &n_rows,
1940 A_loc,
1941 &submatrix_row,
1942 &submatrix_column,
1943 descriptor,
1944 &vl,
1945 &vu,
1946 &il,
1947 &iu,
1948 &m,
1949 &nz,
1950 ev.data(),
1951 eigenvectors_loc,
1952 &eigenvectors->submatrix_row,
1953 &eigenvectors->submatrix_column,
1954 eigenvectors->descriptor,
1955 work.data(),
1956 &lwork,
1957 iwork.data(),
1958 &liwork,
1959 &info);
1960
1961 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1962
1963 if (compute_eigenvectors)
1964 AssertThrow(
1965 m == nz,
1966 ExcMessage(
1967 "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
1968
1969 // If eigenvectors are queried, copy eigenvectors to original matrix.
1970 // As the temporary matrix eigenvectors has identical dimensions and
1971 // block-cyclic distribution we simply swap the local array.
1972 if (compute_eigenvectors)
1973 this->values.swap(eigenvectors->values);
1974
1975 // Adapt the size of ev to fit m upon return.
1976 while (ev.size() > static_cast<size_type>(m))
1977 ev.pop_back();
1978 }
1979 /*
1980 * Send number of computed eigenvalues to inactive processes.
1981 */
1982 grid->send_to_inactive(&m, 1);
1983
1984 /*
1985 * Inactive processes have to resize array of eigenvalues.
1986 */
1987 if (!grid->mpi_process_is_active)
1988 ev.resize(m);
1989 /*
1990 * Send the eigenvalues to processors not being part of the process grid.
1991 */
1992 grid->send_to_inactive(ev.data(), ev.size());
1993
1994 /*
1995 * If only eigenvalues are queried, the content of the matrix will be
1996 * destroyed. If the eigenpairs are queried, matrix A on exit stores the
1997 * eigenvectors in the columns.
1998 */
1999 if (compute_eigenvectors)
2000 {
2001 property = LAPACKSupport::Property::general;
2002 state = LAPACKSupport::eigenvalues;
2003 }
2004 else
2005 state = LAPACKSupport::unusable;
2006
2007 return ev;
2008}
2009
2010
2011
2012template <typename NumberType>
2013std::vector<NumberType>
2014ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
2015 ScaLAPACKMatrix<NumberType> *VT)
2016{
2017 Assert(state == LAPACKSupport::matrix,
2018 ExcMessage(
2019 "Matrix has to be in Matrix state before calling this function."));
2020 Assert(row_block_size == column_block_size,
2021 ExcDimensionMismatch(row_block_size, column_block_size));
2022
2023 const bool left_singluar_vectors = (U != nullptr) ? true : false;
2024 const bool right_singluar_vectors = (VT != nullptr) ? true : false;
2025
2026 if (left_singluar_vectors)
2027 {
2028 Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
2029 Assert(U->n_rows == U->n_columns,
2030 ExcDimensionMismatch(U->n_rows, U->n_columns));
2031 Assert(row_block_size == U->row_block_size,
2032 ExcDimensionMismatch(row_block_size, U->row_block_size));
2033 Assert(column_block_size == U->column_block_size,
2034 ExcDimensionMismatch(column_block_size, U->column_block_size));
2035 Assert(grid->blacs_context == U->grid->blacs_context,
2036 ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
2037 }
2038 if (right_singluar_vectors)
2039 {
2040 Assert(n_columns == VT->n_rows,
2041 ExcDimensionMismatch(n_columns, VT->n_rows));
2042 Assert(VT->n_rows == VT->n_columns,
2043 ExcDimensionMismatch(VT->n_rows, VT->n_columns));
2044 Assert(row_block_size == VT->row_block_size,
2045 ExcDimensionMismatch(row_block_size, VT->row_block_size));
2046 Assert(column_block_size == VT->column_block_size,
2047 ExcDimensionMismatch(column_block_size, VT->column_block_size));
2048 Assert(grid->blacs_context == VT->grid->blacs_context,
2049 ExcDimensionMismatch(grid->blacs_context,
2050 VT->grid->blacs_context));
2051 }
2052 std::lock_guard<std::mutex> lock(mutex);
2053
2054 std::vector<NumberType> sv(std::min(n_rows, n_columns));
2055
2056 if (grid->mpi_process_is_active)
2057 {
2058 char jobu = left_singluar_vectors ? 'V' : 'N';
2059 char jobvt = right_singluar_vectors ? 'V' : 'N';
2060 NumberType *A_loc = this->values.data();
2061 NumberType *U_loc = left_singluar_vectors ? U->values.data() : nullptr;
2062 NumberType *VT_loc = right_singluar_vectors ? VT->values.data() : nullptr;
2063 int info = 0;
2064 /*
2065 * by setting lwork to -1 a workspace query for optimal length of work is
2066 * performed
2067 */
2068 int lwork = -1;
2069 work.resize(1);
2070
2071 pgesvd(&jobu,
2072 &jobvt,
2073 &n_rows,
2074 &n_columns,
2075 A_loc,
2076 &submatrix_row,
2077 &submatrix_column,
2078 descriptor,
2079 &*sv.begin(),
2080 U_loc,
2081 &U->submatrix_row,
2082 &U->submatrix_column,
2083 U->descriptor,
2084 VT_loc,
2085 &VT->submatrix_row,
2086 &VT->submatrix_column,
2087 VT->descriptor,
2088 work.data(),
2089 &lwork,
2090 &info);
2091 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2092
2093 lwork = static_cast<int>(work[0]);
2094 work.resize(lwork);
2095
2096 pgesvd(&jobu,
2097 &jobvt,
2098 &n_rows,
2099 &n_columns,
2100 A_loc,
2101 &submatrix_row,
2102 &submatrix_column,
2103 descriptor,
2104 &*sv.begin(),
2105 U_loc,
2106 &U->submatrix_row,
2107 &U->submatrix_column,
2108 U->descriptor,
2109 VT_loc,
2110 &VT->submatrix_row,
2111 &VT->submatrix_column,
2112 VT->descriptor,
2113 work.data(),
2114 &lwork,
2115 &info);
2116 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2117 }
2118
2119 /*
2120 * send the singular values to processors not being part of the process grid
2121 */
2122 grid->send_to_inactive(sv.data(), sv.size());
2123
2126
2127 return sv;
2128}
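// A minimal usage sketch for compute_SVD() (grid, m and n are placeholders;
// A is assumed to be filled and in the "matrix" state, and all three matrices
// share the same process grid and block sizes):
//
//   ScaLAPACKMatrix<double> A(m, n, grid, 32, 32);
//   ScaLAPACKMatrix<double> U(m, m, grid, 32, 32);  // left singular vectors
//   ScaLAPACKMatrix<double> VT(n, n, grid, 32, 32); // right singular vectors, transposed
//   const std::vector<double> sv = A.compute_SVD(&U, &VT); // min(m,n) singular values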
2129
2130
2131
2132template <typename NumberType>
2133void
2134ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
2135 const bool transpose)
2136{
2137 Assert(grid == B.grid,
2138 ExcMessage("The matrices A and B need to have the same process grid"));
2139 Assert(state == LAPACKSupport::matrix,
2140 ExcMessage(
2141 "Matrix has to be in Matrix state before calling this function."));
2142 Assert(B.state == LAPACKSupport::matrix,
2143 ExcMessage(
2144 "Matrix B has to be in Matrix state before calling this function."));
2145
2146 if (transpose)
2147 {
2148 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
2149 }
2150 else
2151 {
2152 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
2153 }
2154
2155 // see
2156 // https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgels.htm
2157 Assert(row_block_size == column_block_size,
2158 ExcMessage(
2159 "Use identical block sizes for rows and columns of matrix A"));
2160 Assert(B.row_block_size == B.column_block_size,
2161 ExcMessage(
2162 "Use identical block sizes for rows and columns of matrix B"));
2163 Assert(row_block_size == B.row_block_size,
2164 ExcMessage(
2165 "Use identical block-cyclic distribution for matrices A and B"));
2166
2167 std::lock_guard<std::mutex> lock(mutex);
2168
2169 if (grid->mpi_process_is_active)
2170 {
2171 char trans = transpose ? 'T' : 'N';
2172 NumberType *A_loc = this->values.data();
2173 NumberType *B_loc = B.values.data();
2174 int info = 0;
2175 /*
2176 * by setting lwork to -1 a workspace query for optimal length of work is
2177 * performed
2178 */
2179 int lwork = -1;
2180 work.resize(1);
2181
2182 pgels(&trans,
2183 &n_rows,
2184 &n_columns,
2185 &B.n_columns,
2186 A_loc,
2187 &submatrix_row,
2188 &submatrix_column,
2189 descriptor,
2190 B_loc,
2191 &B.submatrix_row,
2192 &B.submatrix_column,
2193 B.descriptor,
2194 work.data(),
2195 &lwork,
2196 &info);
2197 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2198
2199 lwork = static_cast<int>(work[0]);
2200 work.resize(lwork);
2201
2202 pgels(&trans,
2203 &n_rows,
2204 &n_columns,
2205 &B.n_columns,
2206 A_loc,
2207 &submatrix_row,
2208 &submatrix_column,
2209 descriptor,
2210 B_loc,
2211 &B.submatrix_row,
2212 &B.submatrix_column,
2213 B.descriptor,
2214 work.data(),
2215 &lwork,
2216 &info);
2217 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2218 }
2219 state = LAPACKSupport::State::unusable;
2220}
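// A minimal usage sketch for least_squares(), solving the overdetermined
// system A x = b in the least-squares sense (placeholder names; A and B must
// use identical block sizes and the same block-cyclic distribution):
//
//   ScaLAPACKMatrix<double> A(m, n, grid, nb, nb);      // m >= n, full rank
//   ScaLAPACKMatrix<double> B(m, n_rhs, grid, nb, nb);  // right-hand sides
//   // ... fill A and B ...
//   A.least_squares(B);  // on exit the first n rows of B hold the solutions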
2221
2222
2223
2224template <typename NumberType>
2225unsigned int
2226ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
2227{
2228 Assert(state == LAPACKSupport::matrix,
2229 ExcMessage(
2230 "Matrix has to be in Matrix state before calling this function."));
2231 Assert(row_block_size == column_block_size,
2232 ExcMessage(
2233 "Use identical block sizes for rows and columns of matrix A"));
2234 Assert(
2235 ratio > 0. && ratio < 1.,
2236 ExcMessage(
2237 "input parameter ratio has to be larger than zero and smaller than 1"));
2238
2239 ScaLAPACKMatrix<NumberType> U(n_rows,
2240 n_rows,
2241 grid,
2242 row_block_size,
2243 row_block_size,
2244 LAPACKSupport::Property::general);
2245 ScaLAPACKMatrix<NumberType> VT(n_columns,
2246 n_columns,
2247 grid,
2248 row_block_size,
2249 row_block_size,
2250 LAPACKSupport::Property::general);
2251 std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
2252 AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
2253 ExcMessage("Matrix has rank 0"));
2254
2255 // Get the number of singular values fulfilling sv[i] > sv[0] * ratio.
2256 // Obviously, the 0-th element already satisfies sv[0] > sv[0] * ratio. The
2257 // singular values in sv are ordered by descending value, so we break out of
2258 // the loop as soon as a singular value drops below sv[0] * ratio.
2259 unsigned int n_sv = 1;
2260 std::vector<NumberType> inv_sigma;
2261 inv_sigma.push_back(1 / sv[0]);
2262
2263 for (unsigned int i = 1; i < sv.size(); ++i)
2264 if (sv[i] > sv[0] * ratio)
2265 {
2266 ++n_sv;
2267 inv_sigma.push_back(1 / sv[i]);
2268 }
2269 else
2270 break;
2271
2272 // For the matrix multiplication we use only the columns of U and rows of VT
2273 // which are associated with singular values larger than the limit. That saves
2274 // computational time for matrices with rank significantly smaller than
2275 // min(n_rows,n_columns)
2276 ScaLAPACKMatrix<NumberType> U_R(n_rows,
2277 n_sv,
2278 grid,
2279 row_block_size,
2280 row_block_size,
2281 LAPACKSupport::Property::general);
2282 ScaLAPACKMatrix<NumberType> VT_R(n_sv,
2283 n_columns,
2284 grid,
2285 row_block_size,
2286 row_block_size,
2287 LAPACKSupport::Property::general);
2288 U.copy_to(U_R,
2289 std::make_pair(0, 0),
2290 std::make_pair(0, 0),
2291 std::make_pair(n_rows, n_sv));
2292 VT.copy_to(VT_R,
2293 std::make_pair(0, 0),
2294 std::make_pair(0, 0),
2295 std::make_pair(n_sv, n_columns));
2296
2297 VT_R.scale_rows(inv_sigma);
2298 this->reinit(n_columns,
2299 n_rows,
2300 this->grid,
2301 row_block_size,
2302 column_block_size,
2303 LAPACKSupport::Property::general);
2304 VT_R.mult(1, U_R, 0, *this, true, true);
2306 return n_sv;
2307}
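// A worked example of the truncation criterion above (illustration only): for
// singular values sv = {10, 5, 1e-3, 1e-8} and ratio = 1e-4 the threshold is
// sv[0] * ratio = 1e-3; only sv[0] and sv[1] are strictly larger, so n_sv == 2
// and the pseudoinverse is built from two columns of U and two rows of VT.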
2308
2309
2310
2311template <typename NumberType>
2312NumberType
2313ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(
2314 const NumberType a_norm) const
2315{
2316 Assert(state == LAPACKSupport::cholesky,
2317 ExcMessage(
2318 "Matrix has to be in Cholesky state before calling this function."));
2319 std::lock_guard<std::mutex> lock(mutex);
2320 NumberType rcond = 0.;
2321
2322 if (grid->mpi_process_is_active)
2323 {
2324 int liwork = n_local_rows;
2325 iwork.resize(liwork);
2326
2327 int info = 0;
2328 const NumberType *A_loc = this->values.data();
2329
2330 // by setting lwork to -1 a workspace query for optimal length of work is
2331 // performed
2332 int lwork = -1;
2333 work.resize(1);
2334 ppocon(&uplo,
2335 &n_columns,
2336 A_loc,
2337 &submatrix_row,
2338 &submatrix_column,
2339 descriptor,
2340 &a_norm,
2341 &rcond,
2342 work.data(),
2343 &lwork,
2344 iwork.data(),
2345 &liwork,
2346 &info);
2347 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2348 lwork = static_cast<int>(std::ceil(work[0]));
2349 work.resize(lwork);
2350
2351 // now the actual run:
2352 ppocon(&uplo,
2353 &n_columns,
2354 A_loc,
2355 &submatrix_row,
2356 &submatrix_column,
2357 descriptor,
2358 &a_norm,
2359 &rcond,
2360 work.data(),
2361 &lwork,
2362 iwork.data(),
2363 &liwork,
2364 &info);
2365 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2366 }
2367 grid->send_to_inactive(&rcond);
2368 return rcond;
2369}
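// A minimal usage sketch for reciprocal_condition_number() (placeholder
// names): the norm has to be computed while the matrix still holds its
// original content, because compute_cholesky_factorization() overwrites it.
//
//   ScaLAPACKMatrix<double> A(n, grid, 32, LAPACKSupport::Property::symmetric);
//   // ... fill A with a symmetric positive definite matrix ...
//   const double a_norm = A.l1_norm();
//   A.compute_cholesky_factorization();
//   const double rcond = A.reciprocal_condition_number(a_norm);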
2370
2371
2372
2373template <typename NumberType>
2374NumberType
2375ScaLAPACKMatrix<NumberType>::l1_norm() const
2376{
2377 const char type('O');
2378
2379 if (property == LAPACKSupport::symmetric)
2380 return norm_symmetric(type);
2381 else
2382 return norm_general(type);
2383}
2384
2385
2386
2387template <typename NumberType>
2388NumberType
2389ScaLAPACKMatrix<NumberType>::linfty_norm() const
2390{
2391 const char type('I');
2392
2393 if (property == LAPACKSupport::symmetric)
2394 return norm_symmetric(type);
2395 else
2396 return norm_general(type);
2397}
2398
2399
2400
2401template <typename NumberType>
2402NumberType
2403ScaLAPACKMatrix<NumberType>::frobenius_norm() const
2404{
2405 const char type('F');
2406
2407 if (property == LAPACKSupport::symmetric)
2408 return norm_symmetric(type);
2409 else
2410 return norm_general(type);
2411}
2412
2413
2414
2415template <typename NumberType>
2416NumberType
2417ScaLAPACKMatrix<NumberType>::norm_general(const char type) const
2418{
2419 Assert(state == LAPACKSupport::matrix ||
2420 state == LAPACKSupport::inverse_matrix,
2421 ExcMessage("norms can be called in matrix state only."));
2422 std::lock_guard<std::mutex> lock(mutex);
2423 NumberType res = 0.;
2424
2425 if (grid->mpi_process_is_active)
2426 {
2427 const int iarow = indxg2p_(&submatrix_row,
2428 &row_block_size,
2429 &(grid->this_process_row),
2430 &first_process_row,
2431 &(grid->n_process_rows));
2432 const int iacol = indxg2p_(&submatrix_column,
2433 &column_block_size,
2434 &(grid->this_process_column),
2435 &first_process_column,
2436 &(grid->n_process_columns));
2437 const int mp0 = numroc_(&n_rows,
2438 &row_block_size,
2439 &(grid->this_process_row),
2440 &iarow,
2441 &(grid->n_process_rows));
2442 const int nq0 = numroc_(&n_columns,
2443 &column_block_size,
2444 &(grid->this_process_column),
2445 &iacol,
2446 &(grid->n_process_columns));
2447
2448 // type='M': compute largest absolute value
2449 // type='F' || type='E': compute Frobenius norm
2450 // type='O' || type='1': compute 1-norm; type='I': compute infinity norm
2451 int lwork = 0; // for type == 'M' || type == 'F' || type == 'E'
2452 if (type == 'O' || type == '1')
2453 lwork = nq0;
2454 else if (type == 'I')
2455 lwork = mp0;
2456
2457 work.resize(lwork);
2458 const NumberType *A_loc = this->values.begin();
2459 res = plange(&type,
2460 &n_rows,
2461 &n_columns,
2462 A_loc,
2463 &submatrix_row,
2464 &submatrix_column,
2465 descriptor,
2466 work.data());
2467 }
2468 grid->send_to_inactive(&res);
2469 return res;
2470}
2471
2472
2473
2474template <typename NumberType>
2475NumberType
2476ScaLAPACKMatrix<NumberType>::norm_symmetric(const char type) const
2477{
2478 Assert(state == LAPACKSupport::matrix ||
2479 state == LAPACKSupport::inverse_matrix,
2480 ExcMessage("norms can be called in matrix state only."));
2481 Assert(property == LAPACKSupport::symmetric,
2482 ExcMessage("Matrix has to be symmetric for this operation."));
2483 std::lock_guard<std::mutex> lock(mutex);
2484 NumberType res = 0.;
2485
2486 if (grid->mpi_process_is_active)
2487 {
2488 // int IROFFA = MOD( IA-1, MB_A )
2489 // int ICOFFA = MOD( JA-1, NB_A )
2490 const int lcm =
2491 ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
2492 const int v2 = lcm / (grid->n_process_rows);
2493
2494 const int IAROW = indxg2p_(&submatrix_row,
2495 &row_block_size,
2496 &(grid->this_process_row),
2497 &first_process_row,
2498 &(grid->n_process_rows));
2499 const int IACOL = indxg2p_(&submatrix_column,
2500 &column_block_size,
2501 &(grid->this_process_column),
2502 &first_process_column,
2503 &(grid->n_process_columns));
2504 const int Np0 = numroc_(&n_columns /*+IROFFA*/,
2505 &row_block_size,
2506 &(grid->this_process_row),
2507 &IAROW,
2508 &(grid->n_process_rows));
2509 const int Nq0 = numroc_(&n_columns /*+ICOFFA*/,
2510 &column_block_size,
2511 &(grid->this_process_column),
2512 &IACOL,
2513 &(grid->n_process_columns));
2514
2515 const int v1 = iceil_(&Np0, &row_block_size);
2516 const int ldw = (n_local_rows == n_local_columns) ?
2517 0 :
2518 row_block_size * iceil_(&v1, &v2);
2519
2520 const int lwork =
2521 (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
2522 work.resize(lwork);
2523 const NumberType *A_loc = this->values.begin();
2524 res = plansy(&type,
2525 &uplo,
2526 &n_columns,
2527 A_loc,
2528 &submatrix_row,
2529 &submatrix_column,
2530 descriptor,
2531 work.data());
2532 }
2533 grid->send_to_inactive(&res);
2534 return res;
2535}
2536
2537
2538
2539# ifdef DEAL_II_WITH_HDF5
2540namespace internal
2541{
2542 namespace
2543 {
2544 void
2545 create_HDF5_state_enum_id(hid_t &state_enum_id)
2546 {
2547 // create HDF5 enum type for LAPACKSupport::State
2548 LAPACKSupport::State val;
2549 state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));
2550 val = LAPACKSupport::State::cholesky;
2551 herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
2552 AssertThrow(status >= 0, ExcInternalError());
2553 val = LAPACKSupport::State::eigenvalues;
2554 status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
2555 AssertThrow(status >= 0, ExcInternalError());
2556 val = LAPACKSupport::State::inverse_matrix;
2557 status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
2558 AssertThrow(status >= 0, ExcInternalError());
2559 val = LAPACKSupport::State::inverse_svd;
2560 status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
2561 AssertThrow(status >= 0, ExcInternalError());
2562 val = LAPACKSupport::State::lu;
2563 status = H5Tenum_insert(state_enum_id, "lu", &val);
2564 AssertThrow(status >= 0, ExcInternalError());
2565 val = LAPACKSupport::State::matrix;
2566 status = H5Tenum_insert(state_enum_id, "matrix", &val);
2567 AssertThrow(status >= 0, ExcInternalError());
2568 val = LAPACKSupport::State::svd;
2569 status = H5Tenum_insert(state_enum_id, "svd", &val);
2570 AssertThrow(status >= 0, ExcInternalError());
2571 val = LAPACKSupport::State::unusable;
2572 status = H5Tenum_insert(state_enum_id, "unusable", &val);
2573 AssertThrow(status >= 0, ExcInternalError());
2574 }
2575
2576 void
2577 create_HDF5_property_enum_id(hid_t &property_enum_id)
2578 {
2579 // create HDF5 enum type for LAPACKSupport::Property
2580 property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));
2581 LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
2582 herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
2583 AssertThrow(status >= 0, ExcInternalError());
2584 prop = LAPACKSupport::Property::general;
2585 status = H5Tenum_insert(property_enum_id, "general", &prop);
2586 AssertThrow(status >= 0, ExcInternalError());
2587 prop = LAPACKSupport::Property::hessenberg;
2588 status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
2589 AssertThrow(status >= 0, ExcInternalError());
2590 prop = LAPACKSupport::Property::lower_triangular;
2591 status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
2592 AssertThrow(status >= 0, ExcInternalError());
2593 prop = LAPACKSupport::Property::symmetric;
2594 status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
2595 AssertThrow(status >= 0, ExcInternalError());
2596 prop = LAPACKSupport::Property::upper_triangular;
2597 status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
2598 AssertThrow(status >= 0, ExcInternalError());
2599 }
2600 } // namespace
2601} // namespace internal
2602# endif
2603
2604
2605
2606template <typename NumberType>
2607void
2608ScaLAPACKMatrix<NumberType>::save(
2609 const std::string & filename,
2610 const std::pair<unsigned int, unsigned int> &chunk_size) const
2611{
2612# ifndef DEAL_II_WITH_HDF5
2613 (void)filename;
2614 (void)chunk_size;
2615 AssertThrow(false, ExcMessage("HDF5 support is disabled."));
2616# else
2617
2618 std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
2619
2620 if (chunks_size_.first == numbers::invalid_unsigned_int ||
2621 chunks_size_.second == numbers::invalid_unsigned_int)
2622 {
2623 // default: store the matrix in chunks of columns
2624 chunks_size_.first = n_rows;
2625 chunks_size_.second = 1;
2626 }
2627 Assert(chunks_size_.first > 0,
2628 ExcMessage("The row chunk size must be larger than 0."));
2629 AssertIndexRange(chunks_size_.first, n_rows + 1);
2630 Assert(chunks_size_.second > 0,
2631 ExcMessage("The column chunk size must be larger than 0."));
2632 AssertIndexRange(chunks_size_.second, n_columns + 1);
2633
2634# ifdef H5_HAVE_PARALLEL
2635 // implementation for configurations equipped with a parallel file system
2636 save_parallel(filename, chunks_size_);
2637
2638# else
2639 // implementation for configurations with no parallel file system
2640 save_serial(filename, chunks_size_);
2641
2642# endif
2643# endif
2644}
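// A minimal usage sketch for save()/load() (placeholder file name; requires
// deal.II to be configured with HDF5). By default the matrix is stored in
// chunks of whole columns (n_rows x 1); a custom chunk size can be passed:
//
//   A.save("matrix.h5");                           // default chunking
//   A.save("matrix.h5", std::make_pair(64u, 64u)); // 64 x 64 chunks
//   B.load("matrix.h5");  // B must have the same global dimensions as A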
2645
2646
2647
2648template <typename NumberType>
2649void
2650ScaLAPACKMatrix<NumberType>::save_serial(
2651 const std::string & filename,
2652 const std::pair<unsigned int, unsigned int> &chunk_size) const
2653{
2654# ifndef DEAL_II_WITH_HDF5
2655 (void)filename;
2656 (void)chunk_size;
2657 Assert(false, ExcInternalError());
2658# else
2659
2660 /*
2661 * The content of the distributed matrix is copied to a matrix using a 1x1
2662 * process grid. Therefore, one process has all the data and can write it to a
2663 * file.
2664 *
2665 * Create a 1x1 column grid which will be used to initialize
2666 * an effectively serial ScaLAPACK matrix to gather the contents from the
2667 * current object
2668 */
2669 const auto column_grid =
2670 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2671 1,
2672 1);
2673
2674 const int MB = n_rows, NB = n_columns;
2675 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2676 copy_to(tmp);
2677
2678 // the 1x1 grid has only one process and this one writes
2679 // the content of the matrix to the HDF5 file
2680 if (tmp.grid->mpi_process_is_active)
2681 {
2682 herr_t status;
2683
2684 // create a new file using default properties
2685 hid_t file_id =
2686 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
2687
2688 // modify dataset creation properties, i.e. enable chunking
2689 hsize_t chunk_dims[2];
2690 // revert order of rows and columns as ScaLAPACK uses column-major
2691 // ordering
2692 chunk_dims[0] = chunk_size.second;
2693 chunk_dims[1] = chunk_size.first;
2694 hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
2695 status = H5Pset_chunk(data_property, 2, chunk_dims);
2696 AssertThrow(status >= 0, ExcIO());
2697
2698 // create the data space for the dataset
2699 hsize_t dims[2];
2700 // change order of rows and columns as ScaLAPACKMatrix uses column major
2701 // ordering
2702 dims[0] = n_columns;
2703 dims[1] = n_rows;
2704 hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
2705
2706 // create the dataset within the file using chunk creation properties
2707 hid_t type_id = hdf5_type_id(tmp.values.data());
2708 hid_t dataset_id = H5Dcreate2(file_id,
2709 "/matrix",
2710 type_id,
2711 dataspace_id,
2712 H5P_DEFAULT,
2713 data_property,
2714 H5P_DEFAULT);
2715
2716 // write the dataset
2717 status = H5Dwrite(
2718 dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
2719 AssertThrow(status >= 0, ExcIO());
2720
2721 // create HDF5 enum type for LAPACKSupport::State and
2722 // LAPACKSupport::Property
2723 hid_t state_enum_id, property_enum_id;
2724 internal::create_HDF5_state_enum_id(state_enum_id);
2725 internal::create_HDF5_property_enum_id(property_enum_id);
2726
2727 // create the data space for the state enum
2728 hsize_t dims_state[1];
2729 dims_state[0] = 1;
2730 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2731 // create the dataset for the state enum
2732 hid_t state_enum_dataset = H5Dcreate2(file_id,
2733 "/state",
2734 state_enum_id,
2735 state_enum_dataspace,
2736 H5P_DEFAULT,
2737 H5P_DEFAULT,
2738 H5P_DEFAULT);
2739 // write the dataset for the state enum
2740 status = H5Dwrite(state_enum_dataset,
2741 state_enum_id,
2742 H5S_ALL,
2743 H5S_ALL,
2744 H5P_DEFAULT,
2745 &state);
2746 AssertThrow(status >= 0, ExcIO());
2747
2748 // create the data space for the property enum
2749 hsize_t dims_property[1];
2750 dims_property[0] = 1;
2751 hid_t property_enum_dataspace =
2752 H5Screate_simple(1, dims_property, nullptr);
2753 // create the dataset for the property enum
2754 hid_t property_enum_dataset = H5Dcreate2(file_id,
2755 "/property",
2756 property_enum_id,
2757 property_enum_dataspace,
2758 H5P_DEFAULT,
2759 H5P_DEFAULT,
2760 H5P_DEFAULT);
2761 // write the dataset for the property enum
2762 status = H5Dwrite(property_enum_dataset,
2763 property_enum_id,
2764 H5S_ALL,
2765 H5S_ALL,
2766 H5P_DEFAULT,
2767 &property);
2768 AssertThrow(status >= 0, ExcIO());
2769
2770 // end access to the datasets and release resources used by them
2771 status = H5Dclose(dataset_id);
2772 AssertThrow(status >= 0, ExcIO());
2773 status = H5Dclose(state_enum_dataset);
2774 AssertThrow(status >= 0, ExcIO());
2775 status = H5Dclose(property_enum_dataset);
2776 AssertThrow(status >= 0, ExcIO());
2777
2778 // terminate access to the data spaces
2779 status = H5Sclose(dataspace_id);
2780 AssertThrow(status >= 0, ExcIO());
2781 status = H5Sclose(state_enum_dataspace);
2782 AssertThrow(status >= 0, ExcIO());
2783 status = H5Sclose(property_enum_dataspace);
2784 AssertThrow(status >= 0, ExcIO());
2785
2786 // release enum data types
2787 status = H5Tclose(state_enum_id);
2788 AssertThrow(status >= 0, ExcIO());
2789 status = H5Tclose(property_enum_id);
2790 AssertThrow(status >= 0, ExcIO());
2791
2792 // release the creation property
2793 status = H5Pclose(data_property);
2794 AssertThrow(status >= 0, ExcIO());
2795
2796 // close the file.
2797 status = H5Fclose(file_id);
2798 AssertThrow(status >= 0, ExcIO());
2799 }
2800# endif
2801}
2802
2803
2804
2805template <typename NumberType>
2806void
2807ScaLAPACKMatrix<NumberType>::save_parallel(
2808 const std::string & filename,
2809 const std::pair<unsigned int, unsigned int> &chunk_size) const
2810{
2811# ifndef DEAL_II_WITH_HDF5
2812 (void)filename;
2813 (void)chunk_size;
2814 Assert(false, ExcInternalError());
2815# else
2816
2817 const unsigned int n_mpi_processes(
2818 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
2819 MPI_Info info = MPI_INFO_NULL;
2820 /*
2821 * The content of the distributed matrix is copied to a matrix using a
2822 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
2823 * of the matrix, which they can write to the file
2824 *
2825 * Create a 1xn_processes column grid
2826 */
2827 const auto column_grid =
2828 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2829 1,
2830 n_mpi_processes);
2831
2832 const int MB = n_rows;
2833 /*
2834 * If the ratio n_columns/n_mpi_processes is smaller than the column block
2835 * size of the original matrix, the redistribution and saving of the matrix
2836 * requires a significant amount of MPI communication. Therefore, it is better
2837 * to set a minimum value for the block size NB, causing only
2838 * ceil(n_columns/NB) processes to be actively involved in saving the matrix.
2839 * Example: A 2*10^9 x 400 matrix is distributed on an 80 x 5 process grid
2840 * using block size 32. Instead of distributing the matrix on a 1 x 400
2841 * process grid with a row block size of 2*10^9 and a column block size of 1,
2842 * the minimum value for NB yields that only ceil(400/32)=13 processes will be
2843 * writing the matrix to disk.
2844 */
2845 const int NB = std::max(static_cast<int>(std::ceil(
2846 static_cast<double>(n_columns) / n_mpi_processes)),
2847 column_block_size);
2848
2849 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2850 copy_to(tmp);
2851
2852 // get pointer to data held by the process
2853 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
2854
2855 herr_t status;
2856 // dataset dimensions
2857 hsize_t dims[2];
2858
2859 // set up file access property list with parallel I/O access
2860 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
2861 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2862 AssertThrow(status >= 0, ExcIO());
2863
2864 // create a new file collectively and release property list identifier
2865 hid_t file_id =
2866 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
2867 status = H5Pclose(plist_id);
2868 AssertThrow(status >= 0, ExcIO());
2869
2870 // As ScaLAPACK, and therefore the class ScaLAPACKMatrix, uses column-major
2871 // ordering but HDF5 row-major ordering, we have to reverse entries related to
2872 // columns and rows in the following. create the dataspace for the dataset
2873 dims[0] = tmp.n_columns;
2874 dims[1] = tmp.n_rows;
2875
2876 hid_t filespace = H5Screate_simple(2, dims, nullptr);
2877
2878 // create the chunked dataset with default properties and close filespace
2879 hsize_t chunk_dims[2];
2880 // revert order of rows and columns as ScaLAPACK uses column-major ordering
2881 chunk_dims[0] = chunk_size.second;
2882 chunk_dims[1] = chunk_size.first;
2883 plist_id = H5Pcreate(H5P_DATASET_CREATE);
2884 H5Pset_chunk(plist_id, 2, chunk_dims);
2885 hid_t type_id = hdf5_type_id(data);
2886 hid_t dset_id = H5Dcreate2(
2887 file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
2888
2889 status = H5Sclose(filespace);
2890 AssertThrow(status >= 0, ExcIO());
2891
2892 status = H5Pclose(plist_id);
2893 AssertThrow(status >= 0, ExcIO());
2894
2895 // gather the number of local rows and columns from all processes
2896 std::vector<int> proc_n_local_rows(n_mpi_processes),
2897 proc_n_local_columns(n_mpi_processes);
2898 int ierr = MPI_Allgather(&tmp.n_local_rows,
2899 1,
2900 MPI_INT,
2901 proc_n_local_rows.data(),
2902 1,
2903 MPI_INT,
2904 tmp.grid->mpi_communicator);
2905 AssertThrowMPI(ierr);
2906 ierr = MPI_Allgather(&tmp.n_local_columns,
2907 1,
2908 MPI_INT,
2909 proc_n_local_columns.data(),
2910 1,
2911 MPI_INT,
2912 tmp.grid->mpi_communicator);
2913 AssertThrowMPI(ierr);
2914
2915 const unsigned int my_rank(
2916 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
2917
2918 // hyperslab selection parameters
2919 // each process defines dataset in memory and writes it to the hyperslab in
2920 // the file
2921 hsize_t count[2];
2922 count[0] = tmp.n_local_columns;
2923 count[1] = tmp.n_rows;
2924 hid_t memspace = H5Screate_simple(2, count, nullptr);
2925
2926 hsize_t offset[2] = {0};
2927 for (unsigned int i = 0; i < my_rank; ++i)
2928 offset[0] += proc_n_local_columns[i];
2929
2930 // select hyperslab in the file.
2931 filespace = H5Dget_space(dset_id);
2932 status = H5Sselect_hyperslab(
2933 filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2934 AssertThrow(status >= 0, ExcIO());
2935
2936 // create property list for independent dataset write
2937 plist_id = H5Pcreate(H5P_DATASET_XFER);
2938 status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
2939 AssertThrow(status >= 0, ExcIO());
2940
2941 // process with no data will not participate in writing to the file
2942 if (tmp.values.size() > 0)
2943 {
2944 status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
2945 AssertThrow(status >= 0, ExcIO());
2946 }
2947 // close/release sources
2948 status = H5Dclose(dset_id);
2949 AssertThrow(status >= 0, ExcIO());
2950 status = H5Sclose(filespace);
2951 AssertThrow(status >= 0, ExcIO());
2952 status = H5Sclose(memspace);
2953 AssertThrow(status >= 0, ExcIO());
2954 status = H5Pclose(plist_id);
2955 AssertThrow(status >= 0, ExcIO());
2956 status = H5Fclose(file_id);
2957 AssertThrow(status >= 0, ExcIO());
2958
2959 // before writing the state and property to file wait for
2960 // all processes to finish writing the matrix content to the file
2961 ierr = MPI_Barrier(tmp.grid->mpi_communicator);
2962 AssertThrowMPI(ierr);
2963
2964 // only root process will write state and property to the file
2965 if (tmp.grid->this_mpi_process == 0)
2966 {
2967 // open file using default properties
2968 hid_t file_id_reopen =
2969 H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
2970
2971 // create HDF5 enum type for LAPACKSupport::State and
2972 // LAPACKSupport::Property
2973 hid_t state_enum_id, property_enum_id;
2974 internal::create_HDF5_state_enum_id(state_enum_id);
2975 internal::create_HDF5_property_enum_id(property_enum_id);
2976
2977 // create the data space for the state enum
2978 hsize_t dims_state[1];
2979 dims_state[0] = 1;
2980 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2981 // create the dataset for the state enum
2982 hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
2983 "/state",
2984 state_enum_id,
2985 state_enum_dataspace,
2986 H5P_DEFAULT,
2987 H5P_DEFAULT,
2988 H5P_DEFAULT);
2989 // write the dataset for the state enum
2990 status = H5Dwrite(state_enum_dataset,
2991 state_enum_id,
2992 H5S_ALL,
2993 H5S_ALL,
2994 H5P_DEFAULT,
2995 &state);
2996 AssertThrow(status >= 0, ExcIO());
2997
2998 // create the data space for the property enum
2999 hsize_t dims_property[1];
3000 dims_property[0] = 1;
3001 hid_t property_enum_dataspace =
3002 H5Screate_simple(1, dims_property, nullptr);
3003 // create the dataset for the property enum
3004 hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
3005 "/property",
3006 property_enum_id,
3007 property_enum_dataspace,
3008 H5P_DEFAULT,
3009 H5P_DEFAULT,
3010 H5P_DEFAULT);
3011 // write the dataset for the property enum
3012 status = H5Dwrite(property_enum_dataset,
3013 property_enum_id,
3014 H5S_ALL,
3015 H5S_ALL,
3016 H5P_DEFAULT,
3017 &property);
3018 AssertThrow(status >= 0, ExcIO());
3019
3020 status = H5Dclose(state_enum_dataset);
3021 AssertThrow(status >= 0, ExcIO());
3022 status = H5Dclose(property_enum_dataset);
3023 AssertThrow(status >= 0, ExcIO());
3024 status = H5Sclose(state_enum_dataspace);
3025 AssertThrow(status >= 0, ExcIO());
3026 status = H5Sclose(property_enum_dataspace);
3027 AssertThrow(status >= 0, ExcIO());
3028 status = H5Tclose(state_enum_id);
3029 AssertThrow(status >= 0, ExcIO());
3030 status = H5Tclose(property_enum_id);
3031 AssertThrow(status >= 0, ExcIO());
3032 status = H5Fclose(file_id_reopen);
3033 AssertThrow(status >= 0, ExcIO());
3034 }
3035
3036# endif
3037}
3038
3039
3040
3041template <typename NumberType>
3042void
3043ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
3044{
3045# ifndef DEAL_II_WITH_HDF5
3046 (void)filename;
3047 AssertThrow(false, ExcMessage("HDF5 support is disabled."));
3048# else
3049# ifdef H5_HAVE_PARALLEL
3050 // implementation for configurations equipped with a parallel file system
3051 load_parallel(filename);
3052
3053# else
3054 // implementation for configurations with no parallel file system
3055 load_serial(filename);
3056# endif
3057# endif
3058}
3059
3060
3061
3062template <typename NumberType>
3063void
3064ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
3065{
3066# ifndef DEAL_II_WITH_HDF5
3067 (void)filename;
3068 Assert(false, ExcInternalError());
3069# else
3070
3071 /*
3072 * The content of the distributed matrix is copied to a matrix using a 1x1
3073 * process grid. Therefore, one process has all the data and can write it to a
3074 * file
3075 */
3076 // create a 1xP column grid with P being the number of MPI processes
3077 const auto one_grid =
3078 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3079 1,
3080 1);
3081
3082 const int MB = n_rows, NB = n_columns;
3083 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);
3084
3085 int state_int = -1;
3086 int property_int = -1;
3087
3088 // the 1x1 grid has only one process and this one reads
3089 // the content of the matrix from the HDF5 file
3090 if (tmp.grid->mpi_process_is_active)
3091 {
3092 herr_t status;
3093
3094 // open the file in read-only mode
3095 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
3096
3097 // open the dataset in the file
3098 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3099
3100 // check the datatype of the data in the file
3101 // datatype of source and destination must have the same class
3102 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3103 // Selection
3104 hid_t datatype = H5Dget_type(dataset_id);
3105 H5T_class_t t_class_in = H5Tget_class(datatype);
3106 H5T_class_t t_class = H5Tget_class(hdf5_type_id(tmp.values.data()));
3107 AssertThrow(
3108 t_class_in == t_class,
3109 ExcMessage(
3110 "The data type of the matrix to be read does not match the archive"));
3111
3112 // get dataspace handle
3113 hid_t dataspace_id = H5Dget_space(dataset_id);
3114 // get number of dimensions
3115 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3116 AssertThrow(ndims == 2, ExcIO());
3117 // get every dimension
3118 hsize_t dims[2];
3119 H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3120 AssertThrow(
3121 static_cast<int>(dims[0]) == n_columns,
3122 ExcMessage(
3123 "The number of columns of the matrix does not match the content of the archive"));
3124 AssertThrow(
3125 static_cast<int>(dims[1]) == n_rows,
3126 ExcMessage(
3127 "The number of rows of the matrix does not match the content of the archive"));
3128
3129 // read data
3130 status = H5Dread(dataset_id,
3131 hdf5_type_id(tmp.values.data()),
3132 H5S_ALL,
3133 H5S_ALL,
3134 H5P_DEFAULT,
3135 tmp.values.data());
3136 AssertThrow(status >= 0, ExcIO());
3137
3138 // create HDF5 enum type for LAPACKSupport::State and
3139 // LAPACKSupport::Property
3140 hid_t state_enum_id, property_enum_id;
3141 internal::create_HDF5_state_enum_id(state_enum_id);
3142 internal::create_HDF5_property_enum_id(property_enum_id);
3143
3144 // open the datasets for the state and property enum in the file
3145 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3146 hid_t datatype_state = H5Dget_type(dataset_state_id);
3147 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3148 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3149
3150 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3151 hid_t datatype_property = H5Dget_type(dataset_property_id);
3152 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3153 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3154
3155 // get dataspace handles
3156 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3157 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3158 // get number of dimensions
3159 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3160 AssertThrow(ndims_state == 1, ExcIO());
3161 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3162 AssertThrow(ndims_property == 1, ExcIO());
3163 // get every dimension
3164 hsize_t dims_state[1];
3165 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3166 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3167 hsize_t dims_property[1];
3168 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3169 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3170
3171 // read data
3172 status = H5Dread(dataset_state_id,
3173 state_enum_id,
3174 H5S_ALL,
3175 H5S_ALL,
3176 H5P_DEFAULT,
3177 &tmp.state);
3178 AssertThrow(status >= 0, ExcIO());
3179 // To send the state from the root process to the other processes
3180 // the state enum is casted to an integer, that will be broadcasted and
3181 // subsequently casted back to the enum type
3182 state_int = static_cast<int>(tmp.state);
3183
3184 status = H5Dread(dataset_property_id,
3185 property_enum_id,
3186 H5S_ALL,
3187 H5S_ALL,
3188 H5P_DEFAULT,
3189 &tmp.property);
3190 AssertThrow(status >= 0, ExcIO());
3191 // To send the property from the root process to the other processes
3192 // the state enum is casted to an integer, that will be broadcasted and
3193 // subsequently casted back to the enum type
3194 property_int = static_cast<int>(tmp.property);
3195
3196 // terminate access to the data spaces
3197 status = H5Sclose(dataspace_id);
3198 AssertThrow(status >= 0, ExcIO());
3199 status = H5Sclose(dataspace_state);
3200 AssertThrow(status >= 0, ExcIO());
3201 status = H5Sclose(dataspace_property);
3202 AssertThrow(status >= 0, ExcIO());
3203
3204 // release data type handles
3205 status = H5Tclose(datatype);
3206 AssertThrow(status >= 0, ExcIO());
3207 status = H5Tclose(state_enum_id);
3208 AssertThrow(status >= 0, ExcIO());
3209 status = H5Tclose(property_enum_id);
3210 AssertThrow(status >= 0, ExcIO());
3211
3212 // end access to the data sets and release resources used by them
3213 status = H5Dclose(dataset_state_id);
3214 AssertThrow(status >= 0, ExcIO());
3215 status = H5Dclose(dataset_id);
3216 AssertThrow(status >= 0, ExcIO());
3217 status = H5Dclose(dataset_property_id);
3218 AssertThrow(status >= 0, ExcIO());
3219
3220 // close the file.
3221 status = H5Fclose(file_id);
3222 AssertThrow(status >= 0, ExcIO());
3223 }
3224 // so far only the root process has the correct state integer --> broadcasting
3225 tmp.grid->send_to_inactive(&state_int, 1);
3226 // so far only the root process has the correct property integer -->
3227 // broadcasting
3228 tmp.grid->send_to_inactive(&property_int, 1);
3229
3230 tmp.state = static_cast<LAPACKSupport::State>(state_int);
3231 tmp.property = static_cast<LAPACKSupport::Property>(property_int);
3232
3233 tmp.copy_to(*this);
3234
3235# endif // DEAL_II_WITH_HDF5
3236}
3237
3238
3239
3240template <typename NumberType>
3241void
3242ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
3243{
3244# ifndef DEAL_II_WITH_HDF5
3245 (void)filename;
3246 Assert(false, ExcInternalError());
3247# else
3248# ifndef H5_HAVE_PARALLEL
3249 Assert(false, ExcInternalError());
3250# else
3251
3252 const unsigned int n_mpi_processes(
3253 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
3254 MPI_Info info = MPI_INFO_NULL;
3255 /*
3256 * The content of the distributed matrix is copied to a matrix using a
3257 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
3258 * of the matrix, which they can write to the file
3259 */
3260 // create a 1xP column grid with P being the number of MPI processes
3261 const auto column_grid =
3262 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3263 1,
3264 n_mpi_processes);
3265
3266 const int MB = n_rows;
3267 // for the choice of NB see explanation in save_parallel()
3268 const int NB = std::max(static_cast<int>(std::ceil(
3269 static_cast<double>(n_columns) / n_mpi_processes)),
3270 column_block_size);
3271
3272 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
3273
3274 // get pointer to data held by the process
3275 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
3276
3277 herr_t status;
3278
3279 // set up file access property list with parallel I/O access
3280 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
3281 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
3282 AssertThrow(status >= 0, ExcIO());
3283
3284 // open file collectively in read-only mode and release property list
3285 // identifier
3286 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
3287 status = H5Pclose(plist_id);
3288 AssertThrow(status >= 0, ExcIO());
3289
3290 // open the dataset in the file collectively
3291 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3292
3293 // check the datatype of the dataset in the file
3294 // if the classes of type of the dataset and the matrix do not match abort
3295 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3296 // Selection
3297 hid_t datatype = hdf5_type_id(data);
3298 hid_t datatype_inp = H5Dget_type(dataset_id);
3299 H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
3300 H5T_class_t t_class = H5Tget_class(datatype);
3301 AssertThrow(
3302 t_class_inp == t_class,
3303 ExcMessage(
3304 "The data type of the matrix to be read does not match the archive"));
3305
3306 // get the dimensions of the matrix stored in the file
3307 // get dataspace handle
3308 hid_t dataspace_id = H5Dget_space(dataset_id);
3309 // get number of dimensions
3310 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3311 AssertThrow(ndims == 2, ExcIO());
3312 // get every dimension
3313 hsize_t dims[2];
3314 status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3315 AssertThrow(status >= 0, ExcIO());
3316 AssertThrow(
3317 static_cast<int>(dims[0]) == n_columns,
3318 ExcMessage(
3319 "The number of columns of the matrix does not match the content of the archive"));
3320 AssertThrow(
3321 static_cast<int>(dims[1]) == n_rows,
3322 ExcMessage(
3323 "The number of rows of the matrix does not match the content of the archive"));
3324
3325 // gather the number of local rows and columns from all processes
3326 std::vector<int> proc_n_local_rows(n_mpi_processes),
3327 proc_n_local_columns(n_mpi_processes);
3328 int ierr = MPI_Allgather(&tmp.n_local_rows,
3329 1,
3330 MPI_INT,
3331 proc_n_local_rows.data(),
3332 1,
3333 MPI_INT,
3334 tmp.grid->mpi_communicator);
3335 AssertThrowMPI(ierr);
3336 ierr = MPI_Allgather(&tmp.n_local_columns,
3337 1,
3338 MPI_INT,
3339 proc_n_local_columns.data(),
3340 1,
3341 MPI_INT,
3342 tmp.grid->mpi_communicator);
3343 AssertThrowMPI(ierr);
3344
3345 const unsigned int my_rank(
3346 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
3347
3348 // hyperslab selection parameters
3349 // each process defines dataset in memory and writes it to the hyperslab in
3350 // the file
3351 hsize_t count[2];
3352 count[0] = tmp.n_local_columns;
3353 count[1] = tmp.n_local_rows;
3354
3355 hsize_t offset[2] = {0};
3356 for (unsigned int i = 0; i < my_rank; ++i)
3357 offset[0] += proc_n_local_columns[i];
3358
3359 // select hyperslab in the file
3360 status = H5Sselect_hyperslab(
3361 dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
3362 AssertThrow(status >= 0, ExcIO());
3363
3364 // create a memory dataspace independently
3365 hid_t memspace = H5Screate_simple(2, count, nullptr);
3366
3367 // read data independently
3368 status =
3369 H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
3370 AssertThrow(status >= 0, ExcIO());
3371
3372 // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
3373 hid_t state_enum_id, property_enum_id;
3374 internal::create_HDF5_state_enum_id(state_enum_id);
3375 internal::create_HDF5_property_enum_id(property_enum_id);
3376
3377 // open the datasets for the state and property enum in the file
3378 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3379 hid_t datatype_state = H5Dget_type(dataset_state_id);
3380 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3381 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3382
3383 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3384 hid_t datatype_property = H5Dget_type(dataset_property_id);
3385 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3386 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3387
3388 // get dataspace handles
3389 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3390 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3391 // get number of dimensions
3392 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3393 AssertThrow(ndims_state == 1, ExcIO());
3394 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3395 AssertThrow(ndims_property == 1, ExcIO());
3396 // get every dimension
3397 hsize_t dims_state[1];
3398 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3399 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3400 hsize_t dims_property[1];
3401 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3402 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3403
3404 // read data
3405 status = H5Dread(
3406 dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
3407 AssertThrow(status >= 0, ExcIO());
3408
3409 status = H5Dread(dataset_property_id,
3410 property_enum_id,
3411 H5S_ALL,
3412 H5S_ALL,
3413 H5P_DEFAULT,
3414 &tmp.property);
3415 AssertThrow(status >= 0, ExcIO());
3416
3417 // close/release sources
3418 status = H5Sclose(memspace);
3419 AssertThrow(status >= 0, ExcIO());
3420 status = H5Dclose(dataset_id);
3421 AssertThrow(status >= 0, ExcIO());
3422 status = H5Dclose(dataset_state_id);
3423 AssertThrow(status >= 0, ExcIO());
3424 status = H5Dclose(dataset_property_id);
3425 AssertThrow(status >= 0, ExcIO());
3426 status = H5Sclose(dataspace_id);
3427 AssertThrow(status >= 0, ExcIO());
3428 status = H5Sclose(dataspace_state);
3429 AssertThrow(status >= 0, ExcIO());
3430 status = H5Sclose(dataspace_property);
3431 AssertThrow(status >= 0, ExcIO());
3432 // status = H5Tclose(datatype);
3433 // AssertThrow(status >= 0, ExcIO());
3434 status = H5Tclose(state_enum_id);
3435 AssertThrow(status >= 0, ExcIO());
3436 status = H5Tclose(property_enum_id);
3437 AssertThrow(status >= 0, ExcIO());
3438 status = H5Fclose(file_id);
3439 AssertThrow(status >= 0, ExcIO());
3440
3441 // copying the distributed matrices
3442 tmp.copy_to(*this);
3443
3444# endif // H5_HAVE_PARALLEL
3445# endif // DEAL_II_WITH_HDF5
3446}
3447
3448
3449
3450namespace internal
3451{
3452 namespace
3453 {
3454 template <typename NumberType>
3455 void
3456 scale_columns(ScaLAPACKMatrix<NumberType> & matrix,
3457 const ArrayView<const NumberType> &factors)
3458 {
3459 Assert(matrix.n() == factors.size(),
3460 ExcDimensionMismatch(matrix.n(), factors.size()));
3461
3462 for (unsigned int i = 0; i < matrix.local_n(); ++i)
3463 {
3464 const NumberType s = factors[matrix.global_column(i)];
3465
3466 for (unsigned int j = 0; j < matrix.local_m(); ++j)
3467 matrix.local_el(j, i) *= s;
3468 }
3469 }
3470
3471 template <typename NumberType>
3472 void
3473 scale_rows(ScaLAPACKMatrix<NumberType> & matrix,
3474 const ArrayView<const NumberType> &factors)
3475 {
3476 Assert(matrix.m() == factors.size(),
3477 ExcDimensionMismatch(matrix.m(), factors.size()));
3478
3479 for (unsigned int i = 0; i < matrix.local_m(); ++i)
3480 {
3481 const NumberType s = factors[matrix.global_row(i)];
3482
3483 for (unsigned int j = 0; j < matrix.local_n(); ++j)
3484 matrix.local_el(i, j) *= s;
3485 }
3486 }
3487
3488 } // namespace
3489} // namespace internal
3490
3491
3492
3493template <typename NumberType>
3494template <class InputVector>
3495void
3496ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
3497{
3498 if (this->grid->mpi_process_is_active)
3499 internal::scale_columns(*this, make_array_view(factors));
3500}
3501
3502
3503
3504template <typename NumberType>
3505template <class InputVector>
3506void
3507ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
3508{
3509 if (this->grid->mpi_process_is_active)
3510 internal::scale_rows(*this, make_array_view(factors));
3511}
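// A minimal usage sketch for scale_columns()/scale_rows() (placeholder
// names): each factor multiplies the corresponding global column or row, so
// the vector length must equal n() or m(), respectively.
//
//   std::vector<double> col_factors(A.n(), 2.0); // double every column
//   A.scale_columns(col_factors);
//   std::vector<double> row_factors(A.m(), 0.5); // halve every row
//   A.scale_rows(row_factors);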
3512
3513
3514
3515// instantiations
3516# include "scalapack.inst"
3517
3518
3519DEAL_II_NAMESPACE_CLOSE
3520
3521#endif // DEAL_II_WITH_SCALAPACK