lapack_full_matrix.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2005 - 2023 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
16
25#include <deal.II/lac/vector.h>
26
27#include <iomanip>
28#include <iostream>
29
 31DEAL_II_NAMESPACE_OPEN
32using namespace LAPACKSupport;
33
34namespace internal
35{
36 namespace LAPACKFullMatrixImplementation
37 {
38 // ZGEEV/CGEEV and DGEEV/SGEEV need different work arrays and different
39 // output arrays for eigenvalues. This makes working with generic scalar
40 // types a bit difficult. To get around this, geev_helper has the same
41 // signature for real and complex arguments, but it ignores some
42 // parameters when called with a real type and ignores different
43 // parameters when called with a complex type.
44 template <typename T>
45 void
46 geev_helper(const char vl,
 47 const char vr,
 48 AlignedVector<T> &matrix,
49 const types::blas_int n_rows,
50 std::vector<T> &real_part_eigenvalues,
51 std::vector<T> &imag_part_eigenvalues,
52 std::vector<T> &left_eigenvectors,
53 std::vector<T> &right_eigenvectors,
54 std::vector<T> &real_work,
55 std::vector<T> & /*complex_work*/,
56 const types::blas_int work_flag,
57 types::blas_int &info)
58 {
59 static_assert(std::is_same_v<T, double> || std::is_same_v<T, float>,
60 "Only implemented for double and float");
61 Assert(matrix.size() == static_cast<std::size_t>(n_rows * n_rows),
63 Assert(static_cast<std::size_t>(n_rows) <= real_part_eigenvalues.size(),
65 Assert(static_cast<std::size_t>(n_rows) <= imag_part_eigenvalues.size(),
67 if (vl == 'V')
68 Assert(static_cast<std::size_t>(n_rows * n_rows) <=
69 left_eigenvectors.size(),
71 if (vr == 'V')
72 Assert(static_cast<std::size_t>(n_rows * n_rows) <=
73 right_eigenvectors.size(),
75 Assert(work_flag == -1 ||
76 static_cast<std::size_t>(2 * n_rows) <= real_work.size(),
78 Assert(work_flag == -1 || std::max<long int>(1, 3 * n_rows) <= work_flag,
80 geev(&vl,
81 &vr,
82 &n_rows,
83 matrix.data(),
84 &n_rows,
85 real_part_eigenvalues.data(),
86 imag_part_eigenvalues.data(),
87 left_eigenvectors.data(),
88 &n_rows,
89 right_eigenvectors.data(),
90 &n_rows,
91 real_work.data(),
92 &work_flag,
93 &info);
94 }
95
96
97
98 template <typename T>
99 void
100 geev_helper(const char vl,
101 const char vr,
102 AlignedVector<std::complex<T>> &matrix,
103 const types::blas_int n_rows,
104 std::vector<T> & /*real_part_eigenvalues*/,
105 std::vector<std::complex<T>> &eigenvalues,
106 std::vector<std::complex<T>> &left_eigenvectors,
107 std::vector<std::complex<T>> &right_eigenvectors,
108 std::vector<std::complex<T>> &complex_work,
109 std::vector<T> &real_work,
110 const types::blas_int work_flag,
111 types::blas_int &info)
112 {
113 static_assert(
114 std::is_same_v<T, double> || std::is_same_v<T, float>,
115 "Only implemented for std::complex<double> and std::complex<float>");
116 Assert(matrix.size() == static_cast<std::size_t>(n_rows * n_rows),
118 Assert(static_cast<std::size_t>(n_rows) <= eigenvalues.size(),
120 if (vl == 'V')
121 Assert(static_cast<std::size_t>(n_rows * n_rows) <=
122 left_eigenvectors.size(),
124 if (vr == 'V')
125 Assert(static_cast<std::size_t>(n_rows * n_rows) <=
126 right_eigenvectors.size(),
128 Assert(work_flag == -1 ||
129 std::max<std::size_t>(1, work_flag) <= real_work.size(),
131 Assert(work_flag == -1 || std::max<long int>(1, 2 * n_rows) <= work_flag,
133
134 geev(&vl,
135 &vr,
136 &n_rows,
137 matrix.data(),
138 &n_rows,
139 eigenvalues.data(),
140 left_eigenvectors.data(),
141 &n_rows,
142 right_eigenvectors.data(),
143 &n_rows,
144 complex_work.data(),
145 &work_flag,
146 real_work.data(),
147 &info);
148 }
149
150
151
152 template <typename T>
153 void
154 gesdd_helper(const char job,
155 const types::blas_int n_rows,
 156 const types::blas_int n_cols,
 157 AlignedVector<T> &matrix,
158 std::vector<T> &singular_values,
159 AlignedVector<T> &left_vectors,
160 AlignedVector<T> &right_vectors,
161 std::vector<T> &real_work,
162 std::vector<T> & /*complex work*/,
163 std::vector<types::blas_int> &integer_work,
164 const types::blas_int work_flag,
165 types::blas_int &info)
166 {
167 Assert(job == 'A' || job == 'S' || job == 'O' || job == 'N',
169 Assert(static_cast<std::size_t>(n_rows * n_cols) == matrix.size(),
171 Assert(std::min<std::size_t>(n_rows, n_cols) <= singular_values.size(),
173 Assert(8 * std::min<std::size_t>(n_rows, n_cols) <= integer_work.size(),
175 Assert(work_flag == -1 ||
176 static_cast<std::size_t>(work_flag) <= real_work.size(),
178 gesdd(&job,
179 &n_rows,
180 &n_cols,
181 matrix.data(),
182 &n_rows,
183 singular_values.data(),
184 left_vectors.data(),
185 &n_rows,
186 right_vectors.data(),
187 &n_cols,
188 real_work.data(),
189 &work_flag,
190 integer_work.data(),
191 &info);
192 }
193
194
195
196 template <typename T>
197 void
198 gesdd_helper(const char job,
199 const types::blas_int n_rows,
200 const types::blas_int n_cols,
201 AlignedVector<std::complex<T>> &matrix,
202 std::vector<T> &singular_values,
203 AlignedVector<std::complex<T>> &left_vectors,
204 AlignedVector<std::complex<T>> &right_vectors,
205 std::vector<std::complex<T>> &work,
206 std::vector<T> &real_work,
207 std::vector<types::blas_int> &integer_work,
208 const types::blas_int &work_flag,
209 types::blas_int &info)
210 {
211 Assert(job == 'A' || job == 'S' || job == 'O' || job == 'N',
213 Assert(static_cast<std::size_t>(n_rows * n_cols) == matrix.size(),
215 Assert(static_cast<std::size_t>(std::min(n_rows, n_cols)) <=
216 singular_values.size(),
218 Assert(8 * std::min<std::size_t>(n_rows, n_cols) <= integer_work.size(),
220 Assert(work_flag == -1 ||
221 static_cast<std::size_t>(work_flag) <= real_work.size(),
223
224 gesdd(&job,
225 &n_rows,
226 &n_cols,
227 matrix.data(),
228 &n_rows,
229 singular_values.data(),
230 left_vectors.data(),
231 &n_rows,
232 right_vectors.data(),
233 &n_cols,
234 work.data(),
235 &work_flag,
236 real_work.data(),
237 integer_work.data(),
238 &info);
239 }
240 } // namespace LAPACKFullMatrixImplementation
241} // namespace internal
242
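These helpers follow LAPACK's standard two-phase workspace-query convention: a first call with work_flag/lwork set to -1 only writes the optimal workspace size into work[0], and a second call with a resized array does the actual computation. A minimal sketch of the pattern (arguments elided; the real two-phase calls appear in compute_svd() and compute_eigenvalues() below):

  types::blas_int info  = 0;
  types::blas_int lwork = -1;   // -1 => workspace query only
  work.resize(1);
  geev_helper(jobvl, jobvr, /* matrix, sizes, output arrays ... */ work, real_work, lwork, info);
  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);  // suggested size
  work.resize(lwork);
  geev_helper(jobvl, jobvr, /* same arguments ... */ work, real_work, lwork, info);  // compute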
243
244
 245template <typename number>
 246LAPACKFullMatrix<number>::LAPACKFullMatrix(const size_type n)
247 : TransposeTable<number>(n, n)
248 , state(matrix)
249 , property(general)
250{}
251
252
253
 254template <typename number>
 255LAPACKFullMatrix<number>::LAPACKFullMatrix(const size_type m, const size_type n)
256 : TransposeTable<number>(m, n)
257 , state(matrix)
258 , property(general)
259{}
260
261
262
 263template <typename number>
 264LAPACKFullMatrix<number>::LAPACKFullMatrix(const LAPACKFullMatrix &M)
265 : TransposeTable<number>(M)
266 , state(matrix)
267 , property(general)
268{}
269
270
271
 272template <typename number>
 273LAPACKFullMatrix<number> &
 274LAPACKFullMatrix<number>::operator=(const LAPACKFullMatrix<number> &M)
 275{
 276 TransposeTable<number>::operator=(M);
 277 state = M.state;
278 property = M.property;
279 return *this;
280}
281
282
283
284template <typename number>
285void
291
292
293
294template <typename number>
 295void
 296LAPACKFullMatrix<number>::grow_or_shrink(const size_type n)
297{
298 const size_type s = std::min(std::min(this->m(), n), this->n());
 299 TransposeTable<number> copy(std::move(*this));
 300 this->TransposeTable<number>::reinit(n, n);
301 for (size_type i = 0; i < s; ++i)
302 for (size_type j = 0; j < s; ++j)
303 (*this)(i, j) = copy(i, j);
304}
305
306
307
308template <typename number>
 309void
 310LAPACKFullMatrix<number>::apply_givens_rotation(
311 const std::array<number, 3> &csr,
312 const size_type i,
313 const size_type k,
314 const bool left)
315{
316 auto &A = *this;
317 // see Golub 2013 "Matrix computations", p241 5.1.9 Applying Givens
318 // Rotations but note the difference in notation, namely the sign of s: we
319 // have G * A, where G[1,1] = s
320 if (left)
321 {
322 for (size_type j = 0; j < A.n(); ++j)
323 {
324 const number t = A(i, j);
325 A(i, j) = csr[0] * A(i, j) + csr[1] * A(k, j);
326 A(k, j) = -csr[1] * t + csr[0] * A(k, j);
327 }
328 }
329 else
330 {
331 for (size_type j = 0; j < A.m(); ++j)
332 {
333 const number t = A(j, i);
334 A(j, i) = csr[0] * A(j, i) + csr[1] * A(j, k);
335 A(j, k) = -csr[1] * t + csr[0] * A(j, k);
336 }
337 }
338}
339
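In terms of the comment above, the returned triple csr = (c, s, r) for a pair (x, y) satisfies

   c * x + s * y = r
  -s * x + c * y = 0,      with c^2 + s^2 = 1,

so applying the rotation from the left (left == true) combines rows i and k exactly as in the first loop, while left == false combines columns i and k in the same way.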
340
341
342template <typename number>
 343void
 344LAPACKFullMatrix<number>::remove_row_and_column(const size_type row,
345 const size_type col)
346{
347 AssertIndexRange(row, this->m());
348 AssertIndexRange(col, this->n());
349
350 const size_type nrows = this->m() - 1;
351 const size_type ncols = this->n() - 1;
352
353 TransposeTable<number> copy(std::move(*this));
354 this->TransposeTable<number>::reinit(nrows, ncols);
355
356 for (size_type j = 0; j < ncols; ++j)
357 {
358 const size_type jj = (j < col ? j : j + 1);
359 for (size_type i = 0; i < nrows; ++i)
360 {
361 const size_type ii = (i < row ? i : i + 1);
362 (*this)(i, j) = copy(ii, jj);
363 }
364 }
365}
366
367
368
369template <typename number>
370void
376
377
378
379template <typename number>
 380template <typename number2>
 381LAPACKFullMatrix<number> &
 382LAPACKFullMatrix<number>::operator=(const FullMatrix<number2> &M)
383{
384 Assert(this->m() == M.m(), ExcDimensionMismatch(this->m(), M.m()));
385 Assert(this->n() == M.n(), ExcDimensionMismatch(this->n(), M.n()));
386 for (size_type i = 0; i < this->m(); ++i)
387 for (size_type j = 0; j < this->n(); ++j)
388 (*this)(i, j) = M(i, j);
389
390 state = LAPACKSupport::matrix;
391 property = LAPACKSupport::general;
392 return *this;
393}
394
395
396
397template <typename number>
 398template <typename number2>
 399LAPACKFullMatrix<number> &
 400LAPACKFullMatrix<number>::operator=(const SparseMatrix<number2> &M)
401{
402 Assert(this->m() == M.n(), ExcDimensionMismatch(this->m(), M.n()));
403 Assert(this->n() == M.m(), ExcDimensionMismatch(this->n(), M.m()));
404 for (size_type i = 0; i < this->m(); ++i)
405 for (size_type j = 0; j < this->n(); ++j)
406 (*this)(i, j) = M.el(i, j);
407
408 state = LAPACKSupport::matrix;
409 property = LAPACKSupport::general;
410 return *this;
411}
412
413
414
 415template <typename number>
 416LAPACKFullMatrix<number> &
 417LAPACKFullMatrix<number>::operator=(const number d)
418{
 419 (void)d;
 420 Assert(d == number(0), ExcScalarAssignmentOnlyForZeroValue());
421
422 if (this->n_elements() != 0)
423 this->reset_values();
424
425 state = LAPACKSupport::matrix;
426 return *this;
427}
429
430
 431template <typename number>
 432LAPACKFullMatrix<number> &
 433LAPACKFullMatrix<number>::operator*=(const number factor)
434{
 435 Assert(state == LAPACKSupport::matrix ||
 436 state == LAPACKSupport::inverse_matrix,
437 ExcState(state));
438
439 AssertIsFinite(factor);
440 const char type = 'G';
441 const number cfrom = 1.;
442 const types::blas_int m = this->m();
443 const types::blas_int n = this->n();
444 const types::blas_int lda = this->m();
445 types::blas_int info = 0;
446 // kl and ku will not be referenced for type = G (dense matrices).
447 const types::blas_int kl = 0;
448 number *values = this->values.data();
449
450 lascl(&type, &kl, &kl, &cfrom, &factor, &m, &n, values, &lda, &info);
451
452 // Negative return value implies a wrong argument. This should be internal.
453 Assert(info >= 0, ExcInternalError());
454
 455 return *this;
 456}
 457
458
459
 460template <typename number>
 461LAPACKFullMatrix<number> &
 462LAPACKFullMatrix<number>::operator/=(const number factor)
463{
 464 Assert(state == LAPACKSupport::matrix ||
 465 state == LAPACKSupport::inverse_matrix,
466 ExcState(state));
467
468 AssertIsFinite(factor);
469 Assert(factor != number(0.), ExcZero());
470
471 const char type = 'G';
472 const number cto = 1.;
473 const types::blas_int m = this->m();
474 const types::blas_int n = this->n();
475 const types::blas_int lda = this->m();
476 types::blas_int info = 0;
477 // kl and ku will not be referenced for type = G (dense matrices).
478 const types::blas_int kl = 0;
479 number *values = this->values.data();
480
481 lascl(&type, &kl, &kl, &factor, &cto, &m, &n, values, &lda, &info);
482
483 // Negative return value implies a wrong argument. This should be internal.
484 Assert(info >= 0, ExcInternalError());
485
486 return *this;
487}
488
489
490
491template <typename number>
 492void
 493LAPACKFullMatrix<number>::add(const number a, const LAPACKFullMatrix<number> &A)
494{
 495 Assert(state == LAPACKSupport::matrix ||
 496 state == LAPACKSupport::inverse_matrix,
497 ExcState(state));
498
499 Assert(m() == A.m(), ExcDimensionMismatch(m(), A.m()));
500 Assert(n() == A.n(), ExcDimensionMismatch(n(), A.n()));
501
503
504 // BLAS does not offer functions to add matrices.
 505 // LAPACKFullMatrix is stored in a contiguous array
 506 // ==> use BLAS 1 to add the two arrays as vectors
507 const types::blas_int n = this->m() * this->n();
508 const types::blas_int inc = 1;
509 number *values = this->values.data();
510 const number *values_A = A.values.data();
511
512 axpy(&n, &a, values_A, &inc, values, &inc);
513}
514
516
517namespace
518{
519 template <typename number>
520 void
521 cholesky_rank1(LAPACKFullMatrix<number> &A,
522 const number a,
523 const Vector<number> &v)
524 {
525 const typename LAPACKFullMatrix<number>::size_type N = A.n();
526 Vector<number> z(v);
527 // Cholesky update / downdate, see
528 // 6.5.4 Cholesky Updating and Downdating, Golub 2013 Matrix computations
529 // Note that potrf() is called with LAPACKSupport::L , so the
530 // factorization is stored in lower triangular part.
531 // Also see discussion here
532 // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=2646
533 if (a > 0.)
534 {
535 // simple update via a sequence of Givens rotations.
536 // Observe that
537 //
538 // | L^T |T | L^T |
539 // A <-- | | | | = L L^T + z z^T
540 // | z^T | | z^T |
541 //
542 // so we can get updated factor by doing a sequence of Givens
543 // rotations to make the matrix lower-triangular
544 // Also see LINPACK's dchud http://www.netlib.org/linpack/dchud.f
545 z *= std::sqrt(a);
546 for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
547 {
 548 const std::array<number, 3> csr =
 549 Utilities::LinearAlgebra::givens_rotation<number>(A(k, k), z(k));
550 A(k, k) = csr[2];
551 for (typename LAPACKFullMatrix<number>::size_type i = k + 1; i < N;
552 ++i)
553 {
554 const number t = A(i, k);
555 A(i, k) = csr[0] * A(i, k) + csr[1] * z(i);
556 z(i) = -csr[1] * t + csr[0] * z(i);
 557 }
 558 }
 559 }
560 else
561 {
 562 // downdating is not always possible as we may end up with a
 563 // negative definite matrix. If it is possible, it boils
 564 // down to the application of hyperbolic rotations.
565 // Observe that
566 //
567 // | L^T |T | L^T |
568 // A <-- | | S | | = L L^T - z z^T
569 // | z^T | | z^T |
570 //
571 // |In 0 |
572 // S := | |
573 // |0 -1 |
574 //
575 // We are looking for H which is S-orthogonal (HSH^T=S) and
576 // can restore upper-triangular factor of the factorization of A above.
577 // We will use Hyperbolic rotations to do the job
578 //
579 // | c -s | | x1 | | r |
580 // | | = | | = | |, c^2 - s^2 = 1
581 // |-s c | | x2 | | 0 |
582 //
583 // which have real solution only if x2 <= x1.
584 // See also Linpack's http://www.netlib.org/linpack/dchdd.f and
585 // https://infoscience.epfl.ch/record/161468/files/cholupdate.pdf and
586 // "Analysis of a recursive Least Squares Hyperbolic Rotation Algorithm
587 // for Signal Processing", Alexander, Pan, Plemmons, 1988.
588 z *= std::sqrt(-a);
589 for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
590 {
 591 const std::array<number, 3> csr =
 592 Utilities::LinearAlgebra::hyperbolic_rotation<number>(A(k, k), z(k));
593 A(k, k) = csr[2];
594 for (typename LAPACKFullMatrix<number>::size_type i = k + 1; i < N;
595 ++i)
596 {
597 const number t = A(i, k);
598 A(i, k) = csr[0] * A(i, k) - csr[1] * z(i);
599 z(i) = -csr[1] * t + csr[0] * z(i);
600 }
601 }
 602 }
 603 }
 604
605
606 template <typename number>
607 void
608 cholesky_rank1(LAPACKFullMatrix<std::complex<number>> & /*A*/,
609 const std::complex<number> /*a*/,
610 const Vector<std::complex<number>> & /*v*/)
 611 {
 612 AssertThrow(false, ExcNotImplemented());
 613 }
614} // namespace
615
616
617
618template <typename number>
 619void
 620LAPACKFullMatrix<number>::add(const number a, const Vector<number> &v)
 621{
622 Assert(property == LAPACKSupport::symmetric, ExcProperty(property));
623
624 Assert(n() == m(), ExcInternalError());
625 Assert(m() == v.size(), ExcDimensionMismatch(m(), v.size()));
626
628
629 if (state == LAPACKSupport::matrix)
630 {
631 {
632 const types::blas_int N = this->m();
633 const char uplo = LAPACKSupport::U;
634 const types::blas_int lda = N;
635 const types::blas_int incx = 1;
636
637 syr(&uplo, &N, &a, v.begin(), &incx, this->values.begin(), &lda);
638 }
640 const size_type N = this->m();
641 // FIXME: we should really only update upper or lower triangular parts
642 // of symmetric matrices and make sure the interface is consistent,
643 // for example operator(i,j) gives correct results regardless of storage.
644 for (size_type i = 0; i < N; ++i)
645 for (size_type j = 0; j < i; ++j)
646 (*this)(i, j) = (*this)(j, i);
647 }
648 else if (state == LAPACKSupport::cholesky)
649 {
650 cholesky_rank1(*this, a, v);
651 }
652 else
 653 AssertThrow(false, ExcState(state));
 654}
655
656
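A minimal usage sketch of the rank-1 update path above (illustrative only; the matrix and vector are assumed to be set up elsewhere): once a Cholesky factorization is held, add() updates the stored factor in place via Givens rotations (a > 0) or hyperbolic rotations (a < 0) rather than refactorizing.

  #include <deal.II/lac/lapack_full_matrix.h>
  #include <deal.II/lac/vector.h>

  void rank1_update_sketch(dealii::LAPACKFullMatrix<double> &A,
                           const dealii::Vector<double>     &v)
  {
    A.set_property(dealii::LAPACKSupport::symmetric);
    A.compute_cholesky_factorization(); // state becomes 'cholesky'
    A.add(0.5, v);                      // A += 0.5 * v * v^T, factor updated in place
  }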
657
658template <typename number>
 659void
 660LAPACKFullMatrix<number>::vmult(Vector<number> &w,
661 const Vector<number> &v,
662 const bool adding) const
663{
664 const types::blas_int mm = this->m();
665 const types::blas_int nn = this->n();
666 const number alpha = 1.;
667 const number beta = (adding ? 1. : 0.);
668 const number null = 0.;
669
670 // use trmv for triangular matrices
671 if ((property == upper_triangular || property == lower_triangular) &&
672 (mm == nn) && state == matrix)
673 {
674 Assert(adding == false, ExcNotImplemented());
675
676 AssertDimension(v.size(), this->n());
677 AssertDimension(w.size(), this->m());
678
679 const char diag = 'N';
680 const char trans = 'N';
 681 const char uplo =
 682 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
683
684 w = v;
685
686 const types::blas_int N = mm;
687 const types::blas_int lda = N;
688 const types::blas_int incx = 1;
689
690 trmv(
691 &uplo, &trans, &diag, &N, this->values.data(), &lda, w.data(), &incx);
692
693 return;
694 }
695
696 switch (state)
697 {
698 case matrix:
699 case inverse_matrix:
700 {
701 AssertDimension(v.size(), this->n());
702 AssertDimension(w.size(), this->m());
703
704 gemv("N",
705 &mm,
706 &nn,
707 &alpha,
708 this->values.data(),
709 &mm,
710 v.data(),
711 &one,
712 &beta,
713 w.data(),
714 &one);
 715 break;
 716 }
717 case svd:
718 {
719 std::lock_guard<std::mutex> lock(mutex);
720 AssertDimension(v.size(), this->n());
721 AssertDimension(w.size(), this->m());
722 // Compute V^T v
723 work.resize(std::max(mm, nn));
724 gemv("N",
725 &nn,
726 &nn,
727 &alpha,
728 svd_vt->values.data(),
729 &nn,
730 v.data(),
731 &one,
732 &null,
733 work.data(),
734 &one);
735 // Multiply by singular values
736 for (size_type i = 0; i < wr.size(); ++i)
737 work[i] *= wr[i];
738 // Multiply with U
739 gemv("N",
740 &mm,
741 &mm,
742 &alpha,
743 svd_u->values.data(),
744 &mm,
745 work.data(),
746 &one,
747 &beta,
 748 w.data(),
 749 &one);
750 break;
751 }
752 case inverse_svd:
753 {
754 std::lock_guard<std::mutex> lock(mutex);
755 AssertDimension(w.size(), this->n());
756 AssertDimension(v.size(), this->m());
757 // Compute U^T v
758 work.resize(std::max(mm, nn));
759 gemv("T",
760 &mm,
761 &mm,
762 &alpha,
763 svd_u->values.data(),
764 &mm,
765 v.data(),
766 &one,
767 &null,
768 work.data(),
769 &one);
770 // Multiply by singular values
771 for (size_type i = 0; i < wr.size(); ++i)
772 work[i] *= wr[i];
773 // Multiply with V
774 gemv("T",
775 &nn,
776 &nn,
777 &alpha,
778 svd_vt->values.data(),
779 &nn,
780 work.data(),
781 &one,
782 &beta,
783 w.data(),
784 &one);
785 break;
786 }
787 default:
788 Assert(false, ExcState(state));
789 }
790}
791
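In the svd branch above the product is applied in three stages, matching the factorization A = U S V^T: first work = V^T v, then work_i *= sigma_i, then w = U work (+ beta w if adding). In the inverse_svd branch the roles of U and V^T are exchanged and wr already holds the inverted singular values (see compute_inverse_svd() below), so w = V S^+ U^T v, i.e. the Moore-Penrose pseudoinverse applied to v.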
792
793
794template <typename number>
 795void
 796LAPACKFullMatrix<number>::Tvmult(Vector<number> &w,
797 const Vector<number> &v,
 798 const bool adding) const
 799{
800 const types::blas_int mm = this->m();
801 const types::blas_int nn = this->n();
802 const number alpha = 1.;
803 const number beta = (adding ? 1. : 0.);
804 const number null = 0.;
805
806 // use trmv for triangular matrices
807 if ((property == upper_triangular || property == lower_triangular) &&
808 (mm == nn) && state == matrix)
809 {
810 Assert(adding == false, ExcNotImplemented());
811
812 AssertDimension(v.size(), this->n());
813 AssertDimension(w.size(), this->m());
814
815 const char diag = 'N';
816 const char trans = 'T';
 817 const char uplo =
 818 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
819
820 w = v;
822 const types::blas_int N = mm;
823 const types::blas_int lda = N;
824 const types::blas_int incx = 1;
825
826 trmv(
827 &uplo, &trans, &diag, &N, this->values.data(), &lda, w.data(), &incx);
829 return;
830 }
831
832
833 switch (state)
834 {
835 case matrix:
836 case inverse_matrix:
837 {
838 AssertDimension(w.size(), this->n());
839 AssertDimension(v.size(), this->m());
840
841 gemv("T",
842 &mm,
843 &nn,
844 &alpha,
845 this->values.data(),
846 &mm,
847 v.data(),
848 &one,
849 &beta,
850 w.data(),
851 &one);
852 break;
853 }
854 case svd:
855 {
856 std::lock_guard<std::mutex> lock(mutex);
857 AssertDimension(w.size(), this->n());
858 AssertDimension(v.size(), this->m());
859
860 // Compute U^T v
861 work.resize(std::max(mm, nn));
862 gemv("T",
863 &mm,
864 &mm,
865 &alpha,
866 svd_u->values.data(),
867 &mm,
868 v.data(),
869 &one,
870 &null,
871 work.data(),
872 &one);
873 // Multiply by singular values
874 for (size_type i = 0; i < wr.size(); ++i)
875 work[i] *= wr[i];
876 // Multiply with V
877 gemv("T",
878 &nn,
879 &nn,
880 &alpha,
881 svd_vt->values.data(),
882 &nn,
883 work.data(),
884 &one,
885 &beta,
886 w.data(),
887 &one);
888 break;
889 }
890 case inverse_svd:
891 {
892 std::lock_guard<std::mutex> lock(mutex);
893 AssertDimension(v.size(), this->n());
894 AssertDimension(w.size(), this->m());
895
896 // Compute V^T v
897 work.resize(std::max(mm, nn));
898 gemv("N",
899 &nn,
900 &nn,
901 &alpha,
902 svd_vt->values.data(),
903 &nn,
904 v.data(),
905 &one,
906 &null,
907 work.data(),
908 &one);
909 // Multiply by singular values
910 for (size_type i = 0; i < wr.size(); ++i)
911 work[i] *= wr[i];
912 // Multiply with U
913 gemv("N",
914 &mm,
915 &mm,
916 &alpha,
917 svd_u->values.data(),
918 &mm,
919 work.data(),
920 &one,
921 &beta,
922 w.data(),
923 &one);
 924 break;
 925 }
926 default:
927 Assert(false, ExcState(state));
928 }
929}
930
931
932
933template <typename number>
934void
936 const Vector<number> &v) const
937{
938 vmult(w, v, true);
939}
940
941
942
943template <typename number>
944void
946 const Vector<number> &v) const
947{
948 Tvmult(w, v, true);
949}
950
951
952
953template <typename number>
 954void
 955LAPACKFullMatrix<number>::mmult(LAPACKFullMatrix<number> &C,
 956 const LAPACKFullMatrix<number> &B,
957 const bool adding) const
958{
 959 Assert(state == matrix || state == inverse_matrix, ExcState(state));
 960 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
961 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
962 Assert(this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
963 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
964 Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
965 const types::blas_int mm = this->m();
966 const types::blas_int nn = B.n();
967 const types::blas_int kk = this->n();
968 const number alpha = 1.;
969 const number beta = (adding ? 1. : 0.);
970
971 gemm("N",
972 "N",
973 &mm,
974 &nn,
975 &kk,
976 &alpha,
977 this->values.data(),
978 &mm,
979 B.values.data(),
980 &kk,
981 &beta,
982 C.values.data(),
983 &mm);
984}
985
986
987
988template <typename number>
 989void
 990LAPACKFullMatrix<number>::mmult(FullMatrix<number> &C,
 991 const LAPACKFullMatrix<number> &B,
992 const bool adding) const
993{
 994 Assert(state == matrix || state == inverse_matrix, ExcState(state));
 995 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
996 Assert(this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
997 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
998 Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
999 const types::blas_int mm = this->m();
1000 const types::blas_int nn = B.n();
1001 const types::blas_int kk = this->n();
1002 const number alpha = 1.;
1003 const number beta = (adding ? 1. : 0.);
1004
1005 // since FullMatrix stores the matrix in transposed order compared to this
1006 // matrix, compute B^T * A^T = (A * B)^T
1007 gemm("T",
1008 "T",
1009 &nn,
1010 &mm,
1011 &kk,
1012 &alpha,
1013 B.values.data(),
1014 &kk,
1015 this->values.data(),
1016 &mm,
1017 &beta,
1018 &C(0, 0),
1019 &nn);
1020}
1021
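The trick used above relies on the identity (A B)^T = B^T A^T: LAPACKFullMatrix (a TransposeTable) stores its entries in column-major, Fortran order, while FullMatrix is row-major, so letting gemm compute the column-major product B^T A^T writes exactly the row-major representation of A B into C's storage, with no explicit transposition needed.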
1022
1023
1024template <typename number>
 1025void
 1026LAPACKFullMatrix<number>::Tmmult(LAPACKFullMatrix<number> &C,
1027 const LAPACKFullMatrix<number> &B,
1028 const Vector<number> &V,
1029 const bool adding) const
1030{
 1031 Assert(state == matrix || state == inverse_matrix, ExcState(state));
 1032 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1033 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1034
1035 const LAPACKFullMatrix<number> &A = *this;
1036
1037 Assert(A.m() == B.m(), ExcDimensionMismatch(A.m(), B.m()));
1038 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1039 Assert(C.m() == A.n(), ExcDimensionMismatch(A.n(), C.m()));
1040 Assert(V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
1041
1042 const types::blas_int mm = A.n();
1043 const types::blas_int nn = B.n();
1044 const types::blas_int kk = B.m();
1045
1046 // lapack does not have any triple product routines, including the case of
1047 // diagonal matrix in the middle, see
1048 // https://stackoverflow.com/questions/3548069/multiplying-three-matrices-in-blas-with-the-middle-one-being-diagonal
1049 // http://mathforum.org/kb/message.jspa?messageID=3546564
1050
1051 std::lock_guard<std::mutex> lock(mutex);
1052 // First, get V*B into "work" array
1053 work.resize(kk * nn);
1054 // following http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=768#p2577
1055 // do left-multiplication manually. Note that Xscal would require to first
1056 // copy the input vector as multiplication is done inplace.
1057 for (types::blas_int j = 0; j < nn; ++j)
1058 for (types::blas_int i = 0; i < kk; ++i)
1059 {
 1060 Assert(j * kk + i < static_cast<types::blas_int>(work.size()),
 1061 ExcInternalError());
1062 work[j * kk + i] = V(i) * B(i, j);
1063 }
1064
1065 // Now do the standard Tmmult:
1066 const number alpha = 1.;
1067 const number beta = (adding ? 1. : 0.);
1068
1069 gemm("T",
1070 "N",
1071 &mm,
1072 &nn,
1073 &kk,
1074 &alpha,
1075 this->values.data(),
1076 &kk,
1077 work.data(),
1078 &kk,
1079 &beta,
1080 C.values.data(),
1081 &mm);
1082}
1083
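Written out, the routine above computes C = A^T diag(V) B, i.e. C(m, j) = sum_i A(i, m) * V(i) * B(i, j): the double loop forms work = diag(V) * B column by column (work[j * kk + i] = V(i) * B(i, j)), and the single gemm call then multiplies by A^T and accumulates the result into C.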
1084
1085
1086template <typename number>
 1087void
 1088LAPACKFullMatrix<number>::transpose(LAPACKFullMatrix<number> &B) const
1089{
1090 const LAPACKFullMatrix<number> &A = *this;
1091 AssertDimension(A.m(), B.n());
1092 AssertDimension(A.n(), B.m());
1093 const types::blas_int m = B.m();
1094 const types::blas_int n = B.n();
1095#ifdef DEAL_II_LAPACK_WITH_MKL
1096 const number one = 1.;
1097 omatcopy('C', 'C', n, m, one, A.values.data(), n, B.values.data(), m);
1098#else
1099 for (types::blas_int i = 0; i < m; ++i)
 1100 for (types::blas_int j = 0; j < n; ++j)
 1101 B(j, i) = numbers::NumberTraits<number>::conjugate(A(i, j));
1102#endif
1103}
1104
1105
1106
1107template <typename number>
 1108void
 1109LAPACKFullMatrix<number>::scale_rows(const Vector<number> &V)
1110{
1111 LAPACKFullMatrix<number> &A = *this;
1112 Assert(state == matrix || state == inverse_matrix, ExcState(state));
1113 Assert(V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
1114
1115 const types::blas_int nn = A.n();
1116 const types::blas_int kk = A.m();
1117 for (types::blas_int j = 0; j < nn; ++j)
1118 for (types::blas_int i = 0; i < kk; ++i)
1119 A(i, j) *= V(i);
1120}
1121
1122
1123
1124template <typename number>
 1125void
 1126LAPACKFullMatrix<number>::Tmmult(LAPACKFullMatrix<number> &C,
1127 const LAPACKFullMatrix<number> &B,
1128 const bool adding) const
1129{
 1130 Assert(state == matrix || state == inverse_matrix, ExcState(state));
 1131 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1132 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1133 Assert(this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
1134 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1135 Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1136 const types::blas_int mm = this->n();
1137 const types::blas_int nn = B.n();
1138 const types::blas_int kk = B.m();
1139 const number alpha = 1.;
1140 const number beta = (adding ? 1. : 0.);
1141
1142 if (PointerComparison::equal(this, &B))
 1143 {
 1144 syrk(&LAPACKSupport::U,
1145 "T",
1146 &nn,
1147 &kk,
1148 &alpha,
1149 this->values.data(),
1150 &kk,
1151 &beta,
1152 C.values.data(),
1153 &nn);
1154
1155 // fill-in lower triangular part
1156 for (types::blas_int j = 0; j < nn; ++j)
1157 for (types::blas_int i = 0; i < j; ++i)
1158 C(j, i) = C(i, j);
1159
1160 C.property = symmetric;
1161 }
1162 else
1163 {
1164 gemm("T",
1165 "N",
1166 &mm,
1167 &nn,
1168 &kk,
1169 &alpha,
1170 this->values.data(),
1171 &kk,
1172 B.values.data(),
1173 &kk,
1174 &beta,
1175 C.values.data(),
1176 &mm);
1177 }
1178}
1179
1180
1181
1182template <typename number>
 1183void
 1184LAPACKFullMatrix<number>::Tmmult(FullMatrix<number> &C,
1185 const LAPACKFullMatrix<number> &B,
1186 const bool adding) const
1187{
 1188 Assert(state == matrix || state == inverse_matrix, ExcState(state));
 1189 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1190 Assert(this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
1191 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1192 Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1193 const types::blas_int mm = this->n();
1194 const types::blas_int nn = B.n();
1195 const types::blas_int kk = B.m();
1196 const number alpha = 1.;
1197 const number beta = (adding ? 1. : 0.);
1198
1199 // since FullMatrix stores the matrix in transposed order compared to this
1200 // matrix, compute B^T * A = (A^T * B)^T
1201 gemm("T",
1202 "N",
1203 &nn,
1204 &mm,
1205 &kk,
1206 &alpha,
1207 B.values.data(),
1208 &kk,
1209 this->values.data(),
1210 &kk,
1211 &beta,
1212 &C(0, 0),
1213 &nn);
1214}
1215
1216
1217
1218template <typename number>
 1219void
 1220LAPACKFullMatrix<number>::mTmult(LAPACKFullMatrix<number> &C,
1221 const LAPACKFullMatrix<number> &B,
1222 const bool adding) const
1223{
 1224 Assert(state == matrix || state == inverse_matrix, ExcState(state));
 1225 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1226 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1227 Assert(this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
1228 Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1229 Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
1230 const types::blas_int mm = this->m();
1231 const types::blas_int nn = B.m();
1232 const types::blas_int kk = B.n();
1233 const number alpha = 1.;
1234 const number beta = (adding ? 1. : 0.);
1235
1236 if (PointerComparison::equal(this, &B))
 1237 {
 1238 syrk(&LAPACKSupport::U,
1239 "N",
1240 &nn,
1241 &kk,
1242 &alpha,
1243 this->values.data(),
1244 &nn,
1245 &beta,
1246 C.values.data(),
1247 &nn);
1248
1249 // fill-in lower triangular part
1250 for (types::blas_int j = 0; j < nn; ++j)
1251 for (types::blas_int i = 0; i < j; ++i)
1252 C(j, i) = C(i, j);
1253
1254 C.property = symmetric;
1255 }
1256 else
1257 {
1258 gemm("N",
1259 "T",
1260 &mm,
1261 &nn,
1262 &kk,
1263 &alpha,
1264 this->values.data(),
1265 &mm,
1266 B.values.data(),
1267 &nn,
1268 &beta,
1269 C.values.data(),
1270 &mm);
1271 }
1272}
1273
1274
1275
1276template <typename number>
 1277void
 1278LAPACKFullMatrix<number>::mTmult(FullMatrix<number> &C,
1279 const LAPACKFullMatrix<number> &B,
1280 const bool adding) const
1281{
 1282 Assert(state == matrix || state == inverse_matrix, ExcState(state));
 1283 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1284 Assert(this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
1285 Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1286 Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
1287 const types::blas_int mm = this->m();
1288 const types::blas_int nn = B.m();
1289 const types::blas_int kk = B.n();
1290 const number alpha = 1.;
1291 const number beta = (adding ? 1. : 0.);
1292
1293 // since FullMatrix stores the matrix in transposed order compared to this
1294 // matrix, compute B * A^T = (A * B^T)^T
1295 gemm("N",
1296 "T",
1297 &nn,
1298 &mm,
1299 &kk,
1300 &alpha,
1301 B.values.data(),
1302 &nn,
1303 this->values.data(),
1304 &mm,
1305 &beta,
1306 &C(0, 0),
1307 &nn);
1308}
1309
1310
1311
1312template <typename number>
 1313void
 1314LAPACKFullMatrix<number>::TmTmult(LAPACKFullMatrix<number> &C,
1315 const LAPACKFullMatrix<number> &B,
1316 const bool adding) const
1317{
 1318 Assert(state == matrix || state == inverse_matrix, ExcState(state));
 1319 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1320 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1321 Assert(this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
1322 Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1323 Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1324 const types::blas_int mm = this->n();
1325 const types::blas_int nn = B.m();
1326 const types::blas_int kk = B.n();
1327 const number alpha = 1.;
1328 const number beta = (adding ? 1. : 0.);
1329
1330 gemm("T",
1331 "T",
1332 &mm,
1333 &nn,
1334 &kk,
1335 &alpha,
1336 this->values.data(),
1337 &kk,
1338 B.values.data(),
1339 &nn,
1340 &beta,
1341 C.values.data(),
1342 &mm);
1343}
1344
1345
1346
1347template <typename number>
 1348void
 1349LAPACKFullMatrix<number>::TmTmult(FullMatrix<number> &C,
1350 const LAPACKFullMatrix<number> &B,
1351 const bool adding) const
1352{
 1353 Assert(state == matrix || state == inverse_matrix, ExcState(state));
 1354 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1355 Assert(this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
1356 Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1357 Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1358 const types::blas_int mm = this->n();
1359 const types::blas_int nn = B.m();
1360 const types::blas_int kk = B.n();
1361 const number alpha = 1.;
1362 const number beta = (adding ? 1. : 0.);
1363
1364 // since FullMatrix stores the matrix in transposed order compared to this
1365 // matrix, compute B * A = (A^T * B^T)^T
1366 gemm("N",
1367 "N",
1368 &nn,
1369 &mm,
1370 &kk,
1371 &alpha,
1372 B.values.data(),
1373 &nn,
1374 this->values.data(),
1375 &kk,
1376 &beta,
1377 &C(0, 0),
1378 &nn);
1379}
1380
1381
1382
1383template <typename number>
 1384void
 1385LAPACKFullMatrix<number>::compute_lu_factorization()
1386{
1387 Assert(state == matrix, ExcState(state));
1389
1390 const types::blas_int mm = this->m();
1391 const types::blas_int nn = this->n();
1392 number *const values = this->values.data();
1393 ipiv.resize(mm);
1394 types::blas_int info = 0;
1395 getrf(&mm, &nn, values, &mm, ipiv.data(), &info);
1396
1397 Assert(info >= 0, ExcInternalError());
1398
1399 // if info >= 0, the factorization has been completed
1400 state = lu;
1401
1403}
1404
1405
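A minimal usage sketch of the factorize-then-solve workflow (illustrative only): after compute_lu_factorization() the matrix holds its own LU factors and solve() forwards to getrs.

  #include <deal.II/lac/lapack_full_matrix.h>
  #include <deal.II/lac/vector.h>

  void lu_solve_sketch(dealii::LAPACKFullMatrix<double> &A, dealii::Vector<double> &b)
  {
    A.compute_lu_factorization(); // state changes from 'matrix' to 'lu'
    A.solve(b);                   // overwrites b with the solution of A x = b
  }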
1406
1407template <typename number>
 1408void
 1409LAPACKFullMatrix<number>::set_property(const LAPACKSupport::Property p)
1410{
1411 property = p;
1412}
1413
1414
1415
1416template <typename number>
 1417number
 1418LAPACKFullMatrix<number>::l1_norm() const
1419{
1420 const char type('O');
1421 return norm(type);
1422}
1423
1424
1425
1426template <typename number>
 1427number
 1428LAPACKFullMatrix<number>::linfty_norm() const
1429{
1430 const char type('I');
1431 return norm(type);
1432}
1433
1434
1435
1436template <typename number>
 1437number
 1438LAPACKFullMatrix<number>::frobenius_norm() const
1439{
1440 const char type('F');
1441 return norm(type);
1442}
1443
1444
1445
1446template <typename number>
 1447number
 1448LAPACKFullMatrix<number>::norm(const char type) const
1449{
1450 std::lock_guard<std::mutex> lock(mutex);
1451
 1452 Assert(state == LAPACKSupport::matrix ||
 1453 state == LAPACKSupport::inverse_matrix,
1454 ExcMessage("norms can be called in matrix state only."));
1455
1456 const types::blas_int N = this->n();
1457 const types::blas_int M = this->m();
1458 const number *const values = this->values.data();
1459 if (property == symmetric)
 1460 {
 1461 const types::blas_int lda = std::max<types::blas_int>(1, N);
1462 const types::blas_int lwork =
1463 (type == 'I' || type == 'O') ? std::max<types::blas_int>(1, N) : 0;
1464 work.resize(lwork);
1465 return lansy(&type, &LAPACKSupport::L, &N, values, &lda, work.data());
1466 }
1467 else
 1468 {
 1469 const types::blas_int lda = std::max<types::blas_int>(1, M);
1470 const types::blas_int lwork =
1471 (type == 'I') ? std::max<types::blas_int>(1, M) : 0;
1472 work.resize(lwork);
1473 return lange(&type, &M, &N, values, &lda, work.data());
1474 }
1475}
1476
1477
1478
1479template <typename number>
 1480number
 1481LAPACKFullMatrix<number>::trace() const
1482{
 1483 Assert(state == LAPACKSupport::matrix ||
 1484 state == LAPACKSupport::inverse_matrix,
1485 ExcMessage("Trace can be called in matrix state only."));
1486 Assert(this->n() == this->m(), ExcDimensionMismatch(this->n(), this->m()));
1487
1488 number tr = 0;
1489 for (size_type i = 0; i < this->m(); ++i)
1490 tr += (*this)(i, i);
1491
1492 return tr;
1493}
1494
1495
1496
1497template <typename number>
 1498void
 1499LAPACKFullMatrix<number>::compute_cholesky_factorization()
1500{
1501 Assert(state == matrix, ExcState(state));
1502 Assert(property == symmetric, ExcProperty(property));
1504
1505 const types::blas_int mm = this->m();
1506 const types::blas_int nn = this->n();
1507 (void)mm;
1508 Assert(mm == nn, ExcDimensionMismatch(mm, nn));
1509
1510 number *const values = this->values.data();
1511 types::blas_int info = 0;
1512 const types::blas_int lda = std::max<types::blas_int>(1, nn);
1513 potrf(&LAPACKSupport::L, &nn, values, &lda, &info);
1514
1515 // info < 0 : the info-th argument had an illegal value
1516 Assert(info >= 0, ExcInternalError());
1517
1518 state = cholesky;
1520}
1521
1522
1523
1524template <typename number>
 1525number
 1526LAPACKFullMatrix<number>::reciprocal_condition_number(const number a_norm) const
1527{
1528 std::lock_guard<std::mutex> lock(mutex);
1529 Assert(state == cholesky, ExcState(state));
1530 number rcond = 0.;
1531
1532 const types::blas_int N = this->m();
1533 const number *values = this->values.data();
 1534 types::blas_int info = 0;
 1535 const types::blas_int lda = std::max<types::blas_int>(1, N);
1536 work.resize(3 * N);
1537 iwork.resize(N);
1538
 1539 // use the same uplo as in Cholesky
 1540 pocon(&LAPACKSupport::L,
1541 &N,
1542 values,
1543 &lda,
1544 &a_norm,
1545 &rcond,
1546 work.data(),
1547 iwork.data(),
1548 &info);
1549
1550 Assert(info >= 0, ExcInternalError());
1551
1552 return rcond;
1553}
1554
1555
1556
1557template <typename number>
 1558number
 1559LAPACKFullMatrix<number>::reciprocal_condition_number() const
1560{
1561 std::lock_guard<std::mutex> lock(mutex);
1562 Assert(property == upper_triangular || property == lower_triangular,
1563 ExcProperty(property));
1564 number rcond = 0.;
1565
1566 const types::blas_int N = this->m();
1567 const number *const values = this->values.data();
 1568 types::blas_int info = 0;
 1569 const types::blas_int lda = std::max<types::blas_int>(1, N);
1570 work.resize(3 * N);
1571 iwork.resize(N);
1572
1573 const char norm = '1';
1574 const char diag = 'N';
 1575 const char uplo =
 1576 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1577 trcon(&norm,
1578 &uplo,
1579 &diag,
1580 &N,
1581 values,
1582 &lda,
1583 &rcond,
1584 work.data(),
1585 iwork.data(),
1586 &info);
1587
1588 Assert(info >= 0, ExcInternalError());
1589
1590 return rcond;
1591}
1592
1593
1594
1595template <typename number>
 1596void
 1597LAPACKFullMatrix<number>::compute_svd()
1598{
1599 Assert(state == matrix, ExcState(state));
1601
1602 const types::blas_int mm = this->m();
1603 const types::blas_int nn = this->n();
1604 wr.resize(std::max(mm, nn));
1605 std::fill(wr.begin(), wr.end(), 0.);
1606 ipiv.resize(8 * mm);
1607
1608 svd_u = std::make_unique<LAPACKFullMatrix<number>>(mm, mm);
1609 svd_vt = std::make_unique<LAPACKFullMatrix<number>>(nn, nn);
1610 types::blas_int info = 0;
1611
1612 // First determine optimal workspace size
1613 work.resize(1);
1614 types::blas_int lwork = -1;
1615
1616 // TODO double check size
 1617 std::vector<typename numbers::NumberTraits<number>::real_type> real_work;
 1618 if (numbers::NumberTraits<number>::is_complex)
1619 {
1620 // This array is only used by the complex versions.
1621 std::size_t min = std::min(this->m(), this->n());
1622 std::size_t max = std::max(this->m(), this->n());
1623 real_work.resize(
1624 std::max(5 * min * min + 5 * min, 2 * max * min + 2 * min * min + min));
1625 }
1626
1627 // make sure that the first entry in the work array is clear, in case the
1628 // routine does not completely overwrite the memory:
 1629 work[0] = number();
 1630 internal::LAPACKFullMatrixImplementation::gesdd_helper(LAPACKSupport::A,
1631 mm,
1632 nn,
1633 this->values,
1634 wr,
1635 svd_u->values,
1636 svd_vt->values,
1637 work,
1638 real_work,
1639 ipiv,
1640 lwork,
1641 info);
1642
1643 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("gesdd", info));
1644 // Resize the work array. Add one to the size computed by LAPACK to be on
1645 // the safe side.
1646 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1647
1648 work.resize(lwork);
 1649 // Do the actual SVD.
 1650 internal::LAPACKFullMatrixImplementation::gesdd_helper(LAPACKSupport::A,
1651 mm,
1652 nn,
1653 this->values,
1654 wr,
1655 svd_u->values,
1656 svd_vt->values,
1657 work,
1658 real_work,
1659 ipiv,
1660 lwork,
1661 info);
1662 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("gesdd", info));
1663
1664 work.resize(0);
1665 ipiv.resize(0);
1666
1667 state = LAPACKSupport::svd;
1668}
1669
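compute_svd() stores the factorization A = U S V^T in svd_u, svd_vt and the singular values in wr. A minimal sketch of the typical follow-up, applying the SVD-based pseudoinverse to a right-hand side (illustrative only; x must have size n() and b size m()):

  #include <deal.II/lac/lapack_full_matrix.h>
  #include <deal.II/lac/vector.h>

  void pseudoinverse_sketch(dealii::LAPACKFullMatrix<double> &A,
                            const dealii::Vector<double>     &b,
                            dealii::Vector<double>           &x)
  {
    A.compute_inverse_svd(1e-12); // calls compute_svd() first if still in 'matrix' state
    A.vmult(x, b);                // state 'inverse_svd': x = V S^+ U^T b
  }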
1670
1671
1672template <typename number>
 1673void
 1674LAPACKFullMatrix<number>::compute_inverse_svd(const double threshold)
1675{
1676 if (state == LAPACKSupport::matrix)
1677 compute_svd();
1678
1679 Assert(state == LAPACKSupport::svd, ExcState(state));
1680
1682 const double lim = std::abs(wr[0]) * threshold;
1683 for (size_type i = 0; i < wr.size(); ++i)
1684 {
1685 if (std::abs(wr[i]) > lim)
1686 wr[i] = one / wr[i];
1687 else
1688 wr[i] = 0.;
 1689 }
 1690 state = LAPACKSupport::inverse_svd;
1691}
1692
1693
1694
1695template <typename number>
 1696void
 1697LAPACKFullMatrix<number>::compute_inverse_svd_with_kernel(
1698 const unsigned int kernel_size)
1699{
1700 if (state == LAPACKSupport::matrix)
1701 compute_svd();
1702
1703 Assert(state == LAPACKSupport::svd, ExcState(state));
1704
1706 const unsigned int n_wr = wr.size();
1707 for (size_type i = 0; i < n_wr - kernel_size; ++i)
1708 wr[i] = one / wr[i];
1709 for (size_type i = n_wr - kernel_size; i < n_wr; ++i)
 1710 wr[i] = 0.;
 1711 state = LAPACKSupport::inverse_svd;
1712}
1713
1714
1715
1716template <typename number>
 1717void
 1718LAPACKFullMatrix<number>::invert()
1719{
1720 Assert(state == matrix || state == lu || state == cholesky, ExcState(state));
1721 const types::blas_int mm = this->m();
1722 const types::blas_int nn = this->n();
1723 Assert(nn == mm, ExcNotQuadratic());
1724
1725 number *const values = this->values.data();
1726 types::blas_int info = 0;
1727
1728 if (property != symmetric)
1729 {
1730 if (state == matrix)
1731 compute_lu_factorization();
1732
1733 ipiv.resize(mm);
1734 inv_work.resize(mm);
1735 getri(&mm, values, &mm, ipiv.data(), inv_work.data(), &mm, &info);
1736 }
1737 else
1738 {
1739 if (state == matrix)
1740 compute_cholesky_factorization();
1741
1742 const types::blas_int lda = std::max<types::blas_int>(1, nn);
1743 potri(&LAPACKSupport::L, &nn, values, &lda, &info);
1744 // inverse is stored in lower diagonal, set the upper diagonal
1745 // appropriately:
1746 for (types::blas_int i = 0; i < nn; ++i)
1747 for (types::blas_int j = i + 1; j < nn; ++j)
1748 this->el(i, j) = this->el(j, i);
1749 }
1750
1751 Assert(info >= 0, ExcInternalError());
1753
1754 state = inverse_matrix;
1755}
1756
1757
1758
1759template <typename number>
1760void
1761LAPACKFullMatrix<number>::solve(Vector<number> &v, const bool transposed) const
1762{
1763 Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1764 AssertDimension(this->m(), v.size());
1765 const char *trans = transposed ? &T : &N;
1766 const types::blas_int nn = this->n();
1767 const number *const values = this->values.data();
1768 const types::blas_int n_rhs = 1;
1769 types::blas_int info = 0;
1770
1771 if (state == lu)
1772 {
1773 getrs(
1774 trans, &nn, &n_rhs, values, &nn, ipiv.data(), v.begin(), &nn, &info);
1775 }
1776 else if (state == cholesky)
1777 {
1778 potrs(&LAPACKSupport::L, &nn, &n_rhs, values, &nn, v.begin(), &nn, &info);
1779 }
1780 else if (property == upper_triangular || property == lower_triangular)
1781 {
 1782 const char uplo =
 1783 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1784
1785 const types::blas_int lda = nn;
1786 const types::blas_int ldb = nn;
1787 trtrs(
1788 &uplo, trans, "N", &nn, &n_rhs, values, &lda, v.begin(), &ldb, &info);
1789 }
1790 else
1791 {
1792 Assert(false,
1793 ExcMessage(
1794 "The matrix has to be either factorized or triangular."));
1795 }
1796
1797 Assert(info == 0, ExcInternalError());
1798}
1799
1800
1801
1802template <typename number>
 1803void
 1804LAPACKFullMatrix<number>::solve(LAPACKFullMatrix<number> &B,
1805 const bool transposed) const
1806{
1807 Assert(B.state == matrix, ExcState(B.state));
1808
1809 Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1810 AssertDimension(this->m(), B.m());
1811 const char *trans = transposed ? &T : &N;
1812 const types::blas_int nn = this->n();
1813 const number *const values = this->values.data();
1814 const types::blas_int n_rhs = B.n();
1815 types::blas_int info = 0;
1816
1817 if (state == lu)
1818 {
1819 getrs(trans,
1820 &nn,
1821 &n_rhs,
1822 values,
1823 &nn,
1824 ipiv.data(),
1825 B.values.data(),
1826 &nn,
1827 &info);
1828 }
1829 else if (state == cholesky)
 1830 {
 1831 potrs(&LAPACKSupport::L,
1832 &nn,
1833 &n_rhs,
1834 values,
1835 &nn,
1836 B.values.data(),
1837 &nn,
1838 &info);
1839 }
1840 else if (property == upper_triangular || property == lower_triangular)
1841 {
 1842 const char uplo =
 1843 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1844
1845 const types::blas_int lda = nn;
1846 const types::blas_int ldb = nn;
1847 trtrs(&uplo,
1848 trans,
1849 "N",
1850 &nn,
1851 &n_rhs,
1852 values,
1853 &lda,
1854 B.values.data(),
1855 &ldb,
1856 &info);
1857 }
1858 else
1859 {
1860 Assert(false,
1861 ExcMessage(
1862 "The matrix has to be either factorized or triangular."));
1863 }
1864
1865 Assert(info == 0, ExcInternalError());
1866}
1867
1868
1869
1870template <typename number>
 1871number
 1872LAPACKFullMatrix<number>::determinant() const
1873{
1874 Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1875
1876 // LAPACK doesn't offer a function to compute a matrix determinant.
1877 // This is due to the difficulty in maintaining numerical accuracy, as the
1878 // calculations are likely to overflow or underflow. See
1879 // http://www.netlib.org/lapack/faq.html#_are_there_routines_in_lapack_to_compute_determinants
1880 //
1881 // However, after a PLU decomposition one can compute this by multiplication
1882 // of the diagonal entries with one another. One must take into consideration
1883 // the number of permutations (row swaps) stored in the P matrix.
1884 //
1885 // See the implementations in the blaze library (detNxN)
1886 // https://bitbucket.org/blaze-lib/blaze
1887 // and also
1888 // https://dualm.wordpress.com/2012/01/06/computing-determinant-in-fortran/
1889 // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?p=341&#p336
1890 // for further information.
1891 Assert(state == lu, ExcState(state));
1892 Assert(ipiv.size() == this->m(), ExcInternalError());
1893 number det = 1.0;
1894 for (size_type i = 0; i < this->m(); ++i)
1895 det *=
1896 (ipiv[i] == types::blas_int(i + 1) ? this->el(i, i) : -this->el(i, i));
1897 return det;
1898}
1899
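With the PLU factorization A = P L U (unit diagonal in L), the determinant evaluates to det(A) = det(P) * prod_i U(i, i) with det(P) = (-1)^(number of row swaps); getrf records a swap at step i whenever ipiv[i] != i + 1, which is exactly the sign flip applied in the loop above.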
1900
1901
1902template <typename number>
1903void
1904LAPACKFullMatrix<number>::compute_eigenvalues(const bool right, const bool left)
1905{
1906 Assert(state == matrix, ExcState(state));
1907 const types::blas_int nn = this->n();
1908 wr.resize(nn);
1909 wi.resize(nn);
1910 if (right)
1911 vr.resize(nn * nn);
1912 if (left)
1913 vl.resize(nn * nn);
1914
1915 types::blas_int info = 0;
1916 types::blas_int lwork = 1;
1917 const char jobvr = (right) ? V : N;
1918 const char jobvl = (left) ? V : N;
1919
1920 /*
1921 * The LAPACK routine xGEEV requires a sufficiently large work array; the
1922 * minimum requirement is
1923 *
1924 * work.size >= 4*nn.
1925 *
1926 * However, for better performance, a larger work array may be needed. The
1927 * first call determines the optimal work size and the second does the work.
1928 */
1929 lwork = -1;
1930 work.resize(1);
1931
 1932 std::vector<typename numbers::NumberTraits<number>::real_type> real_work;
 1933 if (numbers::NumberTraits<number>::is_complex)
1934 // This array is only used by the complex versions.
 1935 real_work.resize(2 * this->m());
 1936 internal::LAPACKFullMatrixImplementation::geev_helper(jobvl,
1937 jobvr,
1938 this->values,
1939 this->m(),
1940 wr,
1941 wi,
1942 vl,
1943 vr,
1944 work,
1945 real_work,
1946 lwork,
1947 info);
1948
1949 // geev returns info=0 on success. Since we only queried the optimal size
1950 // for work, everything else would not be acceptable.
1951 Assert(info == 0, ExcInternalError());
1952 // Allocate working array according to suggestion (same strategy as was
1953 // noted in compute_svd).
1954 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1955
1956 // resize workspace array
1957 work.resize(lwork);
1958 real_work.resize(lwork);
1959
 1960 // Finally compute the eigenvalues.
 1961 internal::LAPACKFullMatrixImplementation::geev_helper(jobvl,
1962 jobvr,
1963 this->values,
1964 this->m(),
1965 wr,
1966 wi,
1967 vl,
1968 vr,
1969 work,
1970 real_work,
1971 lwork,
1972 info);
1973
1974 Assert(info >= 0, ExcInternalError());
1975 if (info < 0)
1976 {
1977 AssertThrow(info == 0,
1978 ExcMessage("Lapack error in geev: the " +
1979 std::to_string(-info) +
1980 "-th"
1981 " parameter had an illegal value."));
1982 }
1983 else
1984 {
 1985 AssertThrow(
 1986 info == 0,
1987 ExcMessage(
1988 "Lapack error in geev: the QR algorithm failed to compute "
1989 "all the eigenvalues, and no eigenvectors have been computed."));
1990 }
1991
1993}
1994
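A minimal usage sketch (illustrative only): eigenvalues are returned through eigenvalue(i) as std::complex values assembled from the wr/wi arrays filled above; note that the matrix itself is overwritten by the LAPACK call.

  #include <deal.II/lac/lapack_full_matrix.h>
  #include <complex>

  void eigenvalue_sketch(dealii::LAPACKFullMatrix<double> &A)
  {
    A.compute_eigenvalues(/*right=*/true);
    for (unsigned int i = 0; i < A.n(); ++i)
      {
        const std::complex<double> lambda = A.eigenvalue(i);
        (void)lambda; // use as needed
      }
  }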
1995
1996
1997namespace
1998{
1999 // This function extracts complex eigenvectors from the underlying 'number'
2000 // array 'vr' of the LAPACK eigenvalue routine. For real-valued matrices
2001 // addressed by this function specialization, we might get complex
2002 // eigenvalues, which come in complex-conjugate pairs. In LAPACK, a compact
2003 // storage scheme is applied that stores the real and imaginary part of
2004 // eigenvectors only once, putting the real parts in one column and the
2005 // imaginary part in the next of a real-valued array. Here, we do the
2006 // unpacking into the usual complex values.
2007 template <typename RealNumber>
2008 void
2009 unpack_lapack_eigenvector_and_increment_index(
2010 const std::vector<RealNumber> &vr,
2011 const std::complex<RealNumber> &eigenvalue,
2012 FullMatrix<std::complex<RealNumber>> &result,
2013 unsigned int &index)
2014 {
2015 const std::size_t n = result.n();
2016 if (eigenvalue.imag() != 0.)
2017 {
2018 for (std::size_t j = 0; j < n; ++j)
2019 {
2020 result(j, index).real(vr[index * n + j]);
2021 result(j, index + 1).real(vr[index * n + j]);
2022 result(j, index).imag(vr[(index + 1) * n + j]);
2023 result(j, index + 1).imag(-vr[(index + 1) * n + j]);
2024 }
2025
2026 // we filled two columns with the complex-conjugate pair, so increment
2027 // returned index by 2
2028 index += 2;
2029 }
2030 else
2031 {
2032 for (unsigned int j = 0; j < n; ++j)
2033 result(j, index).real(vr[index * n + j]);
2034
2035 // real-valued case, we only filled one column
2036 ++index;
2037 }
2038 }
2039
2040 // This specialization fills the eigenvectors for complex-valued matrices,
2041 // in which case we simply read off the entry in the 'vr' array.
2042 template <typename ComplexNumber>
2043 void
2044 unpack_lapack_eigenvector_and_increment_index(
2045 const std::vector<ComplexNumber> &vr,
2046 const ComplexNumber &,
2048 unsigned int &index)
2049 {
2050 const std::size_t n = result.n();
2051 for (unsigned int j = 0; j < n; ++j)
2052 result(j, index) = vr[index * n + j];
2053
2054 // complex-valued case always only fills a single column
2055 ++index;
2056 }
2057} // namespace
2058
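Concretely, for a real matrix with a complex-conjugate eigenvalue pair lambda = a ± i b, xGEEV stores Re(v) in column j of vr and Im(v) in column j+1, so the two eigenvectors are recovered as v_j = vr(:, j) + i * vr(:, j+1) and v_{j+1} = vr(:, j) - i * vr(:, j+1); for a real eigenvalue only column j is used. The two overloads above implement exactly this unpacking for real and complex scalar types.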
2059
2060
2061template <typename number>
2064{
2066 Assert(vr.size() == this->n_rows() * this->n_cols(),
2067 ExcMessage("Right eigenvectors are not available! Did you "
2068 "set the associated flag in compute_eigenvalues()?"));
2069
2071 result(n(), n());
2072
2073 for (unsigned int i = 0; i < n();)
2074 unpack_lapack_eigenvector_and_increment_index(vr, eigenvalue(i), result, i);
2075
2076 return result;
2077}
2078
2079
2080
2081template <typename number>
2084{
2086 Assert(vl.size() == this->n_rows() * this->n_cols(),
2087 ExcMessage("Left eigenvectors are not available! Did you "
2088 "set the associated flag in compute_eigenvalues()?"));
2089
2091 result(n(), n());
2092
2093 for (unsigned int i = 0; i < n();)
2094 unpack_lapack_eigenvector_and_increment_index(vl, eigenvalue(i), result, i);
2095
2096 return result;
2097}
2098
2099
2100
2101template <typename number>
 2102void
 2103LAPACKFullMatrix<number>::compute_eigenvalues_symmetric(
2104 const number lower_bound,
2105 const number upper_bound,
 2106 const number abs_accuracy,
 2107 Vector<number> &eigenvalues,
 2108 FullMatrix<number> &eigenvectors)
2109{
2110 Assert(state == matrix, ExcState(state));
2111 const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
2112 Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2113
2114 wr.resize(nn);
2115 LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
2116
2117 number *const values_A = this->values.data();
2118 number *const values_eigenvectors = matrix_eigenvectors.values.data();
2119
2120 types::blas_int info(0), lwork(-1), n_eigenpairs(0);
2121 const char *const jobz(&V);
2122 const char *const uplo(&U);
2123 const char *const range(&V);
2124 const types::blas_int *const dummy(&one);
2125 std::vector<types::blas_int> iwork(static_cast<size_type>(5 * nn));
2126 std::vector<types::blas_int> ifail(static_cast<size_type>(nn));
2127
2128
2129 /*
2130 * The LAPACK routine xSYEVX requires a sufficiently large work array; the
2131 * minimum requirement is
2132 *
2133 * work.size >= 8*nn.
2134 *
2135 * However, for better performance, a larger work array may be needed. The
2136 * first call determines the optimal work size and the second does the work.
2137 */
2138 work.resize(1);
2139
2140 syevx(jobz,
2141 range,
2142 uplo,
2143 &nn,
2144 values_A,
2145 &nn,
2146 &lower_bound,
2147 &upper_bound,
2148 dummy,
2149 dummy,
2150 &abs_accuracy,
2151 &n_eigenpairs,
2152 wr.data(),
2153 values_eigenvectors,
2154 &nn,
2155 work.data(),
2156 &lwork,
2157 iwork.data(),
2158 ifail.data(),
2159 &info);
2160 // syevx returns info=0 on success. Since we only queried the optimal size
2161 // for work, everything else would not be acceptable.
2162 Assert(info == 0, ExcInternalError());
2163 // Allocate working array according to suggestion (same strategy as was noted
2164 // in compute_svd).
2165 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2166 work.resize(static_cast<size_type>(lwork));
2167
2168 // Finally compute the eigenvalues.
2169 syevx(jobz,
2170 range,
2171 uplo,
2172 &nn,
2173 values_A,
2174 &nn,
2175 &lower_bound,
2176 &upper_bound,
2177 dummy,
2178 dummy,
2179 &abs_accuracy,
2180 &n_eigenpairs,
2181 wr.data(),
2182 values_eigenvectors,
2183 &nn,
2184 work.data(),
2185 &lwork,
2186 iwork.data(),
2187 ifail.data(),
2188 &info);
2189
2190 // Negative return value implies a wrong argument. This should be internal.
2191 Assert(info >= 0, ExcInternalError());
2192 if (info < 0)
2193 {
2194 AssertThrow(info == 0,
2195 ExcMessage("Lapack error in syevx: the " +
2196 std::to_string(-info) +
2197 "-th"
2198 " parameter had an illegal value."));
2199 }
2200 else if ((info > 0) && (info <= nn))
2201 {
2202 AssertThrow(info == 0,
2203 ExcMessage(
2204 "Lapack error in syevx: " + std::to_string(info) +
2205 " eigenvectors failed to converge."
2206 " (You may need to scale the abs_accuracy according"
2207 " to your matrix norm.)"));
2208 }
2209 else
2210 {
2211 AssertThrow(info == 0,
2212 ExcMessage("Lapack error in syevx: unknown error."));
2213 }
2214
2215 eigenvalues.reinit(n_eigenpairs);
2216 eigenvectors.reinit(nn, n_eigenpairs, true);
2217
2218 for (size_type i = 0; i < static_cast<size_type>(n_eigenpairs); ++i)
2219 {
2220 eigenvalues(i) = wr[i];
2221 size_type col_begin(i * nn);
2222 for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2223 {
2224 eigenvectors(j, i) = values_eigenvectors[col_begin + j];
2225 }
2226 }
2227
2229}
2230
2231
2232
2233template <typename number>
 2234void
 2235LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric(
 2236 LAPACKFullMatrix<number> &B,
2237 const number lower_bound,
2238 const number upper_bound,
 2239 const number abs_accuracy,
 2240 Vector<number> &eigenvalues,
2241 std::vector<Vector<number>> &eigenvectors,
2242 const types::blas_int itype)
2243{
2244 Assert(state == matrix, ExcState(state));
2245 const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
2246 Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2247 Assert(B.m() == B.n(), ExcNotQuadratic());
2248 Assert(static_cast<size_type>(nn) == B.n(), ExcDimensionMismatch(nn, B.n()));
2249
2250 wr.resize(nn);
2251 LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
2252
2253 number *const values_A = this->values.data();
2254 number *const values_B = B.values.data();
2255 number *const values_eigenvectors = matrix_eigenvectors.values.data();
2256
2257 types::blas_int info(0), lwork(-1), n_eigenpairs(0);
2258 const char *const jobz(&V);
2259 const char *const uplo(&U);
2260 const char *const range(&V);
2261 const types::blas_int *const dummy(&one);
2262 iwork.resize(static_cast<size_type>(5 * nn));
2263 std::vector<types::blas_int> ifail(static_cast<size_type>(nn));
2264
2265
2266 /*
2267 * The LAPACK routine xSYGVX requires a sufficiently large work array; the
2268 * minimum requirement is
2269 *
2270 * work.size >= 8*nn.
2271 *
2272 * However, for better performance, a larger work array may be needed. The
2273 * first call determines the optimal work size and the second does the work.
2274 */
2275 work.resize(1);
2276
2277 sygvx(&itype,
2278 jobz,
2279 range,
2280 uplo,
2281 &nn,
2282 values_A,
2283 &nn,
2284 values_B,
2285 &nn,
2286 &lower_bound,
2287 &upper_bound,
2288 dummy,
2289 dummy,
2290 &abs_accuracy,
2291 &n_eigenpairs,
2292 wr.data(),
2293 values_eigenvectors,
2294 &nn,
2295 work.data(),
2296 &lwork,
2297 iwork.data(),
2298 ifail.data(),
2299 &info);
2300 // sygvx returns info=0 on success. Since we only queried the optimal size
2301 // for work, everything else would not be acceptable.
2302 Assert(info == 0, ExcInternalError());
2303 // Allocate working array according to suggestion (same strategy as was
2304 // noted in compute_svd).
2305 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2306
2307 // resize workspace arrays
2308 work.resize(static_cast<size_type>(lwork));
2309
2310 // Finally compute the generalized eigenvalues.
2311 sygvx(&itype,
2312 jobz,
2313 range,
2314 uplo,
2315 &nn,
2316 values_A,
2317 &nn,
2318 values_B,
2319 &nn,
2320 &lower_bound,
2321 &upper_bound,
2322 dummy,
2323 dummy,
2324 &abs_accuracy,
2325 &n_eigenpairs,
2326 wr.data(),
2327 values_eigenvectors,
2328 &nn,
2329 work.data(),
2330 &lwork,
2331 iwork.data(),
2332 ifail.data(),
2333 &info);
2334
2335 // Negative return value implies a wrong argument. This should be internal.
2336 Assert(info >= 0, ExcInternalError());
2337 if (info < 0)
2338 {
2339 AssertThrow(info == 0,
2340 ExcMessage("Lapack error in sygvx: the " +
2341 std::to_string(-info) +
2342 "-th"
2343 " parameter had an illegal value."));
2344 }
2345 else if ((info > 0) && (info <= nn))
2346 {
 2347 AssertThrow(
 2348 info == 0,
2349 ExcMessage(
2350 "Lapack error in sygvx: ssyevx/dsyevx failed to converge, and " +
2351 std::to_string(info) +
2352 " eigenvectors failed to converge."
2353 " (You may need to scale the abs_accuracy"
2354 " according to the norms of matrices A and B.)"));
2355 }
2356 else if ((info > nn) && (info <= 2 * nn))
2357 {
2358 AssertThrow(info == 0,
2359 ExcMessage(
2360 "Lapack error in sygvx: the leading minor of order " +
2361 std::to_string(info - nn) +
2362 " of matrix B is not positive-definite."
2363 " The factorization of B could not be completed and"
2364 " no eigenvalues or eigenvectors were computed."));
2365 }
2366 else
2367 {
2368 AssertThrow(info == 0,
2369 ExcMessage("Lapack error in sygvx: unknown error."));
2370 }
2371
2372 eigenvalues.reinit(n_eigenpairs);
2373 eigenvectors.resize(n_eigenpairs);
2374
2375 for (size_type i = 0; i < static_cast<size_type>(n_eigenpairs); ++i)
2376 {
2377 eigenvalues(i) = wr[i];
2378 size_type col_begin(i * nn);
2379 eigenvectors[i].reinit(nn, true);
2380 for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2381 {
2382 eigenvectors[i](j) = values_eigenvectors[col_begin + j];
2383 }
2384 }
2385
2387}
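// Editor's sketch (hypothetical, not part of this file): how the interval
// variant defined above might be driven. The matrix names, the spectral
// interval (0, 10] and the accuracy 1e-12 are placeholder assumptions;
// A is assumed symmetric and B symmetric positive definite, as xSYGVX requires.
namespace
{
  void
  example_generalized_eigenvalues_in_interval(LAPACKFullMatrix<double> &A,
                                              LAPACKFullMatrix<double> &B)
  {
    Vector<double>              eigenvalues;
    std::vector<Vector<double>> eigenvectors;
    // Request only the eigenpairs of A x = lambda B x whose eigenvalues lie
    // in the given interval; A and B are overwritten in the process.
    A.compute_generalized_eigenvalues_symmetric(
      B, 0., 10., 1e-12, eigenvalues, eigenvectors);
  }
} // namespace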
2388
2389
2390
2391template <typename number>
2392void
2394LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric(LAPACKFullMatrix<number> &B,
2395 std::vector<Vector<number>> &eigenvectors,
2396 const types::blas_int itype)
2397{
2398 Assert(state == matrix, ExcState(state));
2399 const types::blas_int nn = this->n();
2400 Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2401 Assert(B.m() == B.n(), ExcNotQuadratic());
2402 Assert(static_cast<size_type>(nn) == B.n(), ExcDimensionMismatch(nn, B.n()));
2403 Assert(eigenvectors.size() <= static_cast<size_type>(nn),
2404 ExcMessage("eigenvectors.size() > matrix.n()"));
2405
2406 wr.resize(nn);
2407 wi.resize(nn); // This is set purely for consistency reasons with the
2408 // eigenvalues() function.
2409
2410 number *const values_A = this->values.data();
2411 number *const values_B = B.values.data();
2412
2413 types::blas_int info = 0;
2414 types::blas_int lwork = -1;
2415 const char *const jobz = (eigenvectors.size() > 0) ? (&V) : (&N);
2416 const char *const uplo = (&U);
2417
2418 /*
2419 * The LAPACK routine xSYGV requires a sufficiently large work array; the
2420 * minimum requirement is
2421 *
2422 * work.size >= 3*nn - 1.
2423 *
2424 * However, for better performance, a larger work array may be needed. The
2425 * first call determines the optimal work size and the second does the work.
2426 */
2427 work.resize(1);
2428
2429 sygv(&itype,
2430 jobz,
2431 uplo,
2432 &nn,
2433 values_A,
2434 &nn,
2435 values_B,
2436 &nn,
2437 wr.data(),
2438 work.data(),
2439 &lwork,
2440 &info);
2441 // sygv returns info=0 on success. Since we only queried the optimal size
2442 // for work, everything else would not be acceptable.
2443 Assert(info == 0, ExcInternalError());
2444 // Allocate working array according to suggestion (same strategy as was
2445 // noted in compute_svd).
2446 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2447
2448 // resize workspace array
2449 work.resize(static_cast<size_type>(lwork));
2450
2451 // Finally compute the generalized eigenvalues.
2452 sygv(&itype,
2453 jobz,
2454 uplo,
2455 &nn,
2456 values_A,
2457 &nn,
2458 values_B,
2459 &nn,
2460 wr.data(),
2461 work.data(),
2462 &lwork,
2463 &info);
2464 // Negative return value implies a wrong argument. This should be internal.
2465
2466 Assert(info >= 0, ExcInternalError());
2467 if (info < 0)
2468 {
2469 AssertThrow(info == 0,
2470 ExcMessage("Lapack error in sygv: the " +
2471 std::to_string(-info) +
2472 "-th"
2473 " parameter had an illegal value."));
2474 }
2475 else if ((info > 0) && (info <= nn))
2476 {
2477 AssertThrow(
2478 info == 0,
2479 ExcMessage(
2480 "Lapack error in sygv: ssyev/dsyev failed to converge, and " +
2481 std::to_string(info) +
2482 " off-diagonal elements of an intermediate "
2483 " tridiagonal did not converge to zero."
2484 " (You may need to scale the abs_accuracy"
2485 " according to the norms of matrices A and B.)"));
2486 }
2487 else if ((info > nn) && (info <= 2 * nn))
2488 {
2489 AssertThrow(info == 0,
2490 ExcMessage(
2491 "Lapack error in sygv: the leading minor of order " +
2492 std::to_string(info - nn) +
2493 " of matrix B is not positive-definite."
2494 " The factorization of B could not be completed and"
2495 " no eigenvalues or eigenvectors were computed."));
2496 }
2497 else
2498 {
2499 AssertThrow(info == 0,
2500 ExcMessage("Lapack error in sygv: unknown error."));
2501 }
2502
2503 for (size_type i = 0; i < eigenvectors.size(); ++i)
2504 {
2505 size_type col_begin(i * nn);
2506 eigenvectors[i].reinit(nn, true);
2507 for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2508 {
2509 eigenvectors[i](j) = values_A[col_begin + j];
2510 }
2511 }
2513}
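// Editor's sketch (hypothetical, not part of this file): the three-argument
// overload above computes all eigenpairs of A x = lambda B x; the eigenvectors
// vector passed in must not be larger than the matrix size. Names below are
// placeholders.
namespace
{
  void
  example_all_generalized_eigenpairs(LAPACKFullMatrix<double> &A,
                                     LAPACKFullMatrix<double> &B)
  {
    // Request all n eigenvectors; itype is left at its default, i.e. the
    // problem A x = lambda B x.
    std::vector<Vector<double>> eigenvectors(A.n());
    A.compute_generalized_eigenvalues_symmetric(B, eigenvectors);
    // The eigenvalues themselves end up in the internal array wr and can be
    // queried afterwards, e.g. through the eigenvalue() member function.
  }
} // namespace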
2514
2515
2516
2517template <typename number>
2518void
2519LAPACKFullMatrix<number>::print_formatted(std::ostream &out,
2520 const unsigned int precision,
2521 const bool scientific,
2522 const unsigned int width_,
2523 const char *zero_string,
2524 const double denominator,
2525 const double threshold,
2526 const char *separator) const
2527{
2528 unsigned int width = width_;
2529
2530 Assert((!this->empty()) || (this->n() + this->m() == 0), ExcInternalError());
2531 Assert(state == LAPACKSupport::matrix ||
2532 state == LAPACKSupport::inverse_matrix ||
2533 state == LAPACKSupport::cholesky,
2534 ExcState(state));
2535
2536 // set output format, but store old
2537 // state
2538 std::ios::fmtflags old_flags = out.flags();
2539 std::streamsize old_precision = out.precision(precision);
2540
2541 if (scientific)
2542 {
2543 out.setf(std::ios::scientific, std::ios::floatfield);
2544 if (width == 0u)
2545 width = precision + 7;
2546 }
2547 else
2548 {
2549 out.setf(std::ios::fixed, std::ios::floatfield);
2550 if (width == 0u)
2551 width = precision + 2;
2552 }
2553
2554 for (size_type i = 0; i < this->m(); ++i)
2555 {
2556 // Cholesky is stored in lower triangular, so just output this part:
2557 const size_type nc = state == LAPACKSupport::cholesky ? i + 1 : this->n();
2558 for (size_type j = 0; j < nc; ++j)
2559 // we might have complex numbers, so use abs also to check for nan
2560 // since there is no isnan on complex numbers
2561 if (numbers::is_nan(std::abs((*this)(i, j))))
2562 out << std::setw(width) << (*this)(i, j) << separator;
2563 else if (std::abs(this->el(i, j)) > threshold)
2564 out << std::setw(width) << this->el(i, j) * denominator << separator;
2565 else
2566 out << std::setw(width) << zero_string << separator;
2567 out << std::endl;
2568 }
2569
2570 AssertThrow(out.fail() == false, ExcIO());
2571 // reset output format
2572 out.flags(old_flags);
2573 out.precision(old_precision);
2574}
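// Editor's sketch (hypothetical, not part of this file): dumping a matrix with
// the routine defined above. The precision, notation and zero string used here
// are arbitrary illustrative choices.
namespace
{
  void
  example_print(const LAPACKFullMatrix<double> &M)
  {
    // 5 digits, fixed (non-scientific) notation, automatic column width, and
    // a literal "0" for entries that do not exceed the default threshold.
    M.print_formatted(std::cout, 5, false, 0, "0");
  }
} // namespace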
2575
2576
2577//----------------------------------------------------------------------//
2578
2579template <typename number>
2580void
2581PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M)
2582{
2583 matrix = &M;
2584 mem = nullptr;
2585}
2586
2587
2588template <typename number>
2589void
2590PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M,
2591 VectorMemory<Vector<number>> &V)
2592{
2593 matrix = &M;
2594 mem = &V;
2595}
2596
2597
2598template <typename number>
2599void
2601 const Vector<number> &src) const
2602{
2603 dst = src;
2604 matrix->solve(dst, false);
2605}
2606
2607
2608template <typename number>
2609void
2611 const Vector<number> &src) const
2612{
2613 dst = src;
2614 matrix->solve(dst, true);
2615}
2616
2617
2618template <typename number>
2619void
2621 const BlockVector<number> &src) const
2622{
2623 Assert(mem != nullptr, ExcNotInitialized());
2624 Vector<number> *aux = mem->alloc();
2625 *aux = src;
2626 matrix->solve(*aux, false);
2627 dst = *aux;
2628}
2629
2630
2631template <typename number>
2632void
2634 const BlockVector<number> &src) const
2635{
2636 Assert(mem != nullptr, ExcNotInitialized());
2637 Vector<number> *aux = mem->alloc();
2638 *aux = src;
2639 matrix->solve(*aux, true);
2640 dst = *aux;
2641}
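// Editor's sketch (hypothetical, not part of this file): typical use of the
// preconditioner defined above -- factorize the matrix once, then apply its
// inverse through vmult(). Names are placeholders.
namespace
{
  void
  example_precondition_lu(LAPACKFullMatrix<double> &A,
                          Vector<double>           &x,
                          const Vector<double>     &b)
  {
    A.compute_lu_factorization(); // A now stores its LU factors
    PreconditionLU<double> preconditioner;
    preconditioner.initialize(A); // the overload that needs no VectorMemory
    preconditioner.vmult(x, b);   // x = A^{-1} b using the stored factors
  }
} // namespace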
2642
2643
2644
2645#include "lapack_full_matrix.inst"
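// Editor's note (assumption about deal.II's instantiation machinery): the
// .inst file included above expands to explicit template instantiations,
// roughly of the form
//
//   template class LAPACKFullMatrix<float>;
//   template class LAPACKFullMatrix<double>;
//   template class PreconditionLU<float>;
//   template class PreconditionLU<double>;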
2646
2647