Reference documentation for deal.II version 9.5.0
lapack_full_matrix.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2005 - 2023 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
17
26#include <deal.II/lac/vector.h>
27
28#include <iomanip>
29#include <iostream>
30
31DEAL_II_NAMESPACE_OPEN
32
33using namespace LAPACKSupport;
34
35namespace internal
36{
37 namespace LAPACKFullMatrixImplementation
38 {
39 // ZGEEV/CGEEV and DGEEV/SGEEV need different work arrays and different
40 // output arrays for eigenvalues. This makes working with generic scalar
41 // types a bit difficult. To get around this, geev_helper has the same
42 // signature for real and complex arguments, but it ignores some
43 // parameters when called with a real type and ignores different
44 // parameters when called with a complex type.
45 template <typename T>
46 void
47 geev_helper(const char vl,
48 const char vr,
49 AlignedVector<T> & matrix,
50 const types::blas_int n_rows,
51 std::vector<T> & real_part_eigenvalues,
52 std::vector<T> & imag_part_eigenvalues,
53 std::vector<T> & left_eigenvectors,
54 std::vector<T> & right_eigenvectors,
55 std::vector<T> & real_work,
56 std::vector<T> & /*complex_work*/,
57 const types::blas_int work_flag,
58 types::blas_int & info)
59 {
60 static_assert(std::is_same<T, double>::value ||
61 std::is_same<T, float>::value,
62 "Only implemented for double and float");
63 Assert(matrix.size() == static_cast<std::size_t>(n_rows * n_rows),
64 ExcInternalError());
65 Assert(static_cast<std::size_t>(n_rows) <= real_part_eigenvalues.size(),
66 ExcInternalError());
67 Assert(static_cast<std::size_t>(n_rows) <= imag_part_eigenvalues.size(),
68 ExcInternalError());
69 if (vl == 'V')
70 Assert(static_cast<std::size_t>(n_rows * n_rows) <=
71 left_eigenvectors.size(),
72 ExcInternalError());
73 if (vr == 'V')
74 Assert(static_cast<std::size_t>(n_rows * n_rows) <=
75 right_eigenvectors.size(),
76 ExcInternalError());
77 Assert(work_flag == -1 ||
78 static_cast<std::size_t>(2 * n_rows) <= real_work.size(),
79 ExcInternalError());
80 Assert(work_flag == -1 || std::max<long int>(1, 3 * n_rows) <= work_flag,
81 ExcInternalError());
82 geev(&vl,
83 &vr,
84 &n_rows,
85 matrix.data(),
86 &n_rows,
87 real_part_eigenvalues.data(),
88 imag_part_eigenvalues.data(),
89 left_eigenvectors.data(),
90 &n_rows,
91 right_eigenvectors.data(),
92 &n_rows,
93 real_work.data(),
94 &work_flag,
95 &info);
96 }
97
98
100 template <typename T>
101 void
102 geev_helper(const char vl,
103 const char vr,
104 AlignedVector<std::complex<T>> &matrix,
105 const types::blas_int n_rows,
106 std::vector<T> & /*real_part_eigenvalues*/,
107 std::vector<std::complex<T>> &eigenvalues,
108 std::vector<std::complex<T>> &left_eigenvectors,
109 std::vector<std::complex<T>> &right_eigenvectors,
110 std::vector<std::complex<T>> &complex_work,
111 std::vector<T> & real_work,
112 const types::blas_int work_flag,
113 types::blas_int & info)
114 {
115 static_assert(
116 std::is_same<T, double>::value || std::is_same<T, float>::value,
117 "Only implemented for std::complex<double> and std::complex<float>");
118 Assert(matrix.size() == static_cast<std::size_t>(n_rows * n_rows),
119 ExcInternalError());
120 Assert(static_cast<std::size_t>(n_rows) <= eigenvalues.size(),
121 ExcInternalError());
122 if (vl == 'V')
123 Assert(static_cast<std::size_t>(n_rows * n_rows) <=
124 left_eigenvectors.size(),
125 ExcInternalError());
126 if (vr == 'V')
127 Assert(static_cast<std::size_t>(n_rows * n_rows) <=
128 right_eigenvectors.size(),
129 ExcInternalError());
130 Assert(work_flag == -1 ||
131 std::max<std::size_t>(1, work_flag) <= real_work.size(),
132 ExcInternalError());
133 Assert(work_flag == -1 || std::max<long int>(1, 2 * n_rows) <= work_flag,
134 ExcInternalError());
135
136 geev(&vl,
137 &vr,
138 &n_rows,
139 matrix.data(),
140 &n_rows,
141 eigenvalues.data(),
142 left_eigenvectors.data(),
143 &n_rows,
144 right_eigenvectors.data(),
145 &n_rows,
146 complex_work.data(),
147 &work_flag,
148 real_work.data(),
149 &info);
150 }
151
152
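As a self-contained aside (not part of the deal.II sources; all names below are made up): the comment above describes why the real and complex overloads share one parameter list and each simply ignores the arguments it does not need. The same "same signature, unused parameters" overloading trick, reduced to a minimal sketch:

#include <complex>
#include <cstddef>
#include <vector>

// Real version: uses the real scratch buffer, ignores the complex one.
void helper(std::vector<double> &data,
            std::vector<double> &real_scratch,
            std::vector<std::complex<double>> & /*complex_scratch*/)
{
  real_scratch.assign(data.size(), 0.);
  for (std::size_t i = 0; i < data.size(); ++i)
    real_scratch[i] = 2. * data[i];
}

// Complex version: identical shape, ignores the real scratch buffer.
void helper(std::vector<std::complex<double>> &data,
            std::vector<double> & /*real_scratch*/,
            std::vector<std::complex<double>> &complex_scratch)
{
  complex_scratch.assign(data.size(), 0.);
  for (std::size_t i = 0; i < data.size(); ++i)
    complex_scratch[i] = 2. * data[i];
}

// A single generic caller can now be written once for both scalar types;
// overload resolution picks the right helper.
template <typename T>
void run(std::vector<T> &data)
{
  std::vector<double>               real_scratch;
  std::vector<std::complex<double>> complex_scratch;
  helper(data, real_scratch, complex_scratch);
}

int main()
{
  std::vector<double>               x = {1., 2.};
  std::vector<std::complex<double>> z = {{1., 1.}};
  run(x);
  run(z);
}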
153
154 template <typename T>
155 void
156 gesdd_helper(const char job,
157 const types::blas_int n_rows,
158 const types::blas_int n_cols,
159 AlignedVector<T> & matrix,
160 std::vector<T> & singular_values,
161 AlignedVector<T> & left_vectors,
162 AlignedVector<T> & right_vectors,
163 std::vector<T> & real_work,
164 std::vector<T> & /*complex work*/,
165 std::vector<types::blas_int> &integer_work,
166 const types::blas_int work_flag,
167 types::blas_int & info)
168 {
169 Assert(job == 'A' || job == 'S' || job == 'O' || job == 'N',
170 ExcInternalError());
171 Assert(static_cast<std::size_t>(n_rows * n_cols) == matrix.size(),
172 ExcInternalError());
173 Assert(std::min<std::size_t>(n_rows, n_cols) <= singular_values.size(),
174 ExcInternalError());
175 Assert(8 * std::min<std::size_t>(n_rows, n_cols) <= integer_work.size(),
176 ExcInternalError());
177 Assert(work_flag == -1 ||
178 static_cast<std::size_t>(work_flag) <= real_work.size(),
179 ExcInternalError());
180 gesdd(&job,
181 &n_rows,
182 &n_cols,
183 matrix.data(),
184 &n_rows,
185 singular_values.data(),
186 left_vectors.data(),
187 &n_rows,
188 right_vectors.data(),
189 &n_cols,
190 real_work.data(),
191 &work_flag,
192 integer_work.data(),
193 &info);
194 }
195
196
197
198 template <typename T>
199 void
200 gesdd_helper(const char job,
201 const types::blas_int n_rows,
202 const types::blas_int n_cols,
203 AlignedVector<std::complex<T>> &matrix,
204 std::vector<T> & singular_values,
205 AlignedVector<std::complex<T>> &left_vectors,
206 AlignedVector<std::complex<T>> &right_vectors,
207 std::vector<std::complex<T>> & work,
208 std::vector<T> & real_work,
209 std::vector<types::blas_int> & integer_work,
210 const types::blas_int & work_flag,
211 types::blas_int & info)
212 {
213 Assert(job == 'A' || job == 'S' || job == 'O' || job == 'N',
214 ExcInternalError());
215 Assert(static_cast<std::size_t>(n_rows * n_cols) == matrix.size(),
216 ExcInternalError());
217 Assert(static_cast<std::size_t>(std::min(n_rows, n_cols)) <=
218 singular_values.size(),
219 ExcInternalError());
220 Assert(8 * std::min<std::size_t>(n_rows, n_cols) <= integer_work.size(),
221 ExcInternalError());
222 Assert(work_flag == -1 ||
223 static_cast<std::size_t>(work_flag) <= real_work.size(),
224 ExcInternalError());
225
226 gesdd(&job,
227 &n_rows,
228 &n_cols,
229 matrix.data(),
230 &n_rows,
231 singular_values.data(),
232 left_vectors.data(),
233 &n_rows,
234 right_vectors.data(),
235 &n_cols,
236 work.data(),
237 &work_flag,
238 real_work.data(),
239 integer_work.data(),
240 &info);
241 }
242 } // namespace LAPACKFullMatrixImplementation
243} // namespace internal
244
245
246
247template <typename number>
248LAPACKFullMatrix<number>::LAPACKFullMatrix(const size_type n)
249 : TransposeTable<number>(n, n)
250 , state(matrix)
251 , property(general)
252{}
253
254
255
256template <typename number>
257LAPACKFullMatrix<number>::LAPACKFullMatrix(const size_type m, const size_type n)
258 : TransposeTable<number>(m, n)
259 , state(matrix)
260 , property(general)
261{}
262
263
264
265template <typename number>
266LAPACKFullMatrix<number>::LAPACKFullMatrix(const LAPACKFullMatrix &M)
267 : TransposeTable<number>(M)
268 , state(matrix)
269 , property(general)
270{}
271
272
274template <typename number>
275LAPACKFullMatrix<number> &
276LAPACKFullMatrix<number>::operator=(const LAPACKFullMatrix<number> &M)
277{
278 TransposeTable<number>::operator=(M);
279 state = M.state;
280 property = M.property;
281 return *this;
282}
283
284
285
286template <typename number>
287void
288LAPACKFullMatrix<number>::reinit(const size_type n)
289{
290 this->TransposeTable<number>::reinit(n, n);
291 state = LAPACKSupport::matrix;
292}
293
294
295
296template <typename number>
297void
298LAPACKFullMatrix<number>::grow_or_shrink(const size_type n)
299{
300 const size_type s = std::min(std::min(this->m(), n), this->n());
301 TransposeTable<number> copy(std::move(*this));
302 this->TransposeTable<number>::reinit(n, n);
303 for (size_type i = 0; i < s; ++i)
304 for (size_type j = 0; j < s; ++j)
305 (*this)(i, j) = copy(i, j);
306}
307
308
309
310template <typename number>
311void
312LAPACKFullMatrix<number>::apply_givens_rotation(
313 const std::array<number, 3> &csr,
314 const size_type i,
315 const size_type k,
316 const bool left)
317{
318 auto &A = *this;
319 // see Golub 2013 "Matrix computations", p241 5.1.9 Applying Givens
320 // Rotations but note the difference in notation, namely the sign of s: we
321 // have G * A, where G[1,1] = s
322 if (left)
323 {
324 for (size_type j = 0; j < A.n(); ++j)
325 {
326 const number t = A(i, j);
327 A(i, j) = csr[0] * A(i, j) + csr[1] * A(k, j);
328 A(k, j) = -csr[1] * t + csr[0] * A(k, j);
329 }
330 }
331 else
332 {
333 for (size_type j = 0; j < A.m(); ++j)
334 {
335 const number t = A(j, i);
336 A(j, i) = csr[0] * A(j, i) + csr[1] * A(j, k);
337 A(j, k) = -csr[1] * t + csr[0] * A(j, k);
338 }
339 }
340}
341
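A small stand-alone sketch (illustrative only, not deal.II code) of the rotation applied above, with the same sign convention: row k receives -s*row_i + c*row_k. The 2x2 example chooses c and s so that the (1,0) entry is annihilated, the classical use of a Givens rotation:

#include <array>
#include <cmath>
#include <cstdio>
#include <vector>

// Apply G = [ c  s; -s  c ] from the left to rows i and k of a row-major
// n x n matrix, mirroring the 'left' branch of the member function above.
void apply_givens_left(std::vector<double> &A, const unsigned n,
                       const std::array<double, 3> &csr,
                       const unsigned i, const unsigned k)
{
  for (unsigned j = 0; j < n; ++j)
    {
      const double t = A[i * n + j];
      A[i * n + j]   = csr[0] * A[i * n + j] + csr[1] * A[k * n + j];
      A[k * n + j]   = -csr[1] * t + csr[0] * A[k * n + j];
    }
}

int main()
{
  std::vector<double> A = {3., 1., 4., 2.};      // 2x2, row-major
  const double r = std::hypot(A[0], A[2]);       // sqrt(3^2 + 4^2) = 5
  const std::array<double, 3> csr = {{A[0] / r, A[2] / r, r}}; // {c, s, r}
  apply_givens_left(A, 2, csr, 0, 1);
  std::printf("%g %g\n%g %g\n", A[0], A[1], A[2], A[3]); // A(1,0) is now 0
}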
342
343
344template <typename number>
345void
346LAPACKFullMatrix<number>::remove_row_and_column(const size_type row,
347 const size_type col)
348{
349 AssertIndexRange(row, this->m());
350 AssertIndexRange(col, this->n());
351
352 const size_type nrows = this->m() - 1;
353 const size_type ncols = this->n() - 1;
354
355 TransposeTable<number> copy(std::move(*this));
356 this->TransposeTable<number>::reinit(nrows, ncols);
357
358 for (size_type j = 0; j < ncols; ++j)
359 {
360 const size_type jj = (j < col ? j : j + 1);
361 for (size_type i = 0; i < nrows; ++i)
362 {
363 const size_type ii = (i < row ? i : i + 1);
364 (*this)(i, j) = copy(ii, jj);
365 }
366 }
367}
368
369
371template <typename number>
372void
373LAPACKFullMatrix<number>::reinit(const size_type m, const size_type n)
374{
375 this->TransposeTable<number>::reinit(m, n);
376 state = LAPACKSupport::matrix;
377}
378
379
380
381template <typename number>
382template <typename number2>
383LAPACKFullMatrix<number> &
384LAPACKFullMatrix<number>::operator=(const FullMatrix<number2> &M)
385{
386 Assert(this->m() == M.m(), ExcDimensionMismatch(this->m(), M.m()));
387 Assert(this->n() == M.n(), ExcDimensionMismatch(this->n(), M.n()));
388 for (size_type i = 0; i < this->m(); ++i)
389 for (size_type j = 0; j < this->n(); ++j)
390 (*this)(i, j) = M(i, j);
391
392 state = LAPACKSupport::matrix;
393 property = LAPACKSupport::general;
394 return *this;
395}
396
397
398
399template <typename number>
400template <typename number2>
401LAPACKFullMatrix<number> &
402LAPACKFullMatrix<number>::operator=(const SparseMatrix<number2> &M)
403{
404 Assert(this->m() == M.n(), ExcDimensionMismatch(this->m(), M.n()));
405 Assert(this->n() == M.m(), ExcDimensionMismatch(this->n(), M.m()));
406 for (size_type i = 0; i < this->m(); ++i)
407 for (size_type j = 0; j < this->n(); ++j)
408 (*this)(i, j) = M.el(i, j);
409
410 state = LAPACKSupport::matrix;
411 property = LAPACKSupport::general;
412 return *this;
413}
414
415
416
417template <typename number>
418LAPACKFullMatrix<number> &
419LAPACKFullMatrix<number>::operator=(const number d)
420{
421 (void)d;
422 Assert(d == number(0), ExcScalarAssignmentOnlyForZeroValue());
423
424 if (this->n_elements() != 0)
425 this->reset_values();
426
427 state = LAPACKSupport::matrix;
428 return *this;
429}
430
431
432
433template <typename number>
434LAPACKFullMatrix<number> &
435LAPACKFullMatrix<number>::operator*=(const number factor)
436{
437 Assert(state == LAPACKSupport::matrix ||
438 state == LAPACKSupport::inverse_matrix,
439 ExcState(state));
440
441 AssertIsFinite(factor);
442 const char type = 'G';
443 const number cfrom = 1.;
444 const types::blas_int m = this->m();
445 const types::blas_int n = this->n();
446 const types::blas_int lda = this->m();
447 types::blas_int info = 0;
448 // kl and ku will not be referenced for type = G (dense matrices).
449 const types::blas_int kl = 0;
450 number * values = this->values.data();
451
452 lascl(&type, &kl, &kl, &cfrom, &factor, &m, &n, values, &lda, &info);
453
454 // Negative return value implies a wrong argument. This should be internal.
455 Assert(info >= 0, ExcInternalError());
456
457 return *this;
458}
459
460
461
462template <typename number>
463LAPACKFullMatrix<number> &
464LAPACKFullMatrix<number>::operator/=(const number factor)
465{
466 Assert(state == LAPACKSupport::matrix ||
467 state == LAPACKSupport::inverse_matrix,
468 ExcState(state));
469
470 AssertIsFinite(factor);
471 Assert(factor != number(0.), ExcZero());
472
473 const char type = 'G';
474 const number cto = 1.;
475 const types::blas_int m = this->m();
476 const types::blas_int n = this->n();
477 const types::blas_int lda = this->m();
478 types::blas_int info = 0;
479 // kl and ku will not be referenced for type = G (dense matrices).
480 const types::blas_int kl = 0;
481 number * values = this->values.data();
482
483 lascl(&type, &kl, &kl, &factor, &cto, &m, &n, values, &lda, &info);
484
485 // Negative return value implies a wrong argument. This should be internal.
486 Assert(info >= 0, ExcInternalError());
488 return *this;
489}
490
491
492
493template <typename number>
494void
495LAPACKFullMatrix<number>::add(const number a, const LAPACKFullMatrix<number> &A)
496{
497 Assert(state == LAPACKSupport::matrix ||
498 state == LAPACKSupport::inverse_matrix,
499 ExcState(state));
500
501 Assert(m() == A.m(), ExcDimensionMismatch(m(), A.m()));
502 Assert(n() == A.n(), ExcDimensionMismatch(n(), A.n()));
503
504 AssertIsFinite(a);
505
506 // BLAS does not offer functions to add matrices.
507 // LapackFullMatrix is stored in contiguous array
508 // ==> use BLAS 1 for adding vectors
509 const types::blas_int n = this->m() * this->n();
510 const types::blas_int inc = 1;
511 number * values = this->values.data();
512 const number * values_A = A.values.data();
513
514 axpy(&n, &a, values_A, &inc, values, &inc);
515}
517
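A hedged sketch of the idea stated in the comment inside add(): because the entries sit in one contiguous array, adding a*A is a single BLAS-1 axpy over m*n values. The library code calls the BLAS routine through deal.II's wrapper; the plain loop below is only a stand-in to show what that call computes:

#include <cstddef>
#include <vector>

// y <- a*x + y over n contiguous entries, i.e. axpy with unit strides.
void axpy(const std::size_t n, const double a, const double *x, double *y)
{
  for (std::size_t i = 0; i < n; ++i)
    y[i] += a * x[i];
}

// Adding a*A to B elementwise: treat both m x n matrices as flat arrays.
void add_scaled(std::vector<double> &B, const std::vector<double> &A,
                const double a)
{
  axpy(A.size(), a, A.data(), B.data());
}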
518
519namespace
520{
521 template <typename number>
522 void
523 cholesky_rank1(LAPACKFullMatrix<number> &A,
524 const number a,
525 const Vector<number> & v)
526 {
527 const typename LAPACKFullMatrix<number>::size_type N = A.n();
528 Vector<number> z(v);
529 // Cholesky update / downdate, see
530 // 6.5.4 Cholesky Updating and Downdating, Golub 2013 Matrix computations
531 // Note that potrf() is called with LAPACKSupport::L , so the
532 // factorization is stored in lower triangular part.
533 // Also see discussion here
534 // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=2646
535 if (a > 0.)
536 {
537 // simple update via a sequence of Givens rotations.
538 // Observe that
539 //
540 // | L^T |T | L^T |
541 // A <-- | | | | = L L^T + z z^T
542 // | z^T | | z^T |
543 //
544 // so we can get updated factor by doing a sequence of Givens
545 // rotations to make the matrix lower-triangular
546 // Also see LINPACK's dchud http://www.netlib.org/linpack/dchud.f
547 z *= std::sqrt(a);
548 for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
549 {
550 const std::array<number, 3> csr =
551 Utilities::LinearAlgebra::givens_rotation<number>(A(k, k), z(k));
552 A(k, k) = csr[2];
553 for (typename LAPACKFullMatrix<number>::size_type i = k + 1; i < N;
554 ++i)
555 {
556 const number t = A(i, k);
557 A(i, k) = csr[0] * A(i, k) + csr[1] * z(i);
558 z(i) = -csr[1] * t + csr[0] * z(i);
559 }
560 }
561 }
562 else
563 {
564 // downdating is not always possible as we may end up with
565 // negative definite matrix. If it's possible, then it boils
566 // down to application of hyperbolic rotations.
567 // Observe that
568 //
569 // | L^T |T | L^T |
570 // A <-- | | S | | = L L^T - z z^T
571 // | z^T | | z^T |
572 //
573 // |In 0 |
574 // S := | |
575 // |0 -1 |
576 //
577 // We are looking for H which is S-orthogonal (HSH^T=S) and
578 // can restore upper-triangular factor of the factorization of A above.
579 // We will use Hyperbolic rotations to do the job
580 //
581 // | c -s | | x1 | | r |
582 // | | = | | = | |, c^2 - s^2 = 1
583 // |-s c | | x2 | | 0 |
584 //
585 // which have real solution only if x2 <= x1.
586 // See also Linpack's http://www.netlib.org/linpack/dchdd.f and
587 // https://infoscience.epfl.ch/record/161468/files/cholupdate.pdf and
588 // "Analysis of a recursive Least Squares Hyperbolic Rotation Algorithm
589 // for Signal Processing", Alexander, Pan, Plemmons, 1988.
590 z *= std::sqrt(-a);
591 for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
592 {
593 const std::array<number, 3> csr =
594 Utilities::LinearAlgebra::hyperbolic_rotation<number>(A(k, k), z(k));
595 A(k, k) = csr[2];
596 for (typename LAPACKFullMatrix<number>::size_type i = k + 1; i < N;
597 ++i)
598 {
599 const number t = A(i, k);
600 A(i, k) = csr[0] * A(i, k) - csr[1] * z(i);
601 z(i) = -csr[1] * t + csr[0] * z(i);
602 }
603 }
604 }
605 }
606
607
608 template <typename number>
609 void
610 cholesky_rank1(LAPACKFullMatrix<std::complex<number>> & /*A*/,
611 const std::complex<number> /*a*/,
612 const Vector<std::complex<number>> & /*v*/)
613 {
614 AssertThrow(false, ExcNotImplemented());
615 }
616} // namespace
617
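A small self-contained check (not deal.II code) of the property cholesky_rank1() relies on for a > 0: if A = L*L^T and A is replaced by A + a*v*v^T, the new lower-triangular factor must reproduce the updated matrix. The Cholesky routine below is a plain textbook version written only for this sketch, not the library implementation:

#include <cassert>
#include <cmath>
#include <vector>

// Lower-triangular Cholesky factor of a dense n x n SPD matrix (row-major).
std::vector<double> cholesky(std::vector<double> A, const unsigned n)
{
  for (unsigned k = 0; k < n; ++k)
    {
      A[k * n + k] = std::sqrt(A[k * n + k]);
      for (unsigned i = k + 1; i < n; ++i)
        A[i * n + k] /= A[k * n + k];
      for (unsigned j = k + 1; j < n; ++j)
        for (unsigned i = j; i < n; ++i)
          A[i * n + j] -= A[i * n + k] * A[j * n + k];
      for (unsigned j = k + 1; j < n; ++j)
        A[k * n + j] = 0.; // keep only the lower triangle
    }
  return A;
}

int main()
{
  const unsigned            n = 2;
  std::vector<double>       A = {4., 1., 1., 3.}; // SPD
  const std::vector<double> v = {1., 2.};
  const double              a = 0.5;

  // Updated matrix B = A + a v v^T ...
  std::vector<double> B = A;
  for (unsigned i = 0; i < n; ++i)
    for (unsigned j = 0; j < n; ++j)
      B[i * n + j] += a * v[i] * v[j];

  // ... must equal L_new * L_new^T, where L_new is the factor of B.
  const std::vector<double> L = cholesky(B, n);
  for (unsigned i = 0; i < n; ++i)
    for (unsigned j = 0; j < n; ++j)
      {
        double s = 0.;
        for (unsigned k = 0; k < n; ++k)
          s += L[i * n + k] * L[j * n + k];
        assert(std::abs(s - B[i * n + j]) < 1e-12);
      }
}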
618
619
620template <typename number>
621void
622LAPACKFullMatrix<number>::rank1_update(const number a, const Vector<number> &v)
623{
624 Assert(property == LAPACKSupport::symmetric, ExcProperty(property));
625
626 Assert(n() == m(), ExcInternalError());
627 Assert(m() == v.size(), ExcDimensionMismatch(m(), v.size()));
630
631 if (state == LAPACKSupport::matrix)
632 {
633 {
634 const types::blas_int N = this->m();
635 const char uplo = LAPACKSupport::U;
636 const types::blas_int lda = N;
637 const types::blas_int incx = 1;
638
639 syr(&uplo, &N, &a, v.begin(), &incx, this->values.begin(), &lda);
640 }
641
642 const size_type N = this->m();
643 // FIXME: we should really only update upper or lower triangular parts
644 // of symmetric matrices and make sure the interface is consistent,
645 // for example operator(i,j) gives correct results regardless of storage.
646 for (size_type i = 0; i < N; ++i)
647 for (size_type j = 0; j < i; ++j)
648 (*this)(i, j) = (*this)(j, i);
649 }
650 else if (state == LAPACKSupport::cholesky)
651 {
652 cholesky_rank1(*this, a, v);
653 }
654 else
655 AssertThrow(false, ExcState(state));
656}
657
658
659
660template <typename number>
661void
662LAPACKFullMatrix<number>::vmult(Vector<number> & w,
663 const Vector<number> &v,
664 const bool adding) const
665{
666 const types::blas_int mm = this->m();
667 const types::blas_int nn = this->n();
668 const number alpha = 1.;
669 const number beta = (adding ? 1. : 0.);
670 const number null = 0.;
671
672 // use trmv for triangular matrices
673 if ((property == upper_triangular || property == lower_triangular) &&
674 (mm == nn) && state == matrix)
675 {
676 Assert(adding == false, ExcNotImplemented());
677
678 AssertDimension(v.size(), this->n());
679 AssertDimension(w.size(), this->m());
680
681 const char diag = 'N';
682 const char trans = 'N';
683 const char uplo =
684 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
685
686 w = v;
687
688 const types::blas_int N = mm;
689 const types::blas_int lda = N;
690 const types::blas_int incx = 1;
691
692 trmv(
693 &uplo, &trans, &diag, &N, this->values.data(), &lda, w.data(), &incx);
695 return;
696 }
697
698 switch (state)
699 {
700 case matrix:
701 case inverse_matrix:
702 {
703 AssertDimension(v.size(), this->n());
704 AssertDimension(w.size(), this->m());
705
706 gemv("N",
707 &mm,
708 &nn,
709 &alpha,
710 this->values.data(),
711 &mm,
712 v.data(),
713 &one,
714 &beta,
715 w.data(),
716 &one);
717 break;
718 }
719 case svd:
720 {
721 std::lock_guard<std::mutex> lock(mutex);
722 AssertDimension(v.size(), this->n());
723 AssertDimension(w.size(), this->m());
724 // Compute V^T v
725 work.resize(std::max(mm, nn));
726 gemv("N",
727 &nn,
728 &nn,
729 &alpha,
730 svd_vt->values.data(),
731 &nn,
732 v.data(),
733 &one,
734 &null,
735 work.data(),
736 &one);
737 // Multiply by singular values
738 for (size_type i = 0; i < wr.size(); ++i)
739 work[i] *= wr[i];
740 // Multiply with U
741 gemv("N",
742 &mm,
743 &mm,
744 &alpha,
745 svd_u->values.data(),
746 &mm,
747 work.data(),
748 &one,
749 &beta,
750 w.data(),
751 &one);
752 break;
753 }
754 case inverse_svd:
755 {
756 std::lock_guard<std::mutex> lock(mutex);
757 AssertDimension(w.size(), this->n());
758 AssertDimension(v.size(), this->m());
759 // Compute U^T v
760 work.resize(std::max(mm, nn));
761 gemv("T",
762 &mm,
763 &mm,
764 &alpha,
765 svd_u->values.data(),
766 &mm,
767 v.data(),
768 &one,
769 &null,
770 work.data(),
771 &one);
772 // Multiply by singular values
773 for (size_type i = 0; i < wr.size(); ++i)
774 work[i] *= wr[i];
775 // Multiply with V
776 gemv("T",
777 &nn,
778 &nn,
779 &alpha,
780 svd_vt->values.data(),
781 &nn,
782 work.data(),
783 &one,
784 &beta,
785 w.data(),
786 &one);
787 break;
788 }
789 default:
790 Assert(false, ExcState(state));
791 }
792}
793
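In the svd branch above, w = A*v is assembled as U*(Sigma*(V^T*v)). A dimension-only sketch with plain loops (hedged: the member function uses gemv calls on the stored factors instead; sigma is assumed to hold the min(m,n) singular values):

#include <vector>

// w = U * diag(sigma) * VT * v for an m x n matrix given by its SVD factors,
// all stored row-major; illustrates the order of operations used above.
std::vector<double> svd_vmult(const std::vector<double> &U,     // m x m
                              const std::vector<double> &sigma, // min(m,n)
                              const std::vector<double> &VT,    // n x n
                              const std::vector<double> &v,
                              const unsigned m, const unsigned n)
{
  std::vector<double> work(n, 0.);
  for (unsigned i = 0; i < n; ++i)               // work = VT * v
    for (unsigned j = 0; j < n; ++j)
      work[i] += VT[i * n + j] * v[j];
  for (unsigned i = 0; i < sigma.size(); ++i)    // scale by singular values
    work[i] *= sigma[i];
  std::vector<double> w(m, 0.);
  for (unsigned i = 0; i < m; ++i)               // w = U * work; only the
    for (unsigned j = 0; j < sigma.size(); ++j)  // first min(m,n) entries
      w[i] += U[i * m + j] * work[j];            // contribute
  return w;
}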
794
795
796template <typename number>
797void
798LAPACKFullMatrix<number>::Tvmult(Vector<number> & w,
799 const Vector<number> &v,
800 const bool adding) const
801{
802 const types::blas_int mm = this->m();
803 const types::blas_int nn = this->n();
804 const number alpha = 1.;
805 const number beta = (adding ? 1. : 0.);
806 const number null = 0.;
807
808 // use trmv for triangular matrices
809 if ((property == upper_triangular || property == lower_triangular) &&
810 (mm == nn) && state == matrix)
811 {
812 Assert(adding == false, ExcNotImplemented());
813
814 AssertDimension(v.size(), this->n());
815 AssertDimension(w.size(), this->m());
816
817 const char diag = 'N';
818 const char trans = 'T';
819 const char uplo =
820 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
821
822 w = v;
823
824 const types::blas_int N = mm;
825 const types::blas_int lda = N;
826 const types::blas_int incx = 1;
827
828 trmv(
829 &uplo, &trans, &diag, &N, this->values.data(), &lda, w.data(), &incx);
830
831 return;
832 }
833
834
835 switch (state)
836 {
837 case matrix:
838 case inverse_matrix:
839 {
840 AssertDimension(w.size(), this->n());
841 AssertDimension(v.size(), this->m());
842
843 gemv("T",
844 &mm,
845 &nn,
846 &alpha,
847 this->values.data(),
848 &mm,
849 v.data(),
850 &one,
851 &beta,
852 w.data(),
853 &one);
854 break;
855 }
856 case svd:
857 {
858 std::lock_guard<std::mutex> lock(mutex);
859 AssertDimension(w.size(), this->n());
860 AssertDimension(v.size(), this->m());
861
862 // Compute U^T v
863 work.resize(std::max(mm, nn));
864 gemv("T",
865 &mm,
866 &mm,
867 &alpha,
868 svd_u->values.data(),
869 &mm,
870 v.data(),
871 &one,
872 &null,
873 work.data(),
874 &one);
875 // Multiply by singular values
876 for (size_type i = 0; i < wr.size(); ++i)
877 work[i] *= wr[i];
878 // Multiply with V
879 gemv("T",
880 &nn,
881 &nn,
882 &alpha,
883 svd_vt->values.data(),
884 &nn,
885 work.data(),
886 &one,
887 &beta,
888 w.data(),
889 &one);
890 break;
891 }
892 case inverse_svd:
893 {
894 std::lock_guard<std::mutex> lock(mutex);
895 AssertDimension(v.size(), this->n());
896 AssertDimension(w.size(), this->m());
897
898 // Compute V^T v
899 work.resize(std::max(mm, nn));
900 gemv("N",
901 &nn,
902 &nn,
903 &alpha,
904 svd_vt->values.data(),
905 &nn,
906 v.data(),
907 &one,
908 &null,
909 work.data(),
910 &one);
911 // Multiply by singular values
912 for (size_type i = 0; i < wr.size(); ++i)
913 work[i] *= wr[i];
914 // Multiply with U
915 gemv("N",
916 &mm,
917 &mm,
918 &alpha,
919 svd_u->values.data(),
920 &mm,
921 work.data(),
922 &one,
923 &beta,
924 w.data(),
925 &one);
926 break;
927 }
928 default:
929 Assert(false, ExcState(state));
930 }
931}
932
933
934
935template <typename number>
936void
937LAPACKFullMatrix<number>::vmult_add(Vector<number> & w,
938 const Vector<number> &v) const
939{
940 vmult(w, v, true);
941}
942
943
944
945template <typename number>
946void
947LAPACKFullMatrix<number>::Tvmult_add(Vector<number> & w,
948 const Vector<number> &v) const
949{
950 Tvmult(w, v, true);
951}
952
953
954
955template <typename number>
956void
957LAPACKFullMatrix<number>::mmult(LAPACKFullMatrix<number> & C,
958 const LAPACKFullMatrix<number> &B,
959 const bool adding) const
960{
961 Assert(state == matrix || state == inverse_matrix, ExcState(state));
962 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
963 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
964 Assert(this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
965 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
966 Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
967 const types::blas_int mm = this->m();
968 const types::blas_int nn = B.n();
969 const types::blas_int kk = this->n();
970 const number alpha = 1.;
971 const number beta = (adding ? 1. : 0.);
972
973 gemm("N",
974 "N",
975 &mm,
976 &nn,
977 &kk,
978 &alpha,
979 this->values.data(),
980 &mm,
981 B.values.data(),
982 &kk,
983 &beta,
984 C.values.data(),
985 &mm);
986}
987
988
989
990template <typename number>
991void
992LAPACKFullMatrix<number>::mmult(FullMatrix<number> & C,
993 const LAPACKFullMatrix<number> &B,
994 const bool adding) const
995{
996 Assert(state == matrix || state == inverse_matrix, ExcState(state));
997 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
998 Assert(this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
999 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1000 Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
1001 const types::blas_int mm = this->m();
1002 const types::blas_int nn = B.n();
1003 const types::blas_int kk = this->n();
1004 const number alpha = 1.;
1005 const number beta = (adding ? 1. : 0.);
1006
1007 // since FullMatrix stores the matrix in transposed order compared to this
1008 // matrix, compute B^T * A^T = (A * B)^T
1009 gemm("T",
1010 "T",
1011 &nn,
1012 &mm,
1013 &kk,
1014 &alpha,
1015 B.values.data(),
1016 &kk,
1017 this->values.data(),
1018 &mm,
1019 &beta,
1020 &C(0, 0),
1021 &nn);
1022}
1023
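The comment in the function above rests on a storage identity: an m x n matrix written row-major occupies exactly the same memory as its transpose written column-major. That is why computing (A*B)^T = B^T*A^T in column-major order fills a row-major A*B in place, and a single gemm with both factors transposed can write directly into the FullMatrix. A tiny self-contained demonstration (illustrative only):

#include <cassert>
#include <vector>

int main()
{
  const unsigned m = 2, n = 3;
  // M stored row-major (FullMatrix convention), M(i,j) = M[i*n + j].
  const std::vector<double> M = {1, 2, 3, 4, 5, 6};

  // Build T = M^T (an n x m matrix) in column-major storage (the LAPACK
  // convention), T(j,i) = T[j + i*n].
  std::vector<double> T(n * m);
  for (unsigned i = 0; i < m; ++i)
    for (unsigned j = 0; j < n; ++j)
      T[j + i * n] = M[i * n + j];

  // The two buffers coincide element for element.
  assert(T == M);
}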
1024
1025
1026template <typename number>
1027void
1028LAPACKFullMatrix<number>::Tmmult(LAPACKFullMatrix<number> & C,
1029 const LAPACKFullMatrix<number> &B,
1030 const Vector<number> & V,
1031 const bool adding) const
1032{
1033 Assert(state == matrix || state == inverse_matrix, ExcState(state));
1034 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1035 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1036
1037 const LAPACKFullMatrix<number> &A = *this;
1038
1039 Assert(A.m() == B.m(), ExcDimensionMismatch(A.m(), B.m()));
1040 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1041 Assert(C.m() == A.n(), ExcDimensionMismatch(A.n(), C.m()));
1042 Assert(V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
1043
1044 const types::blas_int mm = A.n();
1045 const types::blas_int nn = B.n();
1046 const types::blas_int kk = B.m();
1047
1048 // lapack does not have any triple product routines, including the case of
1049 // diagonal matrix in the middle, see
1050 // https://stackoverflow.com/questions/3548069/multiplying-three-matrices-in-blas-with-the-middle-one-being-diagonal
1051 // http://mathforum.org/kb/message.jspa?messageID=3546564
1052
1053 std::lock_guard<std::mutex> lock(mutex);
1054 // First, get V*B into "work" array
1055 work.resize(kk * nn);
1056 // following http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=768#p2577
1057 // do left-multiplication manually. Note that Xscal would require to first
1058 // copy the input vector as multiplication is done inplace.
1059 for (types::blas_int j = 0; j < nn; ++j)
1060 for (types::blas_int i = 0; i < kk; ++i)
1061 {
1062 Assert(j * kk + i < static_cast<types::blas_int>(work.size()),
1063 ExcInternalError());
1064 work[j * kk + i] = V(i) * B(i, j);
1065 }
1066
1067 // Now do the standard Tmmult:
1068 const number alpha = 1.;
1069 const number beta = (adding ? 1. : 0.);
1070
1071 gemm("T",
1072 "N",
1073 &mm,
1074 &nn,
1075 &kk,
1076 &alpha,
1077 this->values.data(),
1078 &kk,
1079 work.data(),
1080 &kk,
1081 &beta,
1082 C.values.data(),
1083 &mm);
1084}
1085
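As the comment in the function above notes, there is no LAPACK routine for A^T * diag(V) * B, so the code scales the rows of B by V into the work array and then issues one gemm. The same two-step idea with plain loops, as a hedged stand-alone sketch (row-major storage chosen only for readability):

#include <vector>

// C = A^T * diag(V) * B for A (k x m), B (k x n), V of length k.
std::vector<double> triple_product(const std::vector<double> &A,
                                   const std::vector<double> &B,
                                   const std::vector<double> &V,
                                   const unsigned k, const unsigned m,
                                   const unsigned n)
{
  // Step 1: W = diag(V) * B, i.e. scale row i of B by V[i].
  std::vector<double> W(B);
  for (unsigned i = 0; i < k; ++i)
    for (unsigned j = 0; j < n; ++j)
      W[i * n + j] *= V[i];

  // Step 2: C = A^T * W -- one ordinary matrix product ("Tmmult").
  std::vector<double> C(m * n, 0.);
  for (unsigned i = 0; i < m; ++i)
    for (unsigned j = 0; j < n; ++j)
      for (unsigned l = 0; l < k; ++l)
        C[i * n + j] += A[l * m + i] * W[l * n + j];
  return C;
}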
1086
1087
1088template <typename number>
1089void
1090LAPACKFullMatrix<number>::transpose(LAPACKFullMatrix<number> &B) const
1091{
1092 const LAPACKFullMatrix<number> &A = *this;
1093 AssertDimension(A.m(), B.n());
1094 AssertDimension(A.n(), B.m());
1095 const types::blas_int m = B.m();
1096 const types::blas_int n = B.n();
1097#ifdef DEAL_II_LAPACK_WITH_MKL
1098 const number one = 1.;
1099 omatcopy('C', 'C', n, m, one, A.values.data(), n, B.values.data(), m);
1100#else
1101 for (types::blas_int i = 0; i < m; ++i)
1102 for (types::blas_int j = 0; j < n; ++j)
1103 B(j, i) = numbers::NumberTraits<number>::conjugate(A(i, j));
1104#endif
1105}
1106
1107
1108
1109template <typename number>
1110void
1111LAPACKFullMatrix<number>::scale_rows(const Vector<number> &V)
1112{
1113 LAPACKFullMatrix<number> &A = *this;
1114 Assert(state == matrix || state == inverse_matrix, ExcState(state));
1115 Assert(V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
1116
1117 const types::blas_int nn = A.n();
1118 const types::blas_int kk = A.m();
1119 for (types::blas_int j = 0; j < nn; ++j)
1120 for (types::blas_int i = 0; i < kk; ++i)
1121 A(i, j) *= V(i);
1122}
1123
1124
1125
1126template <typename number>
1127void
1128LAPACKFullMatrix<number>::Tmmult(LAPACKFullMatrix<number> & C,
1129 const LAPACKFullMatrix<number> &B,
1130 const bool adding) const
1131{
1132 Assert(state == matrix || state == inverse_matrix, ExcState(state));
1133 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1134 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1135 Assert(this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
1136 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1137 Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1138 const types::blas_int mm = this->n();
1139 const types::blas_int nn = B.n();
1140 const types::blas_int kk = B.m();
1141 const number alpha = 1.;
1142 const number beta = (adding ? 1. : 0.);
1143
1144 if (PointerComparison::equal(this, &B))
1145 {
1147 "T",
1148 &nn,
1149 &kk,
1150 &alpha,
1151 this->values.data(),
1152 &kk,
1153 &beta,
1154 C.values.data(),
1155 &nn);
1156
1157 // fill-in lower triangular part
1158 for (types::blas_int j = 0; j < nn; ++j)
1159 for (types::blas_int i = 0; i < j; ++i)
1160 C(j, i) = C(i, j);
1161
1162 C.property = symmetric;
1163 }
1164 else
1165 {
1166 gemm("T",
1167 "N",
1168 &mm,
1169 &nn,
1170 &kk,
1171 &alpha,
1172 this->values.data(),
1173 &kk,
1174 B.values.data(),
1175 &kk,
1176 &beta,
1177 C.values.data(),
1178 &mm);
1179 }
1180}
1181
1182
1183
1184template <typename number>
1185void
1186LAPACKFullMatrix<number>::Tmmult(FullMatrix<number> & C,
1187 const LAPACKFullMatrix<number> &B,
1188 const bool adding) const
1189{
1190 Assert(state == matrix || state == inverse_matrix, ExcState(state));
1191 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1192 Assert(this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
1193 Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1194 Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1195 const types::blas_int mm = this->n();
1196 const types::blas_int nn = B.n();
1197 const types::blas_int kk = B.m();
1198 const number alpha = 1.;
1199 const number beta = (adding ? 1. : 0.);
1200
1201 // since FullMatrix stores the matrix in transposed order compared to this
1202 // matrix, compute B^T * A = (A^T * B)^T
1203 gemm("T",
1204 "N",
1205 &nn,
1206 &mm,
1207 &kk,
1208 &alpha,
1209 B.values.data(),
1210 &kk,
1211 this->values.data(),
1212 &kk,
1213 &beta,
1214 &C(0, 0),
1215 &nn);
1216}
1217
1218
1219
1220template <typename number>
1221void
1222LAPACKFullMatrix<number>::mTmult(LAPACKFullMatrix<number> & C,
1223 const LAPACKFullMatrix<number> &B,
1224 const bool adding) const
1225{
1226 Assert(state == matrix || state == inverse_matrix, ExcState(state));
1227 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1228 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1229 Assert(this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
1230 Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1231 Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
1232 const types::blas_int mm = this->m();
1233 const types::blas_int nn = B.m();
1234 const types::blas_int kk = B.n();
1235 const number alpha = 1.;
1236 const number beta = (adding ? 1. : 0.);
1237
1238 if (PointerComparison::equal(this, &B))
1239 {
1241 "N",
1242 &nn,
1243 &kk,
1244 &alpha,
1245 this->values.data(),
1246 &nn,
1247 &beta,
1248 C.values.data(),
1249 &nn);
1250
1251 // fill-in lower triangular part
1252 for (types::blas_int j = 0; j < nn; ++j)
1253 for (types::blas_int i = 0; i < j; ++i)
1254 C(j, i) = C(i, j);
1255
1256 C.property = symmetric;
1257 }
1258 else
1259 {
1260 gemm("N",
1261 "T",
1262 &mm,
1263 &nn,
1264 &kk,
1265 &alpha,
1266 this->values.data(),
1267 &mm,
1268 B.values.data(),
1269 &nn,
1270 &beta,
1271 C.values.data(),
1272 &mm);
1273 }
1274}
1275
1276
1277
1278template <typename number>
1279void
1280LAPACKFullMatrix<number>::mTmult(FullMatrix<number> & C,
1281 const LAPACKFullMatrix<number> &B,
1282 const bool adding) const
1283{
1284 Assert(state == matrix || state == inverse_matrix, ExcState(state));
1285 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1286 Assert(this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
1287 Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1288 Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
1289 const types::blas_int mm = this->m();
1290 const types::blas_int nn = B.m();
1291 const types::blas_int kk = B.n();
1292 const number alpha = 1.;
1293 const number beta = (adding ? 1. : 0.);
1294
1295 // since FullMatrix stores the matrix in transposed order compared to this
1296 // matrix, compute B * A^T = (A * B^T)^T
1297 gemm("N",
1298 "T",
1299 &nn,
1300 &mm,
1301 &kk,
1302 &alpha,
1303 B.values.data(),
1304 &nn,
1305 this->values.data(),
1306 &mm,
1307 &beta,
1308 &C(0, 0),
1309 &nn);
1310}
1311
1312
1313
1314template <typename number>
1315void
1316LAPACKFullMatrix<number>::TmTmult(LAPACKFullMatrix<number> & C,
1317 const LAPACKFullMatrix<number> &B,
1318 const bool adding) const
1319{
1320 Assert(state == matrix || state == inverse_matrix, ExcState(state));
1321 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1322 Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1323 Assert(this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
1324 Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1325 Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1326 const types::blas_int mm = this->n();
1327 const types::blas_int nn = B.m();
1328 const types::blas_int kk = B.n();
1329 const number alpha = 1.;
1330 const number beta = (adding ? 1. : 0.);
1331
1332 gemm("T",
1333 "T",
1334 &mm,
1335 &nn,
1336 &kk,
1337 &alpha,
1338 this->values.data(),
1339 &kk,
1340 B.values.data(),
1341 &nn,
1342 &beta,
1343 C.values.data(),
1344 &mm);
1345}
1346
1347
1348
1349template <typename number>
1350void
1351LAPACKFullMatrix<number>::TmTmult(FullMatrix<number> & C,
1352 const LAPACKFullMatrix<number> &B,
1353 const bool adding) const
1354{
1355 Assert(state == matrix || state == inverse_matrix, ExcState(state));
1356 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1357 Assert(this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
1358 Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1359 Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1360 const types::blas_int mm = this->n();
1361 const types::blas_int nn = B.m();
1362 const types::blas_int kk = B.n();
1363 const number alpha = 1.;
1364 const number beta = (adding ? 1. : 0.);
1365
1366 // since FullMatrix stores the matrix in transposed order compared to this
1367 // matrix, compute B * A = (A^T * B^T)^T
1368 gemm("N",
1369 "N",
1370 &nn,
1371 &mm,
1372 &kk,
1373 &alpha,
1374 B.values.data(),
1375 &nn,
1376 this->values.data(),
1377 &kk,
1378 &beta,
1379 &C(0, 0),
1380 &nn);
1381}
1382
1383
1384
1385template <typename number>
1386void
1387LAPACKFullMatrix<number>::compute_lu_factorization()
1388{
1389 Assert(state == matrix, ExcState(state));
1390 state = LAPACKSupport::unusable;
1391
1392 const types::blas_int mm = this->m();
1393 const types::blas_int nn = this->n();
1394 number *const values = this->values.data();
1395 ipiv.resize(mm);
1396 types::blas_int info = 0;
1397 getrf(&mm, &nn, values, &mm, ipiv.data(), &info);
1398
1399 Assert(info >= 0, ExcInternalError());
1400
1401 // if info >= 0, the factorization has been completed
1402 state = lu;
1403
1404 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("getrf", info));
1405}
1406
1407
1408
1409template <typename number>
1410void
1411LAPACKFullMatrix<number>::set_property(const Property p)
1412{
1413 property = p;
1414}
1415
1416
1417
1418template <typename number>
1419number
1420LAPACKFullMatrix<number>::l1_norm() const
1421{
1422 const char type('O');
1423 return norm(type);
1424}
1425
1426
1427
1428template <typename number>
1429number
1430LAPACKFullMatrix<number>::linfty_norm() const
1431{
1432 const char type('I');
1433 return norm(type);
1434}
1435
1436
1437
1438template <typename number>
1439number
1440LAPACKFullMatrix<number>::frobenius_norm() const
1441{
1442 const char type('F');
1443 return norm(type);
1444}
1445
1446
1447
1448template <typename number>
1449number
1450LAPACKFullMatrix<number>::norm(const char type) const
1451{
1452 std::lock_guard<std::mutex> lock(mutex);
1453
1454 Assert(state == LAPACKSupport::matrix ||
1455 state == LAPACKSupport::inverse_matrix,
1456 ExcMessage("norms can be called in matrix state only."));
1457
1458 const types::blas_int N = this->n();
1459 const types::blas_int M = this->m();
1460 const number *const values = this->values.data();
1461 if (property == symmetric)
1462 {
1463 const types::blas_int lda = std::max<types::blas_int>(1, N);
1464 const types::blas_int lwork =
1465 (type == 'I' || type == 'O') ? std::max<types::blas_int>(1, N) : 0;
1466 work.resize(lwork);
1467 return lansy(&type, &LAPACKSupport::L, &N, values, &lda, work.data());
1468 }
1469 else
1470 {
1471 const types::blas_int lda = std::max<types::blas_int>(1, M);
1472 const types::blas_int lwork =
1473 (type == 'I') ? std::max<types::blas_int>(1, M) : 0;
1474 work.resize(lwork);
1475 return lange(&type, &M, &N, values, &lda, work.data());
1476 }
1477}
1478
1479
1480
1481template <typename number>
1482number
1483LAPACKFullMatrix<number>::trace() const
1484{
1485 Assert(state == LAPACKSupport::matrix ||
1486 state == LAPACKSupport::inverse_matrix,
1487 ExcMessage("Trace can be called in matrix state only."));
1488 Assert(this->n() == this->m(), ExcDimensionMismatch(this->n(), this->m()));
1489
1490 number tr = 0;
1491 for (size_type i = 0; i < this->m(); ++i)
1492 tr += (*this)(i, i);
1493
1494 return tr;
1495}
1496
1497
1498
1499template <typename number>
1500void
1501LAPACKFullMatrix<number>::compute_cholesky_factorization()
1502{
1503 Assert(state == matrix, ExcState(state));
1504 Assert(property == symmetric, ExcProperty(property));
1505 state = LAPACKSupport::unusable;
1506
1507 const types::blas_int mm = this->m();
1508 const types::blas_int nn = this->n();
1509 (void)mm;
1510 Assert(mm == nn, ExcDimensionMismatch(mm, nn));
1511
1512 number *const values = this->values.data();
1513 types::blas_int info = 0;
1514 const types::blas_int lda = std::max<types::blas_int>(1, nn);
1515 potrf(&LAPACKSupport::L, &nn, values, &lda, &info);
1516
1517 // info < 0 : the info-th argument had an illegal value
1518 Assert(info >= 0, ExcInternalError());
1519
1520 state = cholesky;
1521 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("potrf", info));
1522}
1523
1524
1525
1526template <typename number>
1527number
1528LAPACKFullMatrix<number>::reciprocal_condition_number(const number a_norm) const
1529{
1530 std::lock_guard<std::mutex> lock(mutex);
1531 Assert(state == cholesky, ExcState(state));
1532 number rcond = 0.;
1533
1534 const types::blas_int N = this->m();
1535 const number * values = this->values.data();
1536 types::blas_int info = 0;
1537 const types::blas_int lda = std::max<types::blas_int>(1, N);
1538 work.resize(3 * N);
1539 iwork.resize(N);
1540
1541 // use the same uplo as in Cholesky
1542 pocon(&LAPACKSupport::L,
1543 &N,
1544 values,
1545 &lda,
1546 &a_norm,
1547 &rcond,
1548 work.data(),
1549 iwork.data(),
1550 &info);
1551
1552 Assert(info >= 0, ExcInternalError());
1553
1554 return rcond;
1555}
1556
1557
1558
1559template <typename number>
1560number
1561LAPACKFullMatrix<number>::reciprocal_condition_number() const
1562{
1563 std::lock_guard<std::mutex> lock(mutex);
1564 Assert(property == upper_triangular || property == lower_triangular,
1565 ExcProperty(property));
1566 number rcond = 0.;
1567
1568 const types::blas_int N = this->m();
1569 const number *const values = this->values.data();
1570 types::blas_int info = 0;
1571 const types::blas_int lda = std::max<types::blas_int>(1, N);
1572 work.resize(3 * N);
1573 iwork.resize(N);
1574
1575 const char norm = '1';
1576 const char diag = 'N';
1577 const char uplo =
1578 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1579 trcon(&norm,
1580 &uplo,
1581 &diag,
1582 &N,
1583 values,
1584 &lda,
1585 &rcond,
1586 work.data(),
1587 iwork.data(),
1588 &info);
1589
1590 Assert(info >= 0, ExcInternalError());
1591
1592 return rcond;
1593}
1594
1595
1596
1597template <typename number>
1598void
1599LAPACKFullMatrix<number>::compute_svd()
1600{
1601 Assert(state == matrix, ExcState(state));
1602 state = LAPACKSupport::unusable;
1603
1604 const types::blas_int mm = this->m();
1605 const types::blas_int nn = this->n();
1606 wr.resize(std::max(mm, nn));
1607 std::fill(wr.begin(), wr.end(), 0.);
1608 ipiv.resize(8 * mm);
1609
1610 svd_u = std::make_unique<LAPACKFullMatrix<number>>(mm, mm);
1611 svd_vt = std::make_unique<LAPACKFullMatrix<number>>(nn, nn);
1612 types::blas_int info = 0;
1613
1614 // First determine optimal workspace size
1615 work.resize(1);
1616 types::blas_int lwork = -1;
1617
1618 // TODO double check size
1619 std::vector<typename numbers::NumberTraits<number>::real_type> real_work;
1620 if (numbers::NumberTraits<number>::is_complex)
1621 {
1622 // This array is only used by the complex versions.
1623 std::size_t min = std::min(this->m(), this->n());
1624 std::size_t max = std::max(this->m(), this->n());
1625 real_work.resize(
1626 std::max(5 * min * min + 5 * min, 2 * max * min + 2 * min * min + min));
1627 }
1628
1629 // make sure that the first entry in the work array is clear, in case the
1630 // routine does not completely overwrite the memory:
1631 work[0] = number();
1632 internal::LAPACKFullMatrixImplementation::gesdd_helper(LAPACKSupport::A,
1633 mm,
1634 nn,
1635 this->values,
1636 wr,
1637 svd_u->values,
1638 svd_vt->values,
1639 work,
1640 real_work,
1641 ipiv,
1642 lwork,
1643 info);
1644
1645 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("gesdd", info));
1646 // Resize the work array. Add one to the size computed by LAPACK to be on
1647 // the safe side.
1648 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1649
1650 work.resize(lwork);
1651 // Do the actual SVD.
1652 internal::LAPACKFullMatrixImplementation::gesdd_helper(LAPACKSupport::A,
1653 mm,
1654 nn,
1655 this->values,
1656 wr,
1657 svd_u->values,
1658 svd_vt->values,
1659 work,
1660 real_work,
1661 ipiv,
1662 lwork,
1663 info);
1664 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("gesdd", info));
1665
1666 work.resize(0);
1667 ipiv.resize(0);
1668
1669 state = LAPACKSupport::svd;
1670}
1671
1672
1673
1674template <typename number>
1675void
1676LAPACKFullMatrix<number>::compute_inverse_svd(const double threshold)
1677{
1678 if (state == LAPACKSupport::matrix)
1679 compute_svd();
1680
1681 Assert(state == LAPACKSupport::svd, ExcState(state));
1682
1684 const double lim = std::abs(wr[0]) * threshold;
1685 for (size_type i = 0; i < wr.size(); ++i)
1686 {
1687 if (std::abs(wr[i]) > lim)
1688 wr[i] = one / wr[i];
1689 else
1690 wr[i] = 0.;
1691 }
1692 state = LAPACKSupport::inverse_svd;
1693}
1694
1695
1696
1697template <typename number>
1698void
1699LAPACKFullMatrix<number>::compute_inverse_svd_with_kernel(
1700 const unsigned int kernel_size)
1701{
1702 if (state == LAPACKSupport::matrix)
1703 compute_svd();
1704
1705 Assert(state == LAPACKSupport::svd, ExcState(state));
1706
1708 const unsigned int n_wr = wr.size();
1709 for (size_type i = 0; i < n_wr - kernel_size; ++i)
1710 wr[i] = one / wr[i];
1711 for (size_type i = n_wr - kernel_size; i < n_wr; ++i)
1712 wr[i] = 0.;
1713 state = LAPACKSupport::inverse_svd;
1714}
1715
1716
1717
1718template <typename number>
1719void
1720LAPACKFullMatrix<number>::invert()
1721{
1722 Assert(state == matrix || state == lu || state == cholesky, ExcState(state));
1723 const types::blas_int mm = this->m();
1724 const types::blas_int nn = this->n();
1725 Assert(nn == mm, ExcNotQuadratic());
1726
1727 number *const values = this->values.data();
1728 types::blas_int info = 0;
1729
1730 if (property != symmetric)
1731 {
1732 if (state == matrix)
1733 compute_lu_factorization();
1734
1735 ipiv.resize(mm);
1736 inv_work.resize(mm);
1737 getri(&mm, values, &mm, ipiv.data(), inv_work.data(), &mm, &info);
1738 }
1739 else
1740 {
1741 if (state == matrix)
1742 compute_cholesky_factorization();
1743
1744 const types::blas_int lda = std::max<types::blas_int>(1, nn);
1745 potri(&LAPACKSupport::L, &nn, values, &lda, &info);
1746 // inverse is stored in lower diagonal, set the upper diagonal
1747 // appropriately:
1748 for (types::blas_int i = 0; i < nn; ++i)
1749 for (types::blas_int j = i + 1; j < nn; ++j)
1750 this->el(i, j) = this->el(j, i);
1751 }
1752
1753 Assert(info >= 0, ExcInternalError());
1755
1756 state = inverse_matrix;
1757}
1758
1759
1760
1761template <typename number>
1762void
1763LAPACKFullMatrix<number>::solve(Vector<number> &v, const bool transposed) const
1764{
1765 Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1766 AssertDimension(this->m(), v.size());
1767 const char * trans = transposed ? &T : &N;
1768 const types::blas_int nn = this->n();
1769 const number *const values = this->values.data();
1770 const types::blas_int n_rhs = 1;
1771 types::blas_int info = 0;
1772
1773 if (state == lu)
1774 {
1775 getrs(
1776 trans, &nn, &n_rhs, values, &nn, ipiv.data(), v.begin(), &nn, &info);
1777 }
1778 else if (state == cholesky)
1779 {
1780 potrs(&LAPACKSupport::L, &nn, &n_rhs, values, &nn, v.begin(), &nn, &info);
1781 }
1782 else if (property == upper_triangular || property == lower_triangular)
1783 {
1784 const char uplo =
1785 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1786
1787 const types::blas_int lda = nn;
1788 const types::blas_int ldb = nn;
1789 trtrs(
1790 &uplo, trans, "N", &nn, &n_rhs, values, &lda, v.begin(), &ldb, &info);
1791 }
1792 else
1793 {
1794 Assert(false,
1795 ExcMessage(
1796 "The matrix has to be either factorized or triangular."));
1797 }
1798
1799 Assert(info == 0, ExcInternalError());
1800}
1801
1802
1803
1804template <typename number>
1805void
1806LAPACKFullMatrix<number>::solve(LAPACKFullMatrix<number> &B,
1807 const bool transposed) const
1808{
1809 Assert(B.state == matrix, ExcState(B.state));
1810
1811 Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1812 AssertDimension(this->m(), B.m());
1813 const char * trans = transposed ? &T : &N;
1814 const types::blas_int nn = this->n();
1815 const number *const values = this->values.data();
1816 const types::blas_int n_rhs = B.n();
1817 types::blas_int info = 0;
1818
1819 if (state == lu)
1820 {
1821 getrs(trans,
1822 &nn,
1823 &n_rhs,
1824 values,
1825 &nn,
1826 ipiv.data(),
1827 B.values.data(),
1828 &nn,
1829 &info);
1830 }
1831 else if (state == cholesky)
1832 {
1833 potrs(&LAPACKSupport::L,
1834 &nn,
1835 &n_rhs,
1836 values,
1837 &nn,
1838 B.values.data(),
1839 &nn,
1840 &info);
1841 }
1842 else if (property == upper_triangular || property == lower_triangular)
1843 {
1844 const char uplo =
1845 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1846
1847 const types::blas_int lda = nn;
1848 const types::blas_int ldb = nn;
1849 trtrs(&uplo,
1850 trans,
1851 "N",
1852 &nn,
1853 &n_rhs,
1854 values,
1855 &lda,
1856 B.values.data(),
1857 &ldb,
1858 &info);
1859 }
1860 else
1861 {
1862 Assert(false,
1863 ExcMessage(
1864 "The matrix has to be either factorized or triangular."));
1865 }
1866
1867 Assert(info == 0, ExcInternalError());
1868}
1869
1870
1871
1872template <typename number>
1873number
1874LAPACKFullMatrix<number>::determinant() const
1875{
1876 Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1877
1878 // LAPACK doesn't offer a function to compute a matrix determinant.
1879 // This is due to the difficulty in maintaining numerical accuracy, as the
1880 // calculations are likely to overflow or underflow. See
1881 // http://www.netlib.org/lapack/faq.html#_are_there_routines_in_lapack_to_compute_determinants
1882 //
1883 // However, after a PLU decomposition one can compute this by multiplication
1884 // of the diagonal entries with one another. One must take into consideration
1885 // the number of permutations (row swaps) stored in the P matrix.
1886 //
1887 // See the implementations in the blaze library (detNxN)
1888 // https://bitbucket.org/blaze-lib/blaze
1889 // and also
1890 // https://dualm.wordpress.com/2012/01/06/computing-determinant-in-fortran/
1891 // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?p=341&#p336
1892 // for further information.
1893 Assert(state == lu, ExcState(state));
1894 Assert(ipiv.size() == this->m(), ExcInternalError());
1895 number det = 1.0;
1896 for (size_type i = 0; i < this->m(); ++i)
1897 det *=
1898 (ipiv[i] == types::blas_int(i + 1) ? this->el(i, i) : -this->el(i, i));
1899 return det;
1900}
1901
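A self-contained sketch (not deal.II code) of the bookkeeping described in the comments above: after an LU factorization with partial pivoting, det(A) is the product of U's diagonal entries with one sign flip per actual row swap; LAPACK records pivots 1-based, hence the ipiv[i] == i+1 test in the member function. The LU code below is hand-rolled for the example only:

#include <cmath>
#include <cstdio>
#include <utility>
#include <vector>

int main()
{
  // LU with partial pivoting on a 3x3 row-major matrix.
  const unsigned        n = 3;
  std::vector<double>   A = {0., 2., 1., 1., 0., 3., 2., 1., 0.};
  std::vector<unsigned> piv(n);
  for (unsigned k = 0; k < n; ++k)
    {
      unsigned p = k; // pick the largest pivot in column k
      for (unsigned i = k + 1; i < n; ++i)
        if (std::abs(A[i * n + k]) > std::abs(A[p * n + k]))
          p = i;
      piv[k] = p;
      if (p != k)
        for (unsigned j = 0; j < n; ++j)
          std::swap(A[k * n + j], A[p * n + j]);
      for (unsigned i = k + 1; i < n; ++i)
        {
          A[i * n + k] /= A[k * n + k];
          for (unsigned j = k + 1; j < n; ++j)
            A[i * n + j] -= A[i * n + k] * A[k * n + j];
        }
    }

  // det(A) = prod(diag(U)) * (-1)^(number of row swaps)
  double det = 1.;
  for (unsigned k = 0; k < n; ++k)
    det *= (piv[k] == k ? A[k * n + k] : -A[k * n + k]);
  std::printf("det = %g\n", det); // the matrix above has determinant 13
}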
1902
1903
1904template <typename number>
1905void
1906LAPACKFullMatrix<number>::compute_eigenvalues(const bool right, const bool left)
1907{
1908 Assert(state == matrix, ExcState(state));
1909 const types::blas_int nn = this->n();
1910 wr.resize(nn);
1911 wi.resize(nn);
1912 if (right)
1913 vr.resize(nn * nn);
1914 if (left)
1915 vl.resize(nn * nn);
1916
1917 types::blas_int info = 0;
1918 types::blas_int lwork = 1;
1919 const char jobvr = (right) ? V : N;
1920 const char jobvl = (left) ? V : N;
1921
1922 /*
1923 * The LAPACK routine xGEEV requires a sufficiently large work array; the
1924 * minimum requirement is
1925 *
1926 * work.size >= 4*nn.
1927 *
1928 * However, for better performance, a larger work array may be needed. The
1929 * first call determines the optimal work size and the second does the work.
1930 */
1931 lwork = -1;
1932 work.resize(1);
1933
1934 std::vector<typename numbers::NumberTraits<number>::real_type> real_work;
1935 if (numbers::NumberTraits<number>::is_complex)
1936 // This array is only used by the complex versions.
1937 real_work.resize(2 * this->m());
1938 internal::LAPACKFullMatrixImplementation::geev_helper(jobvl,
1939 jobvr,
1940 this->values,
1941 this->m(),
1942 wr,
1943 wi,
1944 vl,
1945 vr,
1946 work,
1947 real_work,
1948 lwork,
1949 info);
1950
1951 // geev returns info=0 on success. Since we only queried the optimal size
1952 // for work, everything else would not be acceptable.
1953 Assert(info == 0, ExcInternalError());
1954 // Allocate working array according to suggestion (same strategy as was
1955 // noted in compute_svd).
1956 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1957
1958 // resize workspace array
1959 work.resize(lwork);
1960 real_work.resize(lwork);
1961
1962 // Finally compute the eigenvalues.
1963 internal::LAPACKFullMatrixImplementation::geev_helper(jobvl,
1964 jobvr,
1965 this->values,
1966 this->m(),
1967 wr,
1968 wi,
1969 vl,
1970 vr,
1971 work,
1972 real_work,
1973 lwork,
1974 info);
1975
1976 Assert(info >= 0, ExcInternalError());
1977 if (info < 0)
1978 {
1979 AssertThrow(info == 0,
1980 ExcMessage("Lapack error in geev: the " +
1981 std::to_string(-info) +
1982 "-th"
1983 " parameter had an illegal value."));
1984 }
1985 else
1986 {
1987 AssertThrow(
1988 info == 0,
1989 ExcMessage(
1990 "Lapack error in geev: the QR algorithm failed to compute "
1991 "all the eigenvalues, and no eigenvectors have been computed."));
1992 }
1993
1995}
1996
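The lwork = -1 workspace-query pattern used above (and again in compute_svd and the sy* routines below) can be reproduced directly against the Fortran interface. A hedged, self-contained sketch, assuming a system LAPACK is linked (e.g. -llapack); the dgeev_ prototype is the standard Fortran signature, declared by hand here:

#include <cstdio>
#include <vector>

extern "C" void dgeev_(const char *jobvl, const char *jobvr, const int *n,
                       double *a, const int *lda, double *wr, double *wi,
                       double *vl, const int *ldvl, double *vr, const int *ldvr,
                       double *work, const int *lwork, int *info);

int main()
{
  const int           n = 3, lda = 3, ldv = 1;
  std::vector<double> a = {2, 0, 0, 1, 3, 0, 0, 1, 4}; // column-major 3x3
  std::vector<double> wr(n), wi(n);
  int                 info  = 0;
  int                 lwork = -1; // first call: workspace query only
  std::vector<double> work(1);

  dgeev_("N", "N", &n, a.data(), &lda, wr.data(), wi.data(), nullptr, &ldv,
         nullptr, &ldv, work.data(), &lwork, &info);

  lwork = static_cast<int>(work[0]) + 1; // optimal size reported in work[0]
  work.resize(lwork);                    // second call: do the actual work

  dgeev_("N", "N", &n, a.data(), &lda, wr.data(), wi.data(), nullptr, &ldv,
         nullptr, &ldv, work.data(), &lwork, &info);

  for (int i = 0; i < n; ++i)
    std::printf("lambda_%d = %g%+gi\n", i, wr[i], wi[i]);
}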
1997
1998
1999namespace
2000{
2001 // This function extracts complex eigenvectors from the underlying 'number'
2002 // array 'vr' of the LAPACK eigenvalue routine. For real-valued matrices
2003 // addressed by this function specialization, we might get complex
2004 // eigenvalues, which come in complex-conjugate pairs. In LAPACK, a compact
2005 // storage scheme is applied that stores the real and imaginary part of
2006 // eigenvectors only once, putting the real parts in one column and the
2007 // imaginary part in the next of a real-valued array. Here, we do the
2008 // unpacking into the usual complex values.
2009 template <typename RealNumber>
2010 void
2011 unpack_lapack_eigenvector_and_increment_index(
2012 const std::vector<RealNumber> & vr,
2013 const std::complex<RealNumber> & eigenvalue,
2014 FullMatrix<std::complex<RealNumber>> &result,
2015 unsigned int & index)
2016 {
2017 const std::size_t n = result.n();
2018 if (eigenvalue.imag() != 0.)
2019 {
2020 for (std::size_t j = 0; j < n; ++j)
2021 {
2022 result(j, index).real(vr[index * n + j]);
2023 result(j, index + 1).real(vr[index * n + j]);
2024 result(j, index).imag(vr[(index + 1) * n + j]);
2025 result(j, index + 1).imag(-vr[(index + 1) * n + j]);
2026 }
2027
2028 // we filled two columns with the complex-conjugate pair, so increment
2029 // returned index by 2
2030 index += 2;
2031 }
2032 else
2033 {
2034 for (unsigned int j = 0; j < n; ++j)
2035 result(j, index).real(vr[index * n + j]);
2036
2037 // real-valued case, we only filled one column
2038 ++index;
2039 }
2040 }
2041
2042 // This specialization fills the eigenvectors for complex-valued matrices,
2043 // in which case we simply read off the entry in the 'vr' array.
2044 template <typename ComplexNumber>
2045 void
2046 unpack_lapack_eigenvector_and_increment_index(
2047 const std::vector<ComplexNumber> &vr,
2048 const ComplexNumber &,
2050 unsigned int & index)
2051 {
2052 const std::size_t n = result.n();
2053 for (unsigned int j = 0; j < n; ++j)
2054 result(j, index) = vr[index * n + j];
2055
2056 // complex-valued case always only fills a single column
2057 ++index;
2058 }
2059} // namespace
2060
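A short toy sketch (not part of deal.II) of the packed format the helper above decodes: for a real input matrix, a complex-conjugate eigenvalue pair is returned as two consecutive real columns holding the shared real part and the imaginary part, so the two eigenvectors are col_real + i*col_imag and its conjugate:

#include <complex>
#include <cstddef>
#include <vector>

// Given the two consecutive real columns LAPACK stores for a conjugate pair,
// rebuild the two complex eigenvectors.
void unpack_pair(const std::vector<double> &col_real,
                 const std::vector<double> &col_imag,
                 std::vector<std::complex<double>> &v1,
                 std::vector<std::complex<double>> &v2)
{
  const std::size_t n = col_real.size();
  v1.resize(n);
  v2.resize(n);
  for (std::size_t j = 0; j < n; ++j)
    {
      v1[j] = std::complex<double>(col_real[j], col_imag[j]);
      v2[j] = std::complex<double>(col_real[j], -col_imag[j]); // conjugate
    }
}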
2061
2062
2063template <typename number>
2066{
2068 Assert(vr.size() == this->n_rows() * this->n_cols(),
2069 ExcMessage("Right eigenvectors are not available! Did you "
2070 "set the associated flag in compute_eigenvalues()?"));
2071
2073 result(n(), n());
2074
2075 for (unsigned int i = 0; i < n();)
2076 unpack_lapack_eigenvector_and_increment_index(vr, eigenvalue(i), result, i);
2077
2078 return result;
2079}
2080
2081
2082
2083template <typename number>
2086{
2088 Assert(vl.size() == this->n_rows() * this->n_cols(),
2089 ExcMessage("Left eigenvectors are not available! Did you "
2090 "set the associated flag in compute_eigenvalues()?"));
2091
2093 result(n(), n());
2094
2095 for (unsigned int i = 0; i < n();)
2096 unpack_lapack_eigenvector_and_increment_index(vl, eigenvalue(i), result, i);
2097
2098 return result;
2099}
2100
2101
2102
2103template <typename number>
2104void
2105LAPACKFullMatrix<number>::compute_eigenvalues_symmetric(
2106 const number lower_bound,
2107 const number upper_bound,
2108 const number abs_accuracy,
2109 Vector<number> & eigenvalues,
2110 FullMatrix<number> & eigenvectors)
2111{
2112 Assert(state == matrix, ExcState(state));
2113 const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
2114 Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2115
2116 wr.resize(nn);
2117 LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
2118
2119 number *const values_A = this->values.data();
2120 number *const values_eigenvectors = matrix_eigenvectors.values.data();
2121
2122 types::blas_int info(0), lwork(-1), n_eigenpairs(0);
2123 const char *const jobz(&V);
2124 const char *const uplo(&U);
2125 const char *const range(&V);
2126 const types::blas_int *const dummy(&one);
2127 std::vector<types::blas_int> iwork(static_cast<size_type>(5 * nn));
2128 std::vector<types::blas_int> ifail(static_cast<size_type>(nn));
2129
2130
2131 /*
2132 * The LAPACK routine xSYEVX requires a sufficiently large work array; the
2133 * minimum requirement is
2134 *
2135 * work.size >= 8*nn.
2136 *
2137 * However, for better performance, a larger work array may be needed. The
2138 * first call determines the optimal work size and the second does the work.
2139 */
2140 work.resize(1);
2141
2142 syevx(jobz,
2143 range,
2144 uplo,
2145 &nn,
2146 values_A,
2147 &nn,
2148 &lower_bound,
2149 &upper_bound,
2150 dummy,
2151 dummy,
2152 &abs_accuracy,
2153 &n_eigenpairs,
2154 wr.data(),
2155 values_eigenvectors,
2156 &nn,
2157 work.data(),
2158 &lwork,
2159 iwork.data(),
2160 ifail.data(),
2161 &info);
2162 // syevx returns info=0 on success. Since we only queried the optimal size
2163 // for work, everything else would not be acceptable.
2164 Assert(info == 0, ExcInternalError());
2165 // Allocate working array according to suggestion (same strategy as was noted
2166 // in compute_svd).
2167 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2168 work.resize(static_cast<size_type>(lwork));
2169
2170 // Finally compute the eigenvalues.
2171 syevx(jobz,
2172 range,
2173 uplo,
2174 &nn,
2175 values_A,
2176 &nn,
2177 &lower_bound,
2178 &upper_bound,
2179 dummy,
2180 dummy,
2181 &abs_accuracy,
2182 &n_eigenpairs,
2183 wr.data(),
2184 values_eigenvectors,
2185 &nn,
2186 work.data(),
2187 &lwork,
2188 iwork.data(),
2189 ifail.data(),
2190 &info);
2191
2192 // Negative return value implies a wrong argument. This should be internal.
2193 Assert(info >= 0, ExcInternalError());
2194 if (info < 0)
2195 {
2196 AssertThrow(info == 0,
2197 ExcMessage("Lapack error in syevx: the " +
2198 std::to_string(-info) +
2199 "-th"
2200 " parameter had an illegal value."));
2201 }
2202 else if ((info > 0) && (info <= nn))
2203 {
2204 AssertThrow(info == 0,
2205 ExcMessage(
2206 "Lapack error in syevx: " + std::to_string(info) +
2207 " eigenvectors failed to converge."
2208 " (You may need to scale the abs_accuracy according"
2209 " to your matrix norm.)"));
2210 }
2211 else
2212 {
2213 AssertThrow(info == 0,
2214 ExcMessage("Lapack error in syevx: unknown error."));
2215 }
2216
2217 eigenvalues.reinit(n_eigenpairs);
2218 eigenvectors.reinit(nn, n_eigenpairs, true);
2219
2220 for (size_type i = 0; i < static_cast<size_type>(n_eigenpairs); ++i)
2221 {
2222 eigenvalues(i) = wr[i];
2223 size_type col_begin(i * nn);
2224 for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2225 {
2226 eigenvectors(j, i) = values_eigenvectors[col_begin + j];
2227 }
2228 }
2229
2231}
2232
2233
2234
2235template <typename number>
2236void
2237LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric(
2238 LAPACKFullMatrix<number> & B,
2239 const number lower_bound,
2240 const number upper_bound,
2241 const number abs_accuracy,
2242 Vector<number> & eigenvalues,
2243 std::vector<Vector<number>> &eigenvectors,
2244 const types::blas_int itype)
2245{
2246 Assert(state == matrix, ExcState(state));
2247 const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
2248 Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2249 Assert(B.m() == B.n(), ExcNotQuadratic());
2250 Assert(static_cast<size_type>(nn) == B.n(), ExcDimensionMismatch(nn, B.n()));
2251
2252 wr.resize(nn);
2253 LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
2254
2255 number *const values_A = this->values.data();
2256 number *const values_B = B.values.data();
2257 number *const values_eigenvectors = matrix_eigenvectors.values.data();
2258
2259 types::blas_int info(0), lwork(-1), n_eigenpairs(0);
2260 const char *const jobz(&V);
2261 const char *const uplo(&U);
2262 const char *const range(&V);
2263 const types::blas_int *const dummy(&one);
2264 iwork.resize(static_cast<size_type>(5 * nn));
2265 std::vector<types::blas_int> ifail(static_cast<size_type>(nn));
2266
2267
2268 /*
2269 * The LAPACK routine xSYGVX requires a sufficiently large work array; the
2270 * minimum requirement is
2271 *
2272 * work.size >= 8*nn.
2273 *
2274 * However, for better performance, a larger work array may be needed. The
2275 * first call determines the optimal work size and the second does the work.
2276 */
2277 work.resize(1);
2278
2279 sygvx(&itype,
2280 jobz,
2281 range,
2282 uplo,
2283 &nn,
2284 values_A,
2285 &nn,
2286 values_B,
2287 &nn,
2288 &lower_bound,
2289 &upper_bound,
2290 dummy,
2291 dummy,
2292 &abs_accuracy,
2293 &n_eigenpairs,
2294 wr.data(),
2295 values_eigenvectors,
2296 &nn,
2297 work.data(),
2298 &lwork,
2299 iwork.data(),
2300 ifail.data(),
2301 &info);
2302 // sygvx returns info=0 on success. Since we only queried the optimal size
2303 // for work, everything else would not be acceptable.
2304 Assert(info == 0, ExcInternalError());
2305 // Allocate working array according to suggestion (same strategy as was
2306 // noted in compute_svd).
2307 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2308
2309 // resize workspace arrays
2310 work.resize(static_cast<size_type>(lwork));
2311
2312 // Finally compute the generalized eigenvalues.
2313 sygvx(&itype,
2314 jobz,
2315 range,
2316 uplo,
2317 &nn,
2318 values_A,
2319 &nn,
2320 values_B,
2321 &nn,
2322 &lower_bound,
2323 &upper_bound,
2324 dummy,
2325 dummy,
2326 &abs_accuracy,
2327 &n_eigenpairs,
2328 wr.data(),
2329 values_eigenvectors,
2330 &nn,
2331 work.data(),
2332 &lwork,
2333 iwork.data(),
2334 ifail.data(),
2335 &info);
2336
2337 // Negative return value implies a wrong argument. This should be internal.
2338 Assert(info >= 0, ExcInternalError());
2339 if (info < 0)
2340 {
2341 AssertThrow(info == 0,
2342 ExcMessage("Lapack error in sygvx: the " +
2343 std::to_string(-info) +
2344 "-th"
2345 " parameter had an illegal value."));
2346 }
2347 else if ((info > 0) && (info <= nn))
2348 {
2349 AssertThrow(
2350 info == 0,
2351 ExcMessage(
2352 "Lapack error in sygvx: ssyevx/dsyevx failed to converge, and " +
2353 std::to_string(info) +
2354 " eigenvectors failed to converge."
2355 " (You may need to scale the abs_accuracy"
2356 " according to the norms of matrices A and B.)"));
2357 }
2358 else if ((info > nn) && (info <= 2 * nn))
2359 {
2360 AssertThrow(info == 0,
2361 ExcMessage(
2362 "Lapack error in sygvx: the leading minor of order " +
2363 std::to_string(info - nn) +
2364 " of matrix B is not positive-definite."
2365 " The factorization of B could not be completed and"
2366 " no eigenvalues or eigenvectors were computed."));
2367 }
2368 else
2369 {
2370 AssertThrow(info == 0,
2371 ExcMessage("Lapack error in sygvx: unknown error."));
2372 }
2373
2374 eigenvalues.reinit(n_eigenpairs);
2375 eigenvectors.resize(n_eigenpairs);
2376
2377 for (size_type i = 0; i < static_cast<size_type>(n_eigenpairs); ++i)
2378 {
2379 eigenvalues(i) = wr[i];
2380 size_type col_begin(i * nn);
2381 eigenvectors[i].reinit(nn, true);
2382 for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2383 {
2384 eigenvectors[i](j) = values_eigenvectors[col_begin + j];
2385 }
2386 }
2387
2389}
2390
2391
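The routine above follows LAPACK's two-call workspace-query idiom (a first call with lwork = -1, then the actual computation) and returns only the eigenpairs found in the requested interval. A minimal caller-side sketch, not part of this file -- the function name example and all numeric values are illustrative assumptions:

#include <deal.II/lac/lapack_full_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// A is symmetric, B is symmetric positive definite; both are overwritten.
void example(LAPACKFullMatrix<double> &A, LAPACKFullMatrix<double> &B)
{
  Vector<double>              eigenvalues;
  std::vector<Vector<double>> eigenvectors;

  // Eigenpairs of A x = lambda B x with eigenvalues in (0, 10]; the
  // workspace query and allocation happen inside the call.
  A.compute_generalized_eigenvalues_symmetric(B,
                                              /*lower_bound=*/0.,
                                              /*upper_bound=*/10.,
                                              /*abs_accuracy=*/1e-10,
                                              eigenvalues,
                                              eigenvectors);

  // eigenvalues.size() eigenpairs were found in the interval;
  // eigenvectors[i] belongs to eigenvalues(i).
}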
2392
2393template <typename number>
2394void
2395LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric(
2396 LAPACKFullMatrix<number> &B,
2397 std::vector<Vector<number>> &eigenvectors,
2398 const types::blas_int itype)
2399{
2400 Assert(state == matrix, ExcState(state));
2401 const types::blas_int nn = this->n();
2402 Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2403 Assert(B.m() == B.n(), ExcNotQuadratic());
2404 Assert(static_cast<size_type>(nn) == B.n(), ExcDimensionMismatch(nn, B.n()));
2405 Assert(eigenvectors.size() <= static_cast<size_type>(nn),
2406 ExcMessage("eigenvectors.size() > matrix.n()"));
2407
2408 wr.resize(nn);
2409 wi.resize(nn); // This is set purely for consistency reasons with the
2410 // eigenvalues() function.
2411
2412 number *const values_A = this->values.data();
2413 number *const values_B = B.values.data();
2414
2415 types::blas_int info = 0;
2416 types::blas_int lwork = -1;
2417 const char *const jobz = (eigenvectors.size() > 0) ? (&V) : (&N);
2418 const char *const uplo = (&U);
2419
2420 /*
2421 * The LAPACK routine xSYGV requires a sufficiently large work array; the
2422 * minimum requirement is
2423 *
2424 * work.size >= 3*nn - 1.
2425 *
2426 * However, for better performance, a larger work array may be needed. The
2427 * first call determines the optimal work size and the second does the work.
2428 */
2429 work.resize(1);
2430
2431 sygv(&itype,
2432 jobz,
2433 uplo,
2434 &nn,
2435 values_A,
2436 &nn,
2437 values_B,
2438 &nn,
2439 wr.data(),
2440 work.data(),
2441 &lwork,
2442 &info);
2443 // sygv returns info=0 on success. Since we only queried the optimal size
2444 // for work, everything else would not be acceptable.
2445 Assert(info == 0, ExcInternalError());
2446 // Allocate working array according to suggestion (same strategy as was
2447 // noted in compute_svd).
2448 lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2449
2450 // resize workspace array
2451 work.resize(static_cast<size_type>(lwork));
2452
2453 // Finally compute the generalized eigenvalues.
2454 sygv(&itype,
2455 jobz,
2456 uplo,
2457 &nn,
2458 values_A,
2459 &nn,
2460 values_B,
2461 &nn,
2462 wr.data(),
2463 work.data(),
2464 &lwork,
2465 &info);
2466 // Negative return value implies a wrong argument. This should be internal.
2467
2468 Assert(info >= 0, ExcInternalError());
2469 if (info < 0)
2470 {
2471 AssertThrow(info == 0,
2472 ExcMessage("Lapack error in sygv: the " +
2473 std::to_string(-info) +
2474 "-th"
2475 " parameter had an illegal value."));
2476 }
2477 else if ((info > 0) && (info <= nn))
2478 {
2479 AssertThrow(
2480 info == 0,
2481 ExcMessage(
2482 "Lapack error in sygv: ssyev/dsyev failed to converge, and " +
2483 std::to_string(info) +
2484 " off-diagonal elements of an intermediate "
2485 " tridiagonal did not converge to zero."
2486 " (You may need to scale the abs_accuracy"
2487 " according to the norms of matrices A and B.)"));
2488 }
2489 else if ((info > nn) && (info <= 2 * nn))
2490 {
2491 AssertThrow(info == 0,
2492 ExcMessage(
2493 "Lapack error in sygv: the leading minor of order " +
2494 std::to_string(info - nn) +
2495 " of matrix B is not positive-definite."
2496 " The factorization of B could not be completed and"
2497 " no eigenvalues or eigenvectors were computed."));
2498 }
2499 else
2500 {
2501 AssertThrow(info == 0,
2502 ExcMessage("Lapack error in sygv: unknown error."));
2503 }
2504
2505 for (size_type i = 0; i < eigenvectors.size(); ++i)
2506 {
2507 size_type col_begin(i * nn);
2508 eigenvectors[i].reinit(nn, true);
2509 for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2510 {
2511 eigenvectors[i](j) = values_A[col_begin + j];
2512 }
2513 }
2515}
2516
2517
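As with the bounded variant above, the caller interacts only with deal.II types; how many eigenvectors are copied back is decided by the size the caller gives the eigenvectors argument (which also selects jobz = 'V' or 'N' in the function body). A hedged sketch, not part of this file -- the function name and sizes are assumptions, and A and B are taken to be at least 3x3:

#include <deal.II/lac/lapack_full_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// A symmetric, B symmetric positive definite; both are overwritten.
void example_all(LAPACKFullMatrix<double> &A, LAPACKFullMatrix<double> &B)
{
  // Request eigenvectors for the three smallest eigenvalues only.
  std::vector<Vector<double>> eigenvectors(3);

  // itype defaults to 1, i.e. the problem A x = lambda B x.
  A.compute_generalized_eigenvalues_symmetric(B, eigenvectors);

  // All eigenvalues were computed by xSYGV and stored in the member
  // array wr (see the function body above); eigenvectors[0..2] now hold
  // the eigenvectors belonging to the three smallest eigenvalues.
}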
2518
2519template <typename number>
2520void
2521LAPACKFullMatrix<number>::print_formatted(std::ostream &out,
2522 const unsigned int precision,
2523 const bool scientific,
2524 const unsigned int width_,
2525 const char * zero_string,
2526 const double denominator,
2527 const double threshold) const
2528{
2529 unsigned int width = width_;
2530
2531 Assert((!this->empty()) || (this->n() + this->m() == 0), ExcInternalError());
2532 Assert(state == LAPACKSupport::matrix ||
2533 state == LAPACKSupport::inverse_matrix ||
2534 state == LAPACKSupport::cholesky,
2535 ExcState(state));
2536
2537 // set output format, but store old
2538 // state
2539 std::ios::fmtflags old_flags = out.flags();
2540 std::streamsize old_precision = out.precision(precision);
2541
2542 if (scientific)
2543 {
2544 out.setf(std::ios::scientific, std::ios::floatfield);
2545 if (width == 0u)
2546 width = precision + 7;
2547 }
2548 else
2549 {
2550 out.setf(std::ios::fixed, std::ios::floatfield);
2551 if (width == 0u)
2552 width = precision + 2;
2553 }
2554
2555 for (size_type i = 0; i < this->m(); ++i)
2556 {
2557 // Cholesky is stored in lower triangular, so just output this part:
2558 const size_type nc = state == LAPACKSupport::cholesky ? i + 1 : this->n();
2559 for (size_type j = 0; j < nc; ++j)
2560 // we might have complex numbers, so use abs also to check for nan
2561 // since there is no isnan on complex numbers
2562 if (numbers::is_nan(std::abs((*this)(i, j))))
2563 out << std::setw(width) << (*this)(i, j) << ' ';
2564 else if (std::abs(this->el(i, j)) > threshold)
2565 out << std::setw(width) << this->el(i, j) * denominator << ' ';
2566 else
2567 out << std::setw(width) << zero_string << ' ';
2568 out << std::endl;
2569 }
2570
2571 AssertThrow(out.fail() == false, ExcIO());
2572 // reset output format
2573 out.flags(old_flags);
2574 out.precision(old_precision);
2575}
2576
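A hedged usage sketch for the printing routine above, not part of this file; the function name and all values are illustrative. Note the assertion at the top of the function: printing requires the matrix to still be in a printable state such as matrix or cholesky.

#include <deal.II/lac/lapack_full_matrix.h>

#include <iostream>

using namespace dealii;

void print_example(const LAPACKFullMatrix<double> &M)
{
  // 5 digits in scientific notation, automatic column width
  // (precision + 7), entries of magnitude <= 1e-12 printed as "0".
  M.print_formatted(std::cout,
                    /*precision=*/5,
                    /*scientific=*/true,
                    /*width=*/0,
                    /*zero_string=*/"0",
                    /*denominator=*/1.,
                    /*threshold=*/1e-12);
}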
2577
2578//----------------------------------------------------------------------//
2579
2580template <typename number>
2581void
2582PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M)
2583{
2584 matrix = &M;
2585 mem = nullptr;
2586}
2587
2588
2589template <typename number>
2590void
2591PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M,
2592 VectorMemory<Vector<number>> &V)
2593{
2594 matrix = &M;
2595 mem = &V;
2596}
2597
2598
2599template <typename number>
2600void
2601PreconditionLU<number>::vmult(Vector<number> &dst,
2602 const Vector<number> &src) const
2603{
2604 dst = src;
2605 matrix->solve(dst, false);
2606}
2607
2608
2609template <typename number>
2610void
2611PreconditionLU<number>::Tvmult(Vector<number> &dst,
2612 const Vector<number> &src) const
2613{
2614 dst = src;
2615 matrix->solve(dst, true);
2616}
2617
2618
2619template <typename number>
2620void
2621PreconditionLU<number>::vmult(BlockVector<number> &dst,
2622 const BlockVector<number> &src) const
2623{
2624 Assert(mem != nullptr, ExcNotInitialized());
2625 Vector<number> *aux = mem->alloc();
2626 *aux = src;
2627 matrix->solve(*aux, false);
2628 dst = *aux;
2629}
2630
2631
2632template <typename number>
2633void
2634PreconditionLU<number>::Tvmult(BlockVector<number> &dst,
2635 const BlockVector<number> &src) const
2636{
2637 Assert(mem != nullptr, ExcNotInitialized());
2638 Vector<number> *aux = mem->alloc();
2639 *aux = src;
2640 matrix->solve(*aux, true);
2641 dst = *aux;
2642}
2643
2644
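PreconditionLU only forwards to LAPACKFullMatrix::solve() on the matrix it was initialized with, so the factorization has to exist before the preconditioner is applied. A minimal caller-side sketch, not part of this file -- the function name is an assumption:

#include <deal.II/lac/lapack_full_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

void precondition_example(LAPACKFullMatrix<double> &M,
                          const Vector<double>     &rhs)
{
  // solve() inside vmult() requires an existing LU (or Cholesky)
  // factorization; PreconditionLU does not factorize anything itself.
  M.compute_lu_factorization();

  PreconditionLU<double> prec;
  prec.initialize(M);

  Vector<double> x(rhs.size());
  prec.vmult(x, rhs); // x = M^{-1} rhs, using the stored LU factors
}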
2645
2646#include "lapack_full_matrix.inst"
2647
2648