Reference documentation for deal.II version 9.1.1
lapack_full_matrix.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/base/numbers.h>
17 
18 #include <deal.II/lac/blas_extension_templates.h>
19 #include <deal.II/lac/block_vector.h>
20 #include <deal.II/lac/full_matrix.h>
21 #include <deal.II/lac/lapack_full_matrix.h>
22 #include <deal.II/lac/lapack_support.h>
23 #include <deal.II/lac/lapack_templates.h>
24 #include <deal.II/lac/sparse_matrix.h>
25 #include <deal.II/lac/utilities.h>
26 #include <deal.II/lac/vector.h>
27 
28 #include <iomanip>
29 #include <iostream>
30 
31 DEAL_II_NAMESPACE_OPEN
32 
33 using namespace LAPACKSupport;
34 
35 namespace internal
36 {
37  namespace LAPACKFullMatrixImplementation
38  {
39  // ZGEEV/CGEEV and DGEEV/SGEEV need different work arrays and different
40  // output arrays for eigenvalues. This makes working with generic scalar
41  // types a bit difficult. To get around this, geev_helper has the same
42  // signature for real and complex arguments, but it ignores some
43  // parameters when called with a real type and ignores different
44  // parameters when called with a complex type.
45  template <typename T>
46  void
47  geev_helper(const char vl,
48 const char vr,
49 AlignedVector<T> & matrix,
50  const types::blas_int n_rows,
51  std::vector<T> & real_part_eigenvalues,
52  std::vector<T> & imag_part_eigenvalues,
53  std::vector<T> & left_eigenvectors,
54  std::vector<T> & right_eigenvectors,
55  std::vector<T> & real_work,
56  std::vector<T> & /*complex_work*/,
57  const types::blas_int work_flag,
58  types::blas_int & info)
59  {
60  static_assert(std::is_same<T, double>::value ||
61  std::is_same<T, float>::value,
62  "Only implemented for double and float");
63 Assert(matrix.size() == static_cast<std::size_t>(n_rows * n_rows),
64 ExcInternalError());
65 Assert(static_cast<std::size_t>(n_rows) <= real_part_eigenvalues.size(),
66 ExcInternalError());
67 Assert(static_cast<std::size_t>(n_rows) <= imag_part_eigenvalues.size(),
68 ExcInternalError());
69  if (vl == 'V')
70  Assert(static_cast<std::size_t>(n_rows * n_rows) <=
71 left_eigenvectors.size(),
72 ExcInternalError());
73  if (vr == 'V')
74  Assert(static_cast<std::size_t>(n_rows * n_rows) <=
75 right_eigenvectors.size(),
76 ExcInternalError());
77  Assert(work_flag == -1 ||
78 static_cast<std::size_t>(2 * n_rows) <= real_work.size(),
79 ExcInternalError());
80 Assert(work_flag == -1 || std::max<long int>(1, 3 * n_rows) <= work_flag,
81 ExcInternalError());
82  geev(&vl,
83  &vr,
84  &n_rows,
85  matrix.data(),
86  &n_rows,
87  real_part_eigenvalues.data(),
88  imag_part_eigenvalues.data(),
89  left_eigenvectors.data(),
90  &n_rows,
91  right_eigenvectors.data(),
92  &n_rows,
93  real_work.data(),
94  &work_flag,
95  &info);
96  }
97 
98  template <typename T>
99  void
100  geev_helper(const char vl,
101  const char vr,
102  AlignedVector<std::complex<T>> &matrix,
103  const types::blas_int n_rows,
104  std::vector<T> & /*real_part_eigenvalues*/,
105  std::vector<std::complex<T>> &eigenvalues,
106  std::vector<std::complex<T>> &left_eigenvectors,
107  std::vector<std::complex<T>> &right_eigenvectors,
108  std::vector<std::complex<T>> &complex_work,
109  std::vector<T> & real_work,
110  const types::blas_int work_flag,
111  types::blas_int & info)
112  {
113  static_assert(
114  std::is_same<T, double>::value || std::is_same<T, float>::value,
115  "Only implemented for std::complex<double> and std::complex<float>");
116  Assert(matrix.size() == static_cast<std::size_t>(n_rows * n_rows),
117  ExcInternalError());
118  Assert(static_cast<std::size_t>(n_rows) <= eigenvalues.size(),
119  ExcInternalError());
120  if (vl == 'V')
121  Assert(static_cast<std::size_t>(n_rows * n_rows) <=
122  left_eigenvectors.size(),
123  ExcInternalError());
124  if (vr == 'V')
125  Assert(static_cast<std::size_t>(n_rows * n_rows) <=
126  right_eigenvectors.size(),
127  ExcInternalError());
128  Assert(std::max<std::size_t>(1, work_flag) <= real_work.size(),
129  ExcInternalError());
130  Assert(work_flag == -1 ||
131  std::max<long int>(1, 2 * n_rows) <= (work_flag),
132  ExcInternalError());
133 
134  geev(&vl,
135  &vr,
136  &n_rows,
137  matrix.data(),
138  &n_rows,
139  eigenvalues.data(),
140  left_eigenvectors.data(),
141  &n_rows,
142  right_eigenvectors.data(),
143  &n_rows,
144  complex_work.data(),
145  &work_flag,
146  real_work.data(),
147  &info);
148  }
149 
150 
151 
152  template <typename T>
153  void
154  gesdd_helper(const char job,
155  const types::blas_int n_rows,
156 const types::blas_int n_cols,
157 AlignedVector<T> & matrix,
158  std::vector<T> & singular_values,
159  AlignedVector<T> & left_vectors,
160  AlignedVector<T> & right_vectors,
161  std::vector<T> & real_work,
162  std::vector<T> & /*complex work*/,
163  std::vector<types::blas_int> &integer_work,
164  const types::blas_int work_flag,
165  types::blas_int & info)
166  {
167  Assert(job == 'A' || job == 'S' || job == 'O' || job == 'N',
168  ExcInternalError());
169  Assert(static_cast<std::size_t>(n_rows * n_cols) == matrix.size(),
170  ExcInternalError());
171  Assert(std::min<std::size_t>(n_rows, n_cols) <= singular_values.size(),
172  ExcInternalError());
173  Assert(8 * std::min<std::size_t>(n_rows, n_cols) <= integer_work.size(),
174  ExcInternalError());
175  Assert(work_flag == -1 ||
176  static_cast<std::size_t>(work_flag) <= real_work.size(),
177  ExcInternalError());
178  gesdd(&job,
179  &n_rows,
180  &n_cols,
181  matrix.data(),
182  &n_rows,
183  singular_values.data(),
184  left_vectors.data(),
185  &n_rows,
186  right_vectors.data(),
187  &n_cols,
188  real_work.data(),
189  &work_flag,
190  integer_work.data(),
191  &info);
192  }
193 
194 
195 
196  template <typename T>
197  void
198  gesdd_helper(const char job,
199  const types::blas_int n_rows,
200  const types::blas_int n_cols,
201  AlignedVector<std::complex<T>> &matrix,
202  std::vector<T> & singular_values,
203  AlignedVector<std::complex<T>> &left_vectors,
204  AlignedVector<std::complex<T>> &right_vectors,
205  std::vector<std::complex<T>> & work,
206  std::vector<T> & real_work,
207  std::vector<types::blas_int> & integer_work,
208  const types::blas_int & work_flag,
209  types::blas_int & info)
210  {
211  Assert(job == 'A' || job == 'S' || job == 'O' || job == 'N',
212  ExcInternalError());
213  Assert(static_cast<std::size_t>(n_rows * n_cols) == matrix.size(),
214  ExcInternalError());
215  Assert(static_cast<std::size_t>(std::min(n_rows, n_cols)) <=
216  singular_values.size(),
217  ExcInternalError());
218  Assert(8 * std::min<std::size_t>(n_rows, n_cols) <= integer_work.size(),
219  ExcInternalError());
220  Assert(work_flag == -1 ||
221  static_cast<std::size_t>(work_flag) <= real_work.size(),
222  ExcInternalError());
223 
224  gesdd(&job,
225  &n_rows,
226  &n_cols,
227  matrix.data(),
228  &n_rows,
229  singular_values.data(),
230  left_vectors.data(),
231  &n_rows,
232  right_vectors.data(),
233  &n_cols,
234  work.data(),
235  &work_flag,
236  real_work.data(),
237  integer_work.data(),
238  &info);
239  }
240  } // namespace LAPACKFullMatrixImplementation
241 } // namespace internal
242 
243 template <typename number>
244 LAPACKFullMatrix<number>::LAPACKFullMatrix(const size_type n)
245  : TransposeTable<number>(n, n)
246  , state(matrix)
247  , property(general)
248 {}
249 
250 
251 template <typename number>
252 LAPACKFullMatrix<number>::LAPACKFullMatrix(const size_type m, const size_type n)
253  : TransposeTable<number>(m, n)
254  , state(matrix)
255  , property(general)
256 {}
257 
258 
259 template <typename number>
260 LAPACKFullMatrix<number>::LAPACKFullMatrix(const LAPACKFullMatrix &M)
261  : TransposeTable<number>(M)
262  , state(matrix)
263  , property(general)
264 {}
265 
266 
267 template <typename number>
268 LAPACKFullMatrix<number> &
269 LAPACKFullMatrix<number>::operator=(const LAPACKFullMatrix<number> &M)
270 {
271 TransposeTable<number>::operator=(M);
272  state = M.state;
273  property = M.property;
274  return *this;
275 }
276 
277 
278 
279 template <typename number>
280 void
281 LAPACKFullMatrix<number>::reinit(const size_type n)
282 {
283  this->TransposeTable<number>::reinit(n, n);
284  state = LAPACKSupport::matrix;
285 }
286 
287 
288 
289 template <typename number>
290 void
291 LAPACKFullMatrix<number>::grow_or_shrink(const size_type n)
292 {
293  const size_type s = std::min(std::min(this->m(), n), this->n());
294  TransposeTable<number> copy(std::move(*this));
295  this->TransposeTable<number>::reinit(n, n);
296  for (size_type i = 0; i < s; ++i)
297  for (size_type j = 0; j < s; ++j)
298  (*this)(i, j) = copy(i, j);
299 }
300 
301 
302 
303 template <typename number>
304 void
305 LAPACKFullMatrix<number>::apply_givens_rotation(
306  const std::array<number, 3> &csr,
307  const size_type i,
308  const size_type k,
309  const bool left)
310 {
311  auto &A = *this;
312  // see Golub 2013 "Matrix computations", p241 5.1.9 Applying Givens
313  // Rotations but note the difference in notation, namely the sign of s: we
314  // have G * A, where G[1,1] = s
315  if (left)
316  {
317  for (size_type j = 0; j < A.n(); ++j)
318  {
319  const number t = A(i, j);
320  A(i, j) = csr[0] * A(i, j) + csr[1] * A(k, j);
321  A(k, j) = -csr[1] * t + csr[0] * A(k, j);
322  }
323  }
324  else
325  {
326  for (size_type j = 0; j < A.m(); ++j)
327  {
328  const number t = A(j, i);
329  A(j, i) = csr[0] * A(j, i) + csr[1] * A(j, k);
330  A(j, k) = -csr[1] * t + csr[0] * A(j, k);
331  }
332  }
333 }
334 
335 
336 
337 template <typename number>
338 void
339 LAPACKFullMatrix<number>::remove_row_and_column(const size_type row,
340  const size_type col)
341 {
342  AssertIndexRange(row, this->m());
343  AssertIndexRange(col, this->n());
344 
345  const size_type nrows = this->m() - 1;
346  const size_type ncols = this->n() - 1;
347 
348  TransposeTable<number> copy(std::move(*this));
349  this->TransposeTable<number>::reinit(nrows, ncols);
350 
351  for (size_type j = 0; j < ncols; ++j)
352  {
353  const size_type jj = (j < col ? j : j + 1);
354  for (size_type i = 0; i < nrows; ++i)
355  {
356  const size_type ii = (i < row ? i : i + 1);
357  (*this)(i, j) = copy(ii, jj);
358  }
359  }
360 }
361 
362 
363 
364 template <typename number>
365 void
366 LAPACKFullMatrix<number>::reinit(const size_type m, const size_type n)
367 {
368  this->TransposeTable<number>::reinit(m, n);
369  state = LAPACKSupport::matrix;
370 }
371 
372 
373 template <typename number>
374 template <typename number2>
375 LAPACKFullMatrix<number> &
376 LAPACKFullMatrix<number>::operator=(const FullMatrix<number2> &M)
377 {
378  Assert(this->m() == M.m(), ExcDimensionMismatch(this->m(), M.m()));
379  Assert(this->n() == M.n(), ExcDimensionMismatch(this->n(), M.n()));
380  for (size_type i = 0; i < this->m(); ++i)
381  for (size_type j = 0; j < this->n(); ++j)
382  (*this)(i, j) = M(i, j);
383 
384  state = LAPACKSupport::matrix;
385  property = LAPACKSupport::general;
386  return *this;
387 }
388 
389 
390 template <typename number>
391 template <typename number2>
392 LAPACKFullMatrix<number> &
393 LAPACKFullMatrix<number>::operator=(const SparseMatrix<number2> &M)
394 {
395  Assert(this->m() == M.n(), ExcDimensionMismatch(this->m(), M.n()));
396  Assert(this->n() == M.m(), ExcDimensionMismatch(this->n(), M.m()));
397  for (size_type i = 0; i < this->m(); ++i)
398  for (size_type j = 0; j < this->n(); ++j)
399  (*this)(i, j) = M.el(i, j);
400 
401  state = LAPACKSupport::matrix;
402  property = LAPACKSupport::general;
403  return *this;
404 }
405 
406 
407 template <typename number>
408 LAPACKFullMatrix<number> &
409 LAPACKFullMatrix<number>::operator=(const number d)
410 {
411  (void)d;
412  Assert(d == number(0), ExcScalarAssignmentOnlyForZeroValue());
413 
414  if (this->n_elements() != 0)
415  this->reset_values();
416 
417  state = LAPACKSupport::matrix;
418  return *this;
419 }
420 
421 
422 template <typename number>
423 LAPACKFullMatrix<number> &
424 LAPACKFullMatrix<number>::operator*=(const number factor)
425 {
426 Assert(state == LAPACKSupport::matrix ||
427 state == LAPACKSupport::inverse_matrix,
428  ExcState(state));
429 
430  AssertIsFinite(factor);
431  const char type = 'G';
432  const number cfrom = 1.;
433  const types::blas_int m = this->m();
434  const types::blas_int n = this->n();
435  const types::blas_int lda = this->m();
436  types::blas_int info = 0;
437  // kl and ku will not be referenced for type = G (dense matrices).
438  const types::blas_int kl = 0;
439  number * values = this->values.data();
440 
441  lascl(&type, &kl, &kl, &cfrom, &factor, &m, &n, values, &lda, &info);
442 
443  // Negative return value implies a wrong argument. This should be internal.
444  Assert(info >= 0, ExcInternalError());
445 
446  return *this;
447 }
448 
449 
450 template <typename number>
451 LAPACKFullMatrix<number> &
452 LAPACKFullMatrix<number>::operator/=(const number factor)
453 {
454 Assert(state == LAPACKSupport::matrix ||
455 state == LAPACKSupport::inverse_matrix,
456  ExcState(state));
457 
458  AssertIsFinite(factor);
459  Assert(factor != number(0.), ExcZero());
460 
461  const char type = 'G';
462  const number cto = 1.;
463  const types::blas_int m = this->m();
464  const types::blas_int n = this->n();
465  const types::blas_int lda = this->m();
466  types::blas_int info = 0;
467  // kl and ku will not be referenced for type = G (dense matrices).
468  const types::blas_int kl = 0;
469  number * values = this->values.data();
470 
471  lascl(&type, &kl, &kl, &factor, &cto, &m, &n, values, &lda, &info);
472 
473  // Negative return value implies a wrong argument. This should be internal.
474  Assert(info >= 0, ExcInternalError());
475 
476  return *this;
477 }
478 
479 
480 
481 template <typename number>
482 void
483 LAPACKFullMatrix<number>::add(const number a, const LAPACKFullMatrix<number> &A)
484 {
485 Assert(state == LAPACKSupport::matrix ||
486 state == LAPACKSupport::inverse_matrix,
487  ExcState(state));
488 
489  Assert(m() == A.m(), ExcDimensionMismatch(m(), A.m()));
490  Assert(n() == A.n(), ExcDimensionMismatch(n(), A.n()));
491 
492  AssertIsFinite(a);
493 
494  // BLAS does not offer functions to add matrices.
495  // LapackFullMatrix is stored in contiguous array
496  // ==> use BLAS 1 for adding vectors
497  const types::blas_int n = this->m() * this->n();
498  const types::blas_int inc = 1;
499  number * values = this->values.data();
500  const number * values_A = A.values.data();
501 
502  axpy(&n, &a, values_A, &inc, values, &inc);
503 }
504 
505 
506 
507 namespace
508 {
509  template <typename number>
510  void
511  cholesky_rank1(LAPACKFullMatrix<number> &A,
512  const number a,
513  const Vector<number> & v)
514  {
515  const typename LAPACKFullMatrix<number>::size_type N = A.n();
516  Vector<number> z(v);
517  // Cholesky update / downdate, see
518  // 6.5.4 Cholesky Updating and Downdating, Golub 2013 Matrix computations
519  // Note that potrf() is called with LAPACKSupport::L , so the
520  // factorization is stored in lower triangular part.
521  // Also see discussion here
522  // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=2646
523  if (a > 0.)
524  {
525  // simple update via a sequence of Givens rotations.
526  // Observe that
527  //
528 //          | L^T |T   | L^T |
529 // A <--    |     |    |     |  =  L L^T + z z^T
530 //          | z^T |    | z^T |
531  //
532  // so we can get updated factor by doing a sequence of Givens
533  // rotations to make the matrix lower-triangular
534  // Also see LINPACK's dchud http://www.netlib.org/linpack/dchud.f
535  z *= std::sqrt(a);
536  for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
537  {
538 const std::array<number, 3> csr =
539 Utilities::LinearAlgebra::givens_rotation<number>(A(k, k), z(k));
540  A(k, k) = csr[2];
541  for (typename LAPACKFullMatrix<number>::size_type i = k + 1; i < N;
542  ++i)
543  {
544  const number t = A(i, k);
545  A(i, k) = csr[0] * A(i, k) + csr[1] * z(i);
546  z(i) = -csr[1] * t + csr[0] * z(i);
547  }
548  }
549  }
550  else
551  {
552  // downdating is not always possible as we may end up with
553  // negative definite matrix. If it's possible, then it boils
554  // down to application of hyperbolic rotations.
555  // Observe that
556  //
557 //          | L^T |T     | L^T |
558 // A <--    |     |  S   |     |  =  L L^T - z z^T
559 //          | z^T |      | z^T |
560 //
561 //          | In  0 |
562 // S :=     |       |
563 //          | 0  -1 |
564  //
565  // We are looking for H which is S-orthogonal (HSH^T=S) and
566  // can restore upper-triangular factor of the factorization of A above.
567  // We will use Hyperbolic rotations to do the job
568  //
569 // | c  -s |   | x1 |   | r |
570 // |       | * |    | = |   | ,   c^2 - s^2 = 1
571 // |-s   c |   | x2 |   | 0 |
572  //
573  // which have real solution only if x2 <= x1.
574  // See also Linpack's http://www.netlib.org/linpack/dchdd.f and
575  // https://infoscience.epfl.ch/record/161468/files/cholupdate.pdf and
576  // "Analysis of a recursive Least Squares Hyperbolic Rotation Algorithm
577  // for Signal Processing", Alexander, Pan, Plemmons, 1988.
578  z *= std::sqrt(-a);
579  for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
580  {
581 const std::array<number, 3> csr =
582 Utilities::LinearAlgebra::hyperbolic_rotation<number>(A(k, k), z(k));
583  A(k, k) = csr[2];
584  for (typename LAPACKFullMatrix<number>::size_type i = k + 1; i < N;
585  ++i)
586  {
587  const number t = A(i, k);
588  A(i, k) = csr[0] * A(i, k) - csr[1] * z(i);
589  z(i) = -csr[1] * t + csr[0] * z(i);
590  }
591  }
592  }
593  }
594 
595 
596  template <typename number>
597  void
598  cholesky_rank1(LAPACKFullMatrix<std::complex<number>> & /*A*/,
599  const std::complex<number> /*a*/,
600  const Vector<std::complex<number>> & /*v*/)
601  {
602  AssertThrow(false, ExcNotImplemented());
603  }
604 } // namespace
605 
606 
607 
608 template <typename number>
609 void
610 LAPACKFullMatrix<number>::rank1_update(const number a, const Vector<number> &v)
611 {
612  Assert(property == LAPACKSupport::symmetric, ExcProperty(property));
613 
614  Assert(n() == m(), ExcInternalError());
615  Assert(m() == v.size(), ExcDimensionMismatch(m(), v.size()));
616 
617  AssertIsFinite(a);
618 
619  if (state == LAPACKSupport::matrix)
620  {
621  {
622  const types::blas_int N = this->m();
623  const char uplo = LAPACKSupport::U;
624  const types::blas_int lda = N;
625  const types::blas_int incx = 1;
626 
627  syr(&uplo, &N, &a, v.begin(), &incx, this->values.begin(), &lda);
628  }
629 
630  const size_type N = this->m();
631  // FIXME: we should really only update upper or lower triangular parts
632  // of symmetric matrices and make sure the interface is consistent,
633  // for example operator(i,j) gives correct results regardless of storage.
634  for (size_type i = 0; i < N; ++i)
635  for (size_type j = 0; j < i; ++j)
636  (*this)(i, j) = (*this)(j, i);
637  }
638  else if (state == LAPACKSupport::cholesky)
639  {
640  cholesky_rank1(*this, a, v);
641  }
642  else
643  AssertThrow(false, ExcState(state));
644 }
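The public entry point above dispatches to the helpers in the anonymous namespace: in the cholesky state the factorization itself is updated, otherwise the symmetric matrix is updated with syr(). A minimal usage sketch follows; it is not part of lapack_full_matrix.cc, and the matrix values and the factor 0.5 are purely illustrative.

#include <deal.II/lac/lapack_full_matrix.h>
#include <deal.II/lac/vector.h>

void rank1_update_sketch()
{
  dealii::LAPACKFullMatrix<double> A(3, 3);
  for (unsigned int i = 0; i < 3; ++i)
    A(i, i) = 4.;                               // a simple SPD matrix
  A.set_property(dealii::LAPACKSupport::symmetric);
  A.compute_cholesky_factorization();           // state becomes 'cholesky'

  dealii::Vector<double> v(3);
  v = 1.;
  A.rank1_update(0.5, v);                       // A <- A + 0.5 v v^T, factors stay valid
}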
645 
646 
647 
648 template <typename number>
649 void
650 LAPACKFullMatrix<number>::vmult(Vector<number> &w,
651  const Vector<number> &v,
652  const bool adding) const
653 {
654  const types::blas_int mm = this->m();
655  const types::blas_int nn = this->n();
656  const number alpha = 1.;
657  const number beta = (adding ? 1. : 0.);
658  const number null = 0.;
659 
660  // use trmv for triangular matrices
661  if ((property == upper_triangular || property == lower_triangular) &&
662  (mm == nn) && state == matrix)
663  {
664  Assert(adding == false, ExcNotImplemented());
665 
666  AssertDimension(v.size(), this->n());
667  AssertDimension(w.size(), this->m());
668 
669  const char diag = 'N';
670  const char trans = 'N';
671 const char uplo =
672 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
673 
674  w = v;
675 
676  const types::blas_int N = mm;
677  const types::blas_int lda = N;
678  const types::blas_int incx = 1;
679 
680  trmv(
681  &uplo, &trans, &diag, &N, this->values.data(), &lda, w.data(), &incx);
682 
683  return;
684  }
685 
686  switch (state)
687  {
688  case matrix:
689  case inverse_matrix:
690  {
691  AssertDimension(v.size(), this->n());
692  AssertDimension(w.size(), this->m());
693 
694  gemv("N",
695  &mm,
696  &nn,
697  &alpha,
698  this->values.data(),
699  &mm,
700  v.data(),
701  &one,
702  &beta,
703  w.data(),
704  &one);
705  break;
706  }
707  case svd:
708  {
709  std::lock_guard<std::mutex> lock(mutex);
710  AssertDimension(v.size(), this->n());
711  AssertDimension(w.size(), this->m());
712  // Compute V^T v
713  work.resize(std::max(mm, nn));
714  gemv("N",
715  &nn,
716  &nn,
717  &alpha,
718  svd_vt->values.data(),
719  &nn,
720  v.data(),
721  &one,
722  &null,
723  work.data(),
724  &one);
725  // Multiply by singular values
726  for (size_type i = 0; i < wr.size(); ++i)
727  work[i] *= wr[i];
728  // Multiply with U
729  gemv("N",
730  &mm,
731  &mm,
732  &alpha,
733  svd_u->values.data(),
734  &mm,
735  work.data(),
736  &one,
737  &beta,
738  w.data(),
739  &one);
740  break;
741  }
742  case inverse_svd:
743  {
744  std::lock_guard<std::mutex> lock(mutex);
745  AssertDimension(w.size(), this->n());
746  AssertDimension(v.size(), this->m());
747  // Compute U^T v
748  work.resize(std::max(mm, nn));
749  gemv("T",
750  &mm,
751  &mm,
752  &alpha,
753  svd_u->values.data(),
754  &mm,
755  v.data(),
756  &one,
757  &null,
758  work.data(),
759  &one);
760  // Multiply by singular values
761  for (size_type i = 0; i < wr.size(); ++i)
762  work[i] *= wr[i];
763  // Multiply with V
764  gemv("T",
765  &nn,
766  &nn,
767  &alpha,
768  svd_vt->values.data(),
769  &nn,
770  work.data(),
771  &one,
772  &beta,
773  w.data(),
774  &one);
775  break;
776  }
777  default:
778  Assert(false, ExcState(state));
779  }
780 }
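In equation form (an explanatory note, not part of the source): with the stored factorization A = U Σ V^T and wr holding the diagonal of Σ, or of Σ^+ after compute_inverse_svd(), the two branches above evaluate

  w = U Σ (V^T v)        in the svd state, and
  w = V Σ^+ (U^T v)      in the inverse_svd state,

which is why each branch issues two gemv() calls with an element-wise scaling of the work vector in between.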
781 
782 
783 template <typename number>
784 void
785 LAPACKFullMatrix<number>::Tvmult(Vector<number> &w,
786  const Vector<number> &v,
787  const bool adding) const
788 {
789  const types::blas_int mm = this->m();
790  const types::blas_int nn = this->n();
791  const number alpha = 1.;
792  const number beta = (adding ? 1. : 0.);
793  const number null = 0.;
794 
795  // use trmv for triangular matrices
796  if ((property == upper_triangular || property == lower_triangular) &&
797  (mm == nn) && state == matrix)
798  {
799  Assert(adding == false, ExcNotImplemented());
800 
801  AssertDimension(v.size(), this->n());
802  AssertDimension(w.size(), this->m());
803 
804  const char diag = 'N';
805  const char trans = 'T';
806 const char uplo =
807 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
808 
809  w = v;
810 
811  const types::blas_int N = mm;
812  const types::blas_int lda = N;
813  const types::blas_int incx = 1;
814 
815  trmv(
816  &uplo, &trans, &diag, &N, this->values.data(), &lda, w.data(), &incx);
817 
818  return;
819  }
820 
821 
822  switch (state)
823  {
824  case matrix:
825  case inverse_matrix:
826  {
827  AssertDimension(w.size(), this->n());
828  AssertDimension(v.size(), this->m());
829 
830  gemv("T",
831  &mm,
832  &nn,
833  &alpha,
834  this->values.data(),
835  &mm,
836  v.data(),
837  &one,
838  &beta,
839  w.data(),
840  &one);
841  break;
842  }
843  case svd:
844  {
845  std::lock_guard<std::mutex> lock(mutex);
846  AssertDimension(w.size(), this->n());
847  AssertDimension(v.size(), this->m());
848 
849  // Compute U^T v
850  work.resize(std::max(mm, nn));
851  gemv("T",
852  &mm,
853  &mm,
854  &alpha,
855  svd_u->values.data(),
856  &mm,
857  v.data(),
858  &one,
859  &null,
860  work.data(),
861  &one);
862  // Multiply by singular values
863  for (size_type i = 0; i < wr.size(); ++i)
864  work[i] *= wr[i];
865  // Multiply with V
866  gemv("T",
867  &nn,
868  &nn,
869  &alpha,
870  svd_vt->values.data(),
871  &nn,
872  work.data(),
873  &one,
874  &beta,
875  w.data(),
876  &one);
877  break;
878  }
879  case inverse_svd:
880  {
881  std::lock_guard<std::mutex> lock(mutex);
882  AssertDimension(v.size(), this->n());
883  AssertDimension(w.size(), this->m());
884 
885  // Compute V^T v
886  work.resize(std::max(mm, nn));
887  gemv("N",
888  &nn,
889  &nn,
890  &alpha,
891  svd_vt->values.data(),
892  &nn,
893  v.data(),
894  &one,
895  &null,
896  work.data(),
897  &one);
898  // Multiply by singular values
899  for (size_type i = 0; i < wr.size(); ++i)
900  work[i] *= wr[i];
901  // Multiply with U
902  gemv("N",
903  &mm,
904  &mm,
905  &alpha,
906  svd_u->values.data(),
907  &mm,
908  work.data(),
909  &one,
910  &beta,
911  w.data(),
912  &one);
913  break;
914  }
915  default:
916  Assert(false, ExcState(state));
917  }
918 }
919 
920 
921 template <typename number>
922 void
923 LAPACKFullMatrix<number>::vmult_add(Vector<number> &w,
924  const Vector<number> &v) const
925 {
926  vmult(w, v, true);
927 }
928 
929 
930 template <typename number>
931 void
932 LAPACKFullMatrix<number>::Tvmult_add(Vector<number> &w,
933  const Vector<number> &v) const
934 {
935  Tvmult(w, v, true);
936 }
937 
938 
939 template <typename number>
940 void
941 LAPACKFullMatrix<number>::mmult(LAPACKFullMatrix<number> &C,
942  const LAPACKFullMatrix<number> &B,
943  const bool adding) const
944 {
945 Assert(state == matrix || state == inverse_matrix, ExcState(state));
946 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
947  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
948  Assert(this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
949  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
950  Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
951  const types::blas_int mm = this->m();
952  const types::blas_int nn = B.n();
953  const types::blas_int kk = this->n();
954  const number alpha = 1.;
955  const number beta = (adding ? 1. : 0.);
956 
957  gemm("N",
958  "N",
959  &mm,
960  &nn,
961  &kk,
962  &alpha,
963  this->values.data(),
964  &mm,
965  B.values.data(),
966  &kk,
967  &beta,
968  C.values.data(),
969  &mm);
970 }
971 
972 
973 template <typename number>
974 void
975 LAPACKFullMatrix<number>::mmult(FullMatrix<number> &C,
976  const LAPACKFullMatrix<number> &B,
977  const bool adding) const
978 {
979 Assert(state == matrix || state == inverse_matrix, ExcState(state));
980 Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
981  Assert(this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
982  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
983  Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
984  const types::blas_int mm = this->m();
985  const types::blas_int nn = B.n();
986  const types::blas_int kk = this->n();
987  const number alpha = 1.;
988  const number beta = (adding ? 1. : 0.);
989 
990  // since FullMatrix stores the matrix in transposed order compared to this
991  // matrix, compute B^T * A^T = (A * B)^T
992  gemm("T",
993  "T",
994  &nn,
995  &mm,
996  &kk,
997  &alpha,
998  B.values.data(),
999  &kk,
1000  this->values.data(),
1001  &mm,
1002  &beta,
1003  &C(0, 0),
1004  &nn);
1005 }
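A side note on the gemm() call above (an explanatory remark, not part of the source): FullMatrix stores its entries row-major while LAPACK expects column-major data, so the row-major image of C is exactly the column-major storage of C^T. Using the identity (A B)^T = B^T A^T, computing C^T = B^T A^T in column-major order therefore deposits A * B directly into the FullMatrix argument without an extra transpose pass.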
1006 
1007 
1008 
1009 template <typename number>
1010 void
1011 LAPACKFullMatrix<number>::Tmmult(LAPACKFullMatrix<number> &C,
1012  const LAPACKFullMatrix<number> &B,
1013  const Vector<number> & V,
1014  const bool adding) const
1015 {
1016  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1017  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1018  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1019 
1020  const LAPACKFullMatrix<number> &A = *this;
1021 
1022  Assert(A.m() == B.m(), ExcDimensionMismatch(A.m(), B.m()));
1023  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1024  Assert(C.m() == A.n(), ExcDimensionMismatch(A.n(), C.m()));
1025  Assert(V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
1026 
1027  const types::blas_int mm = A.n();
1028  const types::blas_int nn = B.n();
1029  const types::blas_int kk = B.m();
1030 
1031  // lapack does not have any triple product routines, including the case of
1032  // diagonal matrix in the middle, see
1033  // https://stackoverflow.com/questions/3548069/multiplying-three-matrices-in-blas-with-the-middle-one-being-diagonal
1034  // http://mathforum.org/kb/message.jspa?messageID=3546564
1035 
1036  std::lock_guard<std::mutex> lock(mutex);
1037  // First, get V*B into "work" array
1038  work.resize(kk * nn);
1039  // following http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=768#p2577
1040  // do left-multiplication manually. Note that Xscal would require to first
1041  // copy the input vector as multiplication is done inplace.
1042  for (types::blas_int j = 0; j < nn; ++j)
1043  for (types::blas_int i = 0; i < kk; ++i)
1044  {
1045  Assert(j * kk + i < static_cast<types::blas_int>(work.size()),
1046  ExcInternalError());
1047  work[j * kk + i] = V(i) * B(i, j);
1048  }
1049 
1050  // Now do the standard Tmmult:
1051  const number alpha = 1.;
1052  const number beta = (adding ? 1. : 0.);
1053 
1054  gemm("T",
1055  "N",
1056  &mm,
1057  &nn,
1058  &kk,
1059  &alpha,
1060  this->values.data(),
1061  &kk,
1062  work.data(),
1063  &kk,
1064  &beta,
1065  C.values.data(),
1066  &mm);
1067 }
1068 
1069 
1070 
1071 template <typename number>
1072 void
1073 LAPACKFullMatrix<number>::transpose(LAPACKFullMatrix<number> &B) const
1074 {
1075  const LAPACKFullMatrix<number> &A = *this;
1076  AssertDimension(A.m(), B.n());
1077  AssertDimension(A.n(), B.m());
1078  const types::blas_int m = B.m();
1079  const types::blas_int n = B.n();
1080 #ifdef DEAL_II_LAPACK_WITH_MKL
1081  const number one = 1.;
1082  omatcopy('C', 'C', n, m, one, A.values.data(), n, B.values.data(), m);
1083 #else
1084  for (types::blas_int i = 0; i < m; ++i)
1085  for (types::blas_int j = 0; j < n; ++j)
1086  B(i, j) = numbers::NumberTraits<number>::conjugate(A(j, i));
1087 #endif
1088 }
1089 
1090 
1091 template <typename number>
1092 void
1093 LAPACKFullMatrix<number>::scale_rows(const Vector<number> &V)
1094 {
1095  LAPACKFullMatrix<number> &A = *this;
1096  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1097  Assert(V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
1098 
1099  const types::blas_int nn = A.n();
1100  const types::blas_int kk = A.m();
1101  for (types::blas_int j = 0; j < nn; ++j)
1102  for (types::blas_int i = 0; i < kk; ++i)
1103  A(i, j) *= V(i);
1104 }
1105 
1106 
1107 
1108 template <typename number>
1109 void
1110 LAPACKFullMatrix<number>::Tmmult(LAPACKFullMatrix<number> &C,
1111  const LAPACKFullMatrix<number> &B,
1112  const bool adding) const
1113 {
1114  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1115  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1116  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1117  Assert(this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
1118  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1119  Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1120  const types::blas_int mm = this->n();
1121  const types::blas_int nn = B.n();
1122  const types::blas_int kk = B.m();
1123  const number alpha = 1.;
1124  const number beta = (adding ? 1. : 0.);
1125 
1126  if (PointerComparison::equal(this, &B))
1127  {
1128  syrk(&LAPACKSupport::U,
1129  "T",
1130  &nn,
1131  &kk,
1132  &alpha,
1133  this->values.data(),
1134  &kk,
1135  &beta,
1136  C.values.data(),
1137  &nn);
1138 
1139  // fill-in lower triangular part
1140  for (types::blas_int j = 0; j < nn; ++j)
1141  for (types::blas_int i = 0; i < j; ++i)
1142  C(j, i) = C(i, j);
1143 
1144  C.property = symmetric;
1145  }
1146  else
1147  {
1148  gemm("T",
1149  "N",
1150  &mm,
1151  &nn,
1152  &kk,
1153  &alpha,
1154  this->values.data(),
1155  &kk,
1156  B.values.data(),
1157  &kk,
1158  &beta,
1159  C.values.data(),
1160  &mm);
1161  }
1162 }
1163 
1164 
1165 template <typename number>
1166 void
1167 LAPACKFullMatrix<number>::Tmmult(FullMatrix<number> &C,
1168  const LAPACKFullMatrix<number> &B,
1169  const bool adding) const
1170 {
1171  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1172  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1173  Assert(this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
1174  Assert(C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
1175  Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1176  const types::blas_int mm = this->n();
1177  const types::blas_int nn = B.n();
1178  const types::blas_int kk = B.m();
1179  const number alpha = 1.;
1180  const number beta = (adding ? 1. : 0.);
1181 
1182  // since FullMatrix stores the matrix in transposed order compared to this
1183  // matrix, compute B^T * A = (A^T * B)^T
1184  gemm("T",
1185  "N",
1186  &nn,
1187  &mm,
1188  &kk,
1189  &alpha,
1190  B.values.data(),
1191  &kk,
1192  this->values.data(),
1193  &kk,
1194  &beta,
1195  &C(0, 0),
1196  &nn);
1197 }
1198 
1199 
1200 template <typename number>
1201 void
1202 LAPACKFullMatrix<number>::mTmult(LAPACKFullMatrix<number> &C,
1203  const LAPACKFullMatrix<number> &B,
1204  const bool adding) const
1205 {
1206  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1207  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1208  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1209  Assert(this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
1210  Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1211  Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
1212  const types::blas_int mm = this->m();
1213  const types::blas_int nn = B.m();
1214  const types::blas_int kk = B.n();
1215  const number alpha = 1.;
1216  const number beta = (adding ? 1. : 0.);
1217 
1218  if (PointerComparison::equal(this, &B))
1219  {
1220  syrk(&LAPACKSupport::U,
1221  "N",
1222  &nn,
1223  &kk,
1224  &alpha,
1225  this->values.data(),
1226  &nn,
1227  &beta,
1228  C.values.data(),
1229  &nn);
1230 
1231  // fill-in lower triangular part
1232  for (types::blas_int j = 0; j < nn; ++j)
1233  for (types::blas_int i = 0; i < j; ++i)
1234  C(j, i) = C(i, j);
1235 
1236  C.property = symmetric;
1237  }
1238  else
1239  {
1240  gemm("N",
1241  "T",
1242  &mm,
1243  &nn,
1244  &kk,
1245  &alpha,
1246  this->values.data(),
1247  &mm,
1248  B.values.data(),
1249  &nn,
1250  &beta,
1251  C.values.data(),
1252  &mm);
1253  }
1254 }
1255 
1256 
1257 
1258 template <typename number>
1259 void
1260 LAPACKFullMatrix<number>::mTmult(FullMatrix<number> &C,
1261  const LAPACKFullMatrix<number> &B,
1262  const bool adding) const
1263 {
1264  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1265  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1266  Assert(this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
1267  Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1268  Assert(C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
1269  const types::blas_int mm = this->m();
1270  const types::blas_int nn = B.m();
1271  const types::blas_int kk = B.n();
1272  const number alpha = 1.;
1273  const number beta = (adding ? 1. : 0.);
1274 
1275  // since FullMatrix stores the matrix in transposed order compared to this
1276  // matrix, compute B * A^T = (A * B^T)^T
1277  gemm("N",
1278  "T",
1279  &nn,
1280  &mm,
1281  &kk,
1282  &alpha,
1283  B.values.data(),
1284  &nn,
1285  this->values.data(),
1286  &mm,
1287  &beta,
1288  &C(0, 0),
1289  &nn);
1290 }
1291 
1292 
1293 template <typename number>
1294 void
1295 LAPACKFullMatrix<number>::TmTmult(LAPACKFullMatrix<number> &C,
1296  const LAPACKFullMatrix<number> &B,
1297  const bool adding) const
1298 {
1299  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1300  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1301  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
1302  Assert(this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
1303  Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1304  Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1305  const types::blas_int mm = this->n();
1306  const types::blas_int nn = B.m();
1307  const types::blas_int kk = B.n();
1308  const number alpha = 1.;
1309  const number beta = (adding ? 1. : 0.);
1310 
1311  gemm("T",
1312  "T",
1313  &mm,
1314  &nn,
1315  &kk,
1316  &alpha,
1317  this->values.data(),
1318  &kk,
1319  B.values.data(),
1320  &nn,
1321  &beta,
1322  C.values.data(),
1323  &mm);
1324 }
1325 
1326 
1327 template <typename number>
1328 void
1329 LAPACKFullMatrix<number>::TmTmult(FullMatrix<number> &C,
1330  const LAPACKFullMatrix<number> &B,
1331  const bool adding) const
1332 {
1333  Assert(state == matrix || state == inverse_matrix, ExcState(state));
1334  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
1335  Assert(this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
1336  Assert(C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
1337  Assert(C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
1338  const types::blas_int mm = this->n();
1339  const types::blas_int nn = B.m();
1340  const types::blas_int kk = B.n();
1341  const number alpha = 1.;
1342  const number beta = (adding ? 1. : 0.);
1343 
1344  // since FullMatrix stores the matrix in transposed order compared to this
1345  // matrix, compute B * A = (A^T * B^T)^T
1346  gemm("N",
1347  "N",
1348  &nn,
1349  &mm,
1350  &kk,
1351  &alpha,
1352  B.values.data(),
1353  &nn,
1354  this->values.data(),
1355  &kk,
1356  &beta,
1357  &C(0, 0),
1358  &nn);
1359 }
1360 
1361 
1362 template <typename number>
1363 void
1364 LAPACKFullMatrix<number>::compute_lu_factorization()
1365 {
1366  Assert(state == matrix, ExcState(state));
1367  state = LAPACKSupport::unusable;
1368 
1369  const types::blas_int mm = this->m();
1370  const types::blas_int nn = this->n();
1371  number *const values = this->values.data();
1372  ipiv.resize(mm);
1373  types::blas_int info = 0;
1374  getrf(&mm, &nn, values, &mm, ipiv.data(), &info);
1375 
1376  Assert(info >= 0, ExcInternalError());
1377 
1378  // if info >= 0, the factorization has been completed
1379  state = lu;
1380 
1382 }
1383 
1384 
1385 
1386 template <typename number>
1387 void
1388 LAPACKFullMatrix<number>::set_property(const Property p)
1389 {
1390  property = p;
1391 }
1392 
1393 
1394 
1395 template <typename number>
1396 number
1397 LAPACKFullMatrix<number>::l1_norm() const
1398 {
1399  const char type('O');
1400  return norm(type);
1401 }
1402 
1403 
1404 
1405 template <typename number>
1406 number
1407 LAPACKFullMatrix<number>::linfty_norm() const
1408 {
1409  const char type('I');
1410  return norm(type);
1411 }
1412 
1413 
1414 
1415 template <typename number>
1416 number
1417 LAPACKFullMatrix<number>::frobenius_norm() const
1418 {
1419  const char type('F');
1420  return norm(type);
1421 }
1422 
1423 
1424 
1425 template <typename number>
1426 number
1427 LAPACKFullMatrix<number>::norm(const char type) const
1428 {
1429  std::lock_guard<std::mutex> lock(mutex);
1430 
1431 Assert(state == LAPACKSupport::matrix ||
1432 state == LAPACKSupport::inverse_matrix,
1433  ExcMessage("norms can be called in matrix state only."));
1434 
1435  const types::blas_int N = this->n();
1436  const types::blas_int M = this->m();
1437  const number *const values = this->values.data();
1438  if (property == symmetric)
1439  {
1440  const types::blas_int lda = std::max<types::blas_int>(1, N);
1441  const types::blas_int lwork =
1442  (type == 'I' || type == 'O') ? std::max<types::blas_int>(1, N) : 0;
1443  work.resize(lwork);
1444  return lansy(&type, &LAPACKSupport::L, &N, values, &lda, work.data());
1445  }
1446  else
1447  {
1448  const types::blas_int lda = std::max<types::blas_int>(1, M);
1449  const types::blas_int lwork =
1450  (type == 'I') ? std::max<types::blas_int>(1, M) : 0;
1451  work.resize(lwork);
1452  return lange(&type, &M, &N, values, &lda, work.data());
1453  }
1454 }
1455 
1456 
1457 
1458 template <typename number>
1459 number
1460 LAPACKFullMatrix<number>::trace() const
1461 {
1462 Assert(state == LAPACKSupport::matrix ||
1463 state == LAPACKSupport::inverse_matrix,
1464  ExcMessage("Trace can be called in matrix state only."));
1465  Assert(this->n() == this->m(), ExcDimensionMismatch(this->n(), this->m()));
1466 
1467  number tr = 0;
1468  for (size_type i = 0; i < this->m(); ++i)
1469  tr += (*this)(i, i);
1470 
1471  return tr;
1472 }
1473 
1474 
1475 
1476 template <typename number>
1477 void
1478 LAPACKFullMatrix<number>::compute_cholesky_factorization()
1479 {
1480  Assert(state == matrix, ExcState(state));
1481  Assert(property == symmetric, ExcProperty(property));
1482  state = LAPACKSupport::unusable;
1483 
1484  const types::blas_int mm = this->m();
1485  const types::blas_int nn = this->n();
1486  (void)mm;
1487  Assert(mm == nn, ExcDimensionMismatch(mm, nn));
1488 
1489  number *const values = this->values.data();
1490  types::blas_int info = 0;
1491  const types::blas_int lda = std::max<types::blas_int>(1, nn);
1492  potrf(&LAPACKSupport::L, &nn, values, &lda, &info);
1493 
1494  // info < 0 : the info-th argument had an illegal value
1495  Assert(info >= 0, ExcInternalError());
1496 
1497  state = cholesky;
1499 }
1500 
1501 
1502 
1503 template <typename number>
1504 number
1505 LAPACKFullMatrix<number>::reciprocal_condition_number(const number a_norm) const
1506 {
1507  std::lock_guard<std::mutex> lock(mutex);
1508  Assert(state == cholesky, ExcState(state));
1509  number rcond = 0.;
1510 
1511  const types::blas_int N = this->m();
1512  const number * values = this->values.data();
1513  types::blas_int info = 0;
1514  const types::blas_int lda = std::max<types::blas_int>(1, N);
1515  work.resize(3 * N);
1516  iwork.resize(N);
1517 
1518  // use the same uplo as in Cholesky
1519  pocon(&LAPACKSupport::L,
1520  &N,
1521  values,
1522  &lda,
1523  &a_norm,
1524  &rcond,
1525  work.data(),
1526  iwork.data(),
1527  &info);
1528 
1529  Assert(info >= 0, ExcInternalError());
1530 
1531  return rcond;
1532 }
1533 
1534 
1535 
1536 template <typename number>
1537 number
1538 LAPACKFullMatrix<number>::reciprocal_condition_number() const
1539 {
1540  std::lock_guard<std::mutex> lock(mutex);
1541  Assert(property == upper_triangular || property == lower_triangular,
1542  ExcProperty(property));
1543  number rcond = 0.;
1544 
1545  const types::blas_int N = this->m();
1546  const number *const values = this->values.data();
1547  types::blas_int info = 0;
1548  const types::blas_int lda = std::max<types::blas_int>(1, N);
1549  work.resize(3 * N);
1550  iwork.resize(N);
1551 
1552  const char norm = '1';
1553  const char diag = 'N';
1554 const char uplo =
1555 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1556  trcon(&norm,
1557  &uplo,
1558  &diag,
1559  &N,
1560  values,
1561  &lda,
1562  &rcond,
1563  work.data(),
1564  iwork.data(),
1565  &info);
1566 
1567  Assert(info >= 0, ExcInternalError());
1568 
1569  return rcond;
1570 }
1571 
1572 
1573 
1574 template <typename number>
1575 void
1576 LAPACKFullMatrix<number>::compute_svd()
1577 {
1578  Assert(state == matrix, ExcState(state));
1579  state = LAPACKSupport::unusable;
1580 
1581  const types::blas_int mm = this->m();
1582  const types::blas_int nn = this->n();
1583  wr.resize(std::max(mm, nn));
1584  std::fill(wr.begin(), wr.end(), 0.);
1585  ipiv.resize(8 * mm);
1586 
1587  svd_u = std_cxx14::make_unique<LAPACKFullMatrix<number>>(mm, mm);
1588  svd_vt = std_cxx14::make_unique<LAPACKFullMatrix<number>>(nn, nn);
1589  types::blas_int info = 0;
1590 
1591  // First determine optimal workspace size
1592  work.resize(1);
1593  types::blas_int lwork = -1;
1594 
1595  // TODO double check size
1596 std::vector<typename numbers::NumberTraits<number>::real_type> real_work;
1597 if (numbers::NumberTraits<number>::is_complex)
1598  {
1599  // This array is only used by the complex versions.
1600  std::size_t min = std::min(this->m(), this->n());
1601  std::size_t max = std::max(this->m(), this->n());
1602  real_work.resize(
1603  std::max(5 * min * min + 5 * min, 2 * max * min + 2 * min * min + min));
1604  }
1605 
1606  // make sure that the first entry in the work array is clear, in case the
1607  // routine does not completely overwrite the memory:
1608  work[0] = number();
1609  internal::LAPACKFullMatrixImplementation::gesdd_helper(LAPACKSupport::A,
1610  mm,
1611  nn,
1612  this->values,
1613  wr,
1614  svd_u->values,
1615  svd_vt->values,
1616  work,
1617  real_work,
1618  ipiv,
1619  lwork,
1620  info);
1621 
1622  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("gesdd", info));
1623  // Resize the work array. Add one to the size computed by LAPACK to be on
1624  // the safe side.
1625  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1626 
1627  work.resize(lwork);
1628  // Do the actual SVD.
1629  internal::LAPACKFullMatrixImplementation::gesdd_helper(LAPACKSupport::A,
1630  mm,
1631  nn,
1632  this->values,
1633  wr,
1634  svd_u->values,
1635  svd_vt->values,
1636  work,
1637  real_work,
1638  ipiv,
1639  lwork,
1640  info);
1641  AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("gesdd", info));
1642 
1643  work.resize(0);
1644  ipiv.resize(0);
1645 
1646  state = LAPACKSupport::svd;
1647 }
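A usage sketch (illustrative values, not part of the source): after compute_svd() the matrix enters the svd state, the singular values can be read through singular_value(), and vmult()/Tvmult() apply the factorized operator.

#include <deal.II/lac/lapack_full_matrix.h>

void svd_sketch()
{
  dealii::LAPACKFullMatrix<double> A(4, 3);
  for (unsigned int i = 0; i < 4; ++i)
    for (unsigned int j = 0; j < 3; ++j)
      A(i, j) = 1. / (i + j + 1.);              // small Hilbert-like test matrix
  A.compute_svd();                              // state becomes 'svd'
  const double sigma_max = A.singular_value(0); // values are sorted, largest first
  (void)sigma_max;
}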
1648 
1649 
1650 template <typename number>
1651 void
1652 LAPACKFullMatrix<number>::compute_inverse_svd(const double threshold)
1653 {
1654  if (state == LAPACKSupport::matrix)
1655  compute_svd();
1656 
1657  Assert(state == LAPACKSupport::svd, ExcState(state));
1658 
1659  const typename numbers::NumberTraits<number>::real_type one(1.0);
1660  const double lim = std::abs(wr[0]) * threshold;
1661  for (size_type i = 0; i < wr.size(); ++i)
1662  {
1663  if (std::abs(wr[i]) > lim)
1664  wr[i] = one / wr[i];
1665  else
1666  wr[i] = 0.;
1667 }
1668 state = LAPACKSupport::inverse_svd;
1669 }
1670 
1671 
1672 
1673 template <typename number>
1674 void
1675 LAPACKFullMatrix<number>::compute_inverse_svd_with_kernel(
1676  const unsigned int kernel_size)
1677 {
1678  if (state == LAPACKSupport::matrix)
1679  compute_svd();
1680 
1681  Assert(state == LAPACKSupport::svd, ExcState(state));
1682 
1683  const typename numbers::NumberTraits<number>::real_type one(1.0);
1684  const unsigned int n_wr = wr.size();
1685  for (size_type i = 0; i < n_wr - kernel_size; ++i)
1686  wr[i] = one / wr[i];
1687  for (size_type i = n_wr - kernel_size; i < n_wr; ++i)
1688 wr[i] = 0.;
1689 state = LAPACKSupport::inverse_svd;
1690 }
1691 
1692 
1693 
1694 template <typename number>
1695 void
1696 LAPACKFullMatrix<number>::invert()
1697 {
1698  Assert(state == matrix || state == lu || state == cholesky, ExcState(state));
1699  const types::blas_int mm = this->m();
1700  const types::blas_int nn = this->n();
1701  Assert(nn == mm, ExcNotQuadratic());
1702 
1703  number *const values = this->values.data();
1704  types::blas_int info = 0;
1705 
1706  if (property != symmetric)
1707  {
1708  if (state == matrix)
1709  compute_lu_factorization();
1710 
1711  ipiv.resize(mm);
1712  inv_work.resize(mm);
1713  getri(&mm, values, &mm, ipiv.data(), inv_work.data(), &mm, &info);
1714  }
1715  else
1716  {
1717  if (state == matrix)
1718  compute_cholesky_factorization();
1719 
1720  const types::blas_int lda = std::max<types::blas_int>(1, nn);
1721  potri(&LAPACKSupport::L, &nn, values, &lda, &info);
1722  // inverse is stored in lower diagonal, set the upper diagonal
1723  // appropriately:
1724  for (types::blas_int i = 0; i < nn; ++i)
1725  for (types::blas_int j = i + 1; j < nn; ++j)
1726  this->el(i, j) = this->el(j, i);
1727  }
1728 
1729  Assert(info >= 0, ExcInternalError());
1731 
1732  state = inverse_matrix;
1733 }
1734 
1735 
1736 
1737 template <typename number>
1738 void
1739 LAPACKFullMatrix<number>::solve(Vector<number> &v, const bool transposed) const
1740 {
1741  Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1742  AssertDimension(this->m(), v.size());
1743  const char * trans = transposed ? &T : &N;
1744  const types::blas_int nn = this->n();
1745  const number *const values = this->values.data();
1746  const types::blas_int n_rhs = 1;
1747  types::blas_int info = 0;
1748 
1749  if (state == lu)
1750  {
1751  getrs(
1752  trans, &nn, &n_rhs, values, &nn, ipiv.data(), v.begin(), &nn, &info);
1753  }
1754  else if (state == cholesky)
1755  {
1756  potrs(&LAPACKSupport::L, &nn, &n_rhs, values, &nn, v.begin(), &nn, &info);
1757  }
1758  else if (property == upper_triangular || property == lower_triangular)
1759  {
1760 const char uplo =
1761 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1762 
1763  const types::blas_int lda = nn;
1764  const types::blas_int ldb = nn;
1765  trtrs(
1766  &uplo, trans, "N", &nn, &n_rhs, values, &lda, v.begin(), &ldb, &info);
1767  }
1768  else
1769  {
1770  Assert(false,
1771  ExcMessage(
1772  "The matrix has to be either factorized or triangular."));
1773  }
1774 
1775  Assert(info == 0, ExcInternalError());
1776 }
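A usage sketch for the factorize-then-solve workflow (not part of the source; values are illustrative):

#include <deal.II/lac/lapack_full_matrix.h>
#include <deal.II/lac/vector.h>

void solve_sketch()
{
  dealii::LAPACKFullMatrix<double> A(2, 2);
  A(0, 0) = 4.;  A(0, 1) = 1.;
  A(1, 0) = 1.;  A(1, 1) = 3.;

  dealii::Vector<double> rhs(2);
  rhs(0) = 1.;
  rhs(1) = 2.;

  A.compute_lu_factorization(); // in-place LU via getrf; state becomes 'lu'
  A.solve(rhs);                 // rhs is overwritten with A^{-1} rhs via getrs
}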
1777 
1778 
1779 
1780 template <typename number>
1781 void
1782 LAPACKFullMatrix<number>::solve(LAPACKFullMatrix<number> &B,
1783  const bool transposed) const
1784 {
1785  Assert(B.state == matrix, ExcState(B.state));
1786 
1787  Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1788  AssertDimension(this->m(), B.m());
1789  const char * trans = transposed ? &T : &N;
1790  const types::blas_int nn = this->n();
1791  const number *const values = this->values.data();
1792  const types::blas_int n_rhs = B.n();
1793  types::blas_int info = 0;
1794 
1795  if (state == lu)
1796  {
1797  getrs(trans,
1798  &nn,
1799  &n_rhs,
1800  values,
1801  &nn,
1802  ipiv.data(),
1803  B.values.data(),
1804  &nn,
1805  &info);
1806  }
1807  else if (state == cholesky)
1808  {
1809  potrs(&LAPACKSupport::L,
1810  &nn,
1811  &n_rhs,
1812  values,
1813  &nn,
1814  B.values.data(),
1815  &nn,
1816  &info);
1817  }
1818  else if (property == upper_triangular || property == lower_triangular)
1819  {
1820 const char uplo =
1821 (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1822 
1823  const types::blas_int lda = nn;
1824  const types::blas_int ldb = nn;
1825  trtrs(&uplo,
1826  trans,
1827  "N",
1828  &nn,
1829  &n_rhs,
1830  values,
1831  &lda,
1832  B.values.data(),
1833  &ldb,
1834  &info);
1835  }
1836  else
1837  {
1838  Assert(false,
1839  ExcMessage(
1840  "The matrix has to be either factorized or triangular."));
1841  }
1842 
1843  Assert(info == 0, ExcInternalError());
1844 }
1845 
1846 
1847 
1848 template <typename number>
1849 void
1850 LAPACKFullMatrix<number>::apply_lu_factorization(Vector<number> &v,
1851  const bool transposed) const
1852 {
1853  solve(v, transposed);
1854 }
1855 
1856 
1857 
1858 template <typename number>
1859 void
1860 LAPACKFullMatrix<number>::apply_lu_factorization(LAPACKFullMatrix<number> &B,
1861  const bool transposed) const
1862 {
1863  solve(B, transposed);
1864 }
1865 
1866 
1867 
1868 template <typename number>
1869 number
1870 LAPACKFullMatrix<number>::determinant() const
1871 {
1872  Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1873 
1874  // LAPACK doesn't offer a function to compute a matrix determinant.
1875  // This is due to the difficulty in maintaining numerical accuracy, as the
1876  // calculations are likely to overflow or underflow. See
1877  // http://www.netlib.org/lapack/faq.html#_are_there_routines_in_lapack_to_compute_determinants
1878  //
1879  // However, after a PLU decomposition one can compute this by multiplication
1880  // of the diagonal entries with one another. One must take into consideration
1881  // the number of permutations (row swaps) stored in the P matrix.
1882  //
1883  // See the implementations in the blaze library (detNxN)
1884  // https://bitbucket.org/blaze-lib/blaze
1885  // and also
1886  // https://dualm.wordpress.com/2012/01/06/computing-determinant-in-fortran/
1887  // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?p=341&#p336
1888  // for further information.
1889  Assert(state == lu, ExcState(state));
1890  Assert(ipiv.size() == this->m(), ExcInternalError());
1891  number det = 1.0;
1892  for (size_type i = 0; i < this->m(); ++i)
1893  det *=
1894  (ipiv[i] == types::blas_int(i + 1) ? this->el(i, i) : -this->el(i, i));
1895  return det;
1896 }
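Written out (an explanatory note, not part of the source): with P A = L U from getrf() and S the number of row interchanges recorded in ipiv (an entry ipiv[i] != i+1 marks a swap), the loop above evaluates

  det(A) = (-1)^S * prod_i u_ii,

since L has a unit diagonal and det(P) = (-1)^S. A determinant is therefore only available after compute_lu_factorization(), as the Assert on the state enforces.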
1897 
1898 
1899 template <typename number>
1900 void
1901 LAPACKFullMatrix<number>::compute_eigenvalues(const bool right, const bool left)
1902 {
1903  Assert(state == matrix, ExcState(state));
1904  const types::blas_int nn = this->n();
1905  wr.resize(nn);
1906  wi.resize(nn);
1907  if (right)
1908  vr.resize(nn * nn);
1909  if (left)
1910  vl.resize(nn * nn);
1911 
1912  types::blas_int info = 0;
1913  types::blas_int lwork = 1;
1914  const char jobvr = (right) ? V : N;
1915  const char jobvl = (left) ? V : N;
1916 
1917  /*
1918  * The LAPACK routine xGEEV requires a sufficiently large work array; the
1919  * minimum requirement is
1920  *
1921  * work.size >= 4*nn.
1922  *
1923  * However, for better performance, a larger work array may be needed. The
1924  * first call determines the optimal work size and the second does the work.
1925  */
1926  lwork = -1;
1927  work.resize(1);
1928 
1929 std::vector<typename numbers::NumberTraits<number>::real_type> real_work;
1930 if (numbers::NumberTraits<number>::is_complex)
1931  // This array is only used by the complex versions.
1932  real_work.resize(2 * this->m());
1933  internal::LAPACKFullMatrixImplementation::geev_helper(jobvl,
1934  jobvr,
1935  this->values,
1936  this->m(),
1937  wr,
1938  wi,
1939  vl,
1940  vr,
1941  work,
1942  real_work,
1943  lwork,
1944  info);
1945 
1946  // geev returns info=0 on success. Since we only queried the optimal size
1947  // for work, everything else would not be acceptable.
1948  Assert(info == 0, ExcInternalError());
1949  // Allocate working array according to suggestion (same strategy as was
1950  // noted in compute_svd).
1951  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1952 
1953  // resize workspace array
1954  work.resize(lwork);
1955 
1956  // Finally compute the eigenvalues.
1957  internal::LAPACKFullMatrixImplementation::geev_helper(jobvl,
1958  jobvr,
1959  this->values,
1960  this->m(),
1961  wr,
1962  wi,
1963  vl,
1964  vr,
1965  work,
1966  real_work,
1967  lwork,
1968  info);
1969 
1970  Assert(info >= 0, ExcInternalError());
1971  // TODO:[GK] What if the QR method fails?
1972  if (info != 0)
1973  std::cerr << "LAPACK error in geev" << std::endl;
1974 
1976 }
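A usage sketch for the unsymmetric eigenvalue path (not part of the source; the 2x2 matrix is illustrative, its eigenvalues are +/- i):

#include <deal.II/lac/lapack_full_matrix.h>
#include <complex>

void eigenvalue_sketch()
{
  dealii::LAPACKFullMatrix<double> A(2, 2);
  A(0, 0) = 0.;  A(0, 1) = -1.;
  A(1, 0) = 1.;  A(1, 1) = 0.;

  A.compute_eigenvalues();                         // geev; the matrix content is destroyed
  const std::complex<double> l0 = A.eigenvalue(0); // eigenvalues are returned as complex numbers
  (void)l0;
}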
1977 
1978 
1979 template <typename number>
1980 void
1981 LAPACKFullMatrix<number>::compute_eigenvalues_symmetric(
1982  const number lower_bound,
1983  const number upper_bound,
1984 const number abs_accuracy,
1985 Vector<number> &eigenvalues,
1986 FullMatrix<number> &eigenvectors)
1987 {
1988  Assert(state == matrix, ExcState(state));
1989  const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
1990  Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
1991 
1992  wr.resize(nn);
1993  LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
1994 
1995  number *const values_A = this->values.data();
1996  number *const values_eigenvectors = matrix_eigenvectors.values.data();
1997 
1998  types::blas_int info(0), lwork(-1), n_eigenpairs(0);
1999  const char *const jobz(&V);
2000  const char *const uplo(&U);
2001  const char *const range(&V);
2002  const types::blas_int *const dummy(&one);
2003  std::vector<types::blas_int> iwork(static_cast<size_type>(5 * nn));
2004  std::vector<types::blas_int> ifail(static_cast<size_type>(nn));
2005 
2006 
2007  /*
2008  * The LAPACK routine xSYEVX requires a sufficiently large work array; the
2009  * minimum requirement is
2010  *
2011  * work.size >= 8*nn.
2012  *
2013  * However, for better performance, a larger work array may be needed. The
2014  * first call determines the optimal work size and the second does the work.
2015  */
2016  work.resize(1);
2017 
2018  syevx(jobz,
2019  range,
2020  uplo,
2021  &nn,
2022  values_A,
2023  &nn,
2024  &lower_bound,
2025  &upper_bound,
2026  dummy,
2027  dummy,
2028  &abs_accuracy,
2029  &n_eigenpairs,
2030  wr.data(),
2031  values_eigenvectors,
2032  &nn,
2033  work.data(),
2034  &lwork,
2035  iwork.data(),
2036  ifail.data(),
2037  &info);
2038  // syevx returns info=0 on success. Since we only queried the optimal size
2039  // for work, everything else would not be acceptable.
2040  Assert(info == 0, ExcInternalError());
2041  // Allocate working array according to suggestion (same strategy as was noted
2042  // in compute_svd).
2043  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2044  work.resize(static_cast<size_type>(lwork));
2045 
2046  // Finally compute the eigenvalues.
2047  syevx(jobz,
2048  range,
2049  uplo,
2050  &nn,
2051  values_A,
2052  &nn,
2053  &lower_bound,
2054  &upper_bound,
2055  dummy,
2056  dummy,
2057  &abs_accuracy,
2058  &n_eigenpairs,
2059  wr.data(),
2060  values_eigenvectors,
2061  &nn,
2062  work.data(),
2063  &lwork,
2064  iwork.data(),
2065  ifail.data(),
2066  &info);
2067 
2068  // Negative return value implies a wrong argument. This should be internal.
2069  Assert(info >= 0, ExcInternalError());
2070  if (info != 0)
2071  std::cerr << "LAPACK error in syevx" << std::endl;
2072 
2073  eigenvalues.reinit(n_eigenpairs);
2074  eigenvectors.reinit(nn, n_eigenpairs, true);
2075 
2076  for (size_type i = 0; i < static_cast<size_type>(n_eigenpairs); ++i)
2077  {
2078  eigenvalues(i) = wr[i];
2079  size_type col_begin(i * nn);
2080  for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2081  {
2082  eigenvectors(j, i) = values_eigenvectors[col_begin + j];
2083  }
2084  }
2085 
2086  state = LAPACKSupport::State(unusable);
2087 }
2088 
2089 
2090 template <typename number>
2091 void
2092 LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric(
2093 LAPACKFullMatrix<number> &B,
2094  const number lower_bound,
2095  const number upper_bound,
2096 const number abs_accuracy,
2097 Vector<number> &eigenvalues,
2098  std::vector<Vector<number>> &eigenvectors,
2099  const types::blas_int itype)
2100 {
2101  Assert(state == matrix, ExcState(state));
2102  const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
2103  Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2104  Assert(B.m() == B.n(), ExcNotQuadratic());
2105  Assert(static_cast<size_type>(nn) == B.n(), ExcDimensionMismatch(nn, B.n()));
2106 
2107  wr.resize(nn);
2108  LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
2109 
2110  number *const values_A = this->values.data();
2111  number *const values_B = B.values.data();
2112  number *const values_eigenvectors = matrix_eigenvectors.values.data();
2113 
2114  types::blas_int info(0), lwork(-1), n_eigenpairs(0);
2115  const char *const jobz(&V);
2116  const char *const uplo(&U);
2117  const char *const range(&V);
2118  const types::blas_int *const dummy(&one);
2119  iwork.resize(static_cast<size_type>(5 * nn));
2120  std::vector<types::blas_int> ifail(static_cast<size_type>(nn));
2121 
2122 
2123  /*
2124  * The LAPACK routine xSYGVX requires a sufficiently large work array; the
2125  * minimum requirement is
2126  *
2127  * work.size >= 8*nn.
2128  *
2129  * However, for better performance, a larger work array may be needed. The
2130  * first call determines the optimal work size and the second does the work.
2131  */
2132  work.resize(1);
2133 
2134  sygvx(&itype,
2135  jobz,
2136  range,
2137  uplo,
2138  &nn,
2139  values_A,
2140  &nn,
2141  values_B,
2142  &nn,
2143  &lower_bound,
2144  &upper_bound,
2145  dummy,
2146  dummy,
2147  &abs_accuracy,
2148  &n_eigenpairs,
2149  wr.data(),
2150  values_eigenvectors,
2151  &nn,
2152  work.data(),
2153  &lwork,
2154  iwork.data(),
2155  ifail.data(),
2156  &info);
2157  // sygvx returns info=0 on success. Since we only queried the optimal size
2158  // for work, everything else would not be acceptable.
2159  Assert(info == 0, ExcInternalError());
2160  // Allocate working array according to suggestion (same strategy as was
2161  // noted in compute_svd).
2162  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2163 
2164  // resize workspace arrays
2165  work.resize(static_cast<size_type>(lwork));
2166 
2167  // Finally compute the generalized eigenvalues.
2168  sygvx(&itype,
2169  jobz,
2170  range,
2171  uplo,
2172  &nn,
2173  values_A,
2174  &nn,
2175  values_B,
2176  &nn,
2177  &lower_bound,
2178  &upper_bound,
2179  dummy,
2180  dummy,
2181  &abs_accuracy,
2182  &n_eigenpairs,
2183  wr.data(),
2184  values_eigenvectors,
2185  &nn,
2186  work.data(),
2187  &lwork,
2188  iwork.data(),
2189  ifail.data(),
2190  &info);
2191 
2192  // Negative return value implies a wrong argument. This should be internal.
2193  Assert(info >= 0, ExcInternalError());
2194  if (info != 0)
2195  std::cerr << "LAPACK error in sygvx" << std::endl;
2196 
2197  eigenvalues.reinit(n_eigenpairs);
2198  eigenvectors.resize(n_eigenpairs);
2199 
2200  for (size_type i = 0; i < static_cast<size_type>(n_eigenpairs); ++i)
2201  {
2202  eigenvalues(i) = wr[i];
2203  size_type col_begin(i * nn);
2204  eigenvectors[i].reinit(nn, true);
2205  for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2206  {
2207  eigenvectors[i](j) = values_eigenvectors[col_begin + j];
2208  }
2209  }
2210 
2211  state = LAPACKSupport::State(unusable);
2212 }
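The itype argument of the two compute_generalized_eigenvalues_symmetric() variants selects which symmetric-definite generalized problem is handed to LAPACK's xSYGVX/xSYGV. Following the LAPACK conventions, the admissible values correspond to
\(A x = \lambda B x \;(\text{itype}=1), \qquad A B x = \lambda x \;(\text{itype}=2), \qquad B A x = \lambda x \;(\text{itype}=3),\)
where \(A\) is the matrix stored in this object and \(B\) must be symmetric positive definite.

A minimal usage sketch for the interval-based variant above (matrix names, sizes and the eigenvalue interval are illustrative only, not taken from this file):

  LAPACKFullMatrix<double> A(n, n), B(n, n);
  // ... fill A and B; both symmetric, B positive definite ...
  Vector<double>              eigenvalues;
  std::vector<Vector<double>> eigenvectors;
  // compute all eigenpairs with eigenvalues in the interval (0., 10.)
  A.compute_generalized_eigenvalues_symmetric(
    B, 0., 10., 1e-12, eigenvalues, eigenvectors);
  // on return the contents of A are no longer usable (state is set to unusable)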
2213 
2214 
2215 template <typename number>
2216 void
2217 LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric(
2218  LAPACKFullMatrix<number> &B,
2219  std::vector<Vector<number>> &eigenvectors,
2220  const types::blas_int itype)
2221 {
2222  Assert(state == matrix, ExcState(state));
2223  const types::blas_int nn = this->n();
2224  Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
2225  Assert(B.m() == B.n(), ExcNotQuadratic());
2226  Assert(static_cast<size_type>(nn) == B.n(), ExcDimensionMismatch(nn, B.n()));
2227  Assert(eigenvectors.size() <= static_cast<size_type>(nn),
2228  ExcMessage("eigenvectors.size() > matrix.n()"));
2229 
2230  wr.resize(nn);
2231  wi.resize(nn); // This is set purely for consistency reasons with the
2232  // eigenvalues() function.
2233 
2234  number *const values_A = this->values.data();
2235  number *const values_B = B.values.data();
2236 
2237  types::blas_int info = 0;
2238  types::blas_int lwork = -1;
2239  const char *const jobz = (eigenvectors.size() > 0) ? (&V) : (&N);
2240  const char *const uplo = (&U);
2241 
2242  /*
2243  * The LAPACK routine xSYGV requires a sufficiently large work array; the
2244  * minimum requirement is
2245  *
2246  * work.size >= 3*nn - 1.
2247  *
2248  * However, for better performance, a larger work array may be needed. The
2249  * first call determines the optimal work size and the second does the work.
2250  */
2251  work.resize(1);
2252 
2253  sygv(&itype,
2254  jobz,
2255  uplo,
2256  &nn,
2257  values_A,
2258  &nn,
2259  values_B,
2260  &nn,
2261  wr.data(),
2262  work.data(),
2263  &lwork,
2264  &info);
2265  // sygv returns info=0 on success. Since we only queried the optimal size
2266  // for work, everything else would not be acceptable.
2267  Assert(info == 0, ExcInternalError());
2268  // Allocate working array according to suggestion (same strategy as was
2269  // noted in compute_svd).
2270  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
2271 
2272  // resize workspace array
2273  work.resize(static_cast<size_type>(lwork));
2274 
2275  // Finally compute the generalized eigenvalues.
2276  sygv(&itype,
2277  jobz,
2278  uplo,
2279  &nn,
2280  values_A,
2281  &nn,
2282  values_B,
2283  &nn,
2284  wr.data(),
2285  work.data(),
2286  &lwork,
2287  &info);
2288  // Negative return value implies a wrong argument. This should be internal.
2289 
2290  Assert(info >= 0, ExcInternalError());
2291  if (info != 0)
2292  std::cerr << "LAPACK error in sygv" << std::endl;
2293 
2294  for (size_type i = 0; i < eigenvectors.size(); ++i)
2295  {
2296  size_type col_begin(i * nn);
2297  eigenvectors[i].reinit(nn, true);
2298  for (size_type j = 0; j < static_cast<size_type>(nn); ++j)
2299  {
2300  eigenvectors[i](j) = values_A[col_begin + j];
2301  }
2302  }
2303  state = LAPACKSupport::State(unusable);
2304 }
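A corresponding sketch for the variant just above, which computes all eigenvalues with xSYGV and, if the eigenvectors vector is non-empty on entry (so that jobz becomes 'V'), also the eigenvectors (again illustrative, with assumed names):

  LAPACKFullMatrix<double> A(n, n), B(n, n);
  // ... fill A and B ...
  std::vector<Vector<double>> eigenvectors(n); // non-empty: request eigenvectors
  A.compute_generalized_eigenvalues_symmetric(B, eigenvectors);
  // the eigenvalues can afterwards be queried one by one,
  // e.g. through the eigenvalue() accessor of LAPACKFullMatrix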
2305 
2306 
2307 template <typename number>
2308 void
2309 LAPACKFullMatrix<number>::print_formatted(std::ostream &out,
2310  const unsigned int precision,
2311  const bool scientific,
2312  const unsigned int width_,
2313  const char * zero_string,
2314  const double denominator,
2315  const double threshold) const
2316 {
2317  unsigned int width = width_;
2318 
2319  Assert((!this->empty()) || (this->n() + this->m() == 0), ExcInternalError());
2320  Assert(state == LAPACKSupport::matrix ||
2321  state == LAPACKSupport::inverse_matrix ||
2322  state == LAPACKSupport::cholesky,
2323  ExcState(state));
2324 
2325  // set output format, but store old
2326  // state
2327  std::ios::fmtflags old_flags = out.flags();
2328  std::streamsize old_precision = out.precision(precision);
2329 
2330  if (scientific)
2331  {
2332  out.setf(std::ios::scientific, std::ios::floatfield);
2333  if (!width)
2334  width = precision + 7;
2335  }
2336  else
2337  {
2338  out.setf(std::ios::fixed, std::ios::floatfield);
2339  if (!width)
2340  width = precision + 2;
2341  }
2342 
2343  for (size_type i = 0; i < this->m(); ++i)
2344  {
2345  // Cholesky is stored in lower triangular, so just output this part:
2346  const size_type nc = state == LAPACKSupport::cholesky ? i + 1 : this->n();
2347  for (size_type j = 0; j < nc; ++j)
2348  // we might have complex numbers, so use abs also to check for nan
2349  // since there is no isnan on complex numbers
2350  if (std::isnan(std::abs((*this)(i, j))))
2351  out << std::setw(width) << (*this)(i, j) << ' ';
2352  else if (std::abs(this->el(i, j)) > threshold)
2353  out << std::setw(width) << this->el(i, j) * denominator << ' ';
2354  else
2355  out << std::setw(width) << zero_string << ' ';
2356  out << std::endl;
2357  }
2358 
2359  AssertThrow(out, ExcIO());
2360  // reset output format
2361  out.flags(old_flags);
2362  out.precision(old_precision);
2363 }
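For reference, a call to print_formatted() might look like the following sketch (stream and argument values are illustrative; a width of zero lets the function derive the column width from the precision, as in the code above):

  LAPACKFullMatrix<double> M(3, 3);
  // ... fill M ...
  M.print_formatted(std::cout,
                    /* precision   */ 3,
                    /* scientific  */ true,
                    /* width       */ 0,
                    /* zero_string */ " ",
                    /* denominator */ 1.,
                    /* threshold   */ 0.);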
2364 
2365 
2366 //----------------------------------------------------------------------//
2367 
2368 template <typename number>
2369 void
2370 PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M)
2371 {
2372  matrix = &M;
2373  mem = nullptr;
2374 }
2375 
2376 
2377 template <typename number>
2378 void
2379 PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M,
2380  VectorMemory<Vector<number>> &V)
2381 {
2382  matrix = &M;
2383  mem = &V;
2384 }
2385 
2386 
2387 template <typename number>
2388 void
2389 PreconditionLU<number>::vmult(Vector<number> &dst,
2390  const Vector<number> &src) const
2391 {
2392  dst = src;
2393  matrix->apply_lu_factorization(dst, false);
2394 }
2395 
2396 
2397 template <typename number>
2398 void
2399 PreconditionLU<number>::Tvmult(Vector<number> &dst,
2400  const Vector<number> &src) const
2401 {
2402  dst = src;
2403  matrix->apply_lu_factorization(dst, true);
2404 }
2405 
2406 
2407 template <typename number>
2408 void
2409 PreconditionLU<number>::vmult(BlockVector<number> &dst,
2410  const BlockVector<number> &src) const
2411 {
2412  Assert(mem != nullptr, ExcNotInitialized());
2413  Vector<number> *aux = mem->alloc();
2414  *aux = src;
2415  matrix->apply_lu_factorization(*aux, false);
2416  dst = *aux;
2417 }
2418 
2419 
2420 template <typename number>
2421 void
2422 PreconditionLU<number>::Tvmult(BlockVector<number> &dst,
2423  const BlockVector<number> &src) const
2424 {
2425  Assert(mem != nullptr, ExcNotInitialized());
2426  Vector<number> *aux = mem->alloc();
2427  *aux = src;
2428  matrix->apply_lu_factorization(*aux, true);
2429  dst = *aux;
2430 }
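The PreconditionLU wrapper above simply forwards to apply_lu_factorization(), so the typical pattern is to factorize once and then apply the preconditioner repeatedly. A minimal sketch (assuming the matrix has been filled and that compute_lu_factorization() is used to produce the LU factors; names are illustrative):

  LAPACKFullMatrix<double> M(n, n);
  // ... fill M ...
  M.compute_lu_factorization();

  PreconditionLU<double> preconditioner;
  preconditioner.initialize(M);

  Vector<double> dst(n), src(n);
  // ... fill src ...
  preconditioner.vmult(dst, src); // dst = M^{-1} src via forward/backward substitution

For the BlockVector overloads, a VectorMemory<Vector<double>> object has to be passed to initialize() as well, since a temporary Vector is allocated from it for the solve.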
2431 
2432 
2433 
2434 #include "lapack_full_matrix.inst"
2435 
2436 
2437 DEAL_II_NAMESPACE_CLOSE