Reference documentation for deal.II version 9.0.0
lapack_full_matrix.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/lac/lapack_full_matrix.h>
18 #include <deal.II/lac/lapack_templates.h>
19 #include <deal.II/lac/lapack_support.h>
20 #include <deal.II/lac/full_matrix.h>
21 #include <deal.II/lac/sparse_matrix.h>
22 #include <deal.II/lac/vector.h>
23 #include <deal.II/lac/block_vector.h>
24 
25 #include <deal.II/lac/utilities.h>
26 
27 #include <iostream>
28 #include <iomanip>
29 
30 DEAL_II_NAMESPACE_OPEN
31 
32 using namespace LAPACKSupport;
33 
34 template <typename number>
35 LAPACKFullMatrix<number>::LAPACKFullMatrix (const size_type n)
36  :
37  TransposeTable<number> (n,n),
38  state (matrix),
39  property(general)
40 {}
41 
42 
43 template <typename number>
44 LAPACKFullMatrix<number>::LAPACKFullMatrix (const size_type m,
45  const size_type n)
46  :
47  TransposeTable<number> (m, n),
48  state (matrix),
49  property(general)
50 {}
51 
52 
53 template <typename number>
54 LAPACKFullMatrix<number>::LAPACKFullMatrix (const LAPACKFullMatrix &M)
55  :
56  TransposeTable<number> (M),
57  state (matrix),
58  property(general)
59 {}
60 
61 
62 template <typename number>
63 LAPACKFullMatrix<number> &
64 LAPACKFullMatrix<number>::operator = (const LAPACKFullMatrix<number> &M)
65 {
66  TransposeTable<number>::operator=(M);
67  state = M.state;
68  property = M.property;
69  return *this;
70 }
71 
72 
73 
74 template <typename number>
75 void
76 LAPACKFullMatrix<number>::reinit (const size_type n)
77 {
78  this->TransposeTable<number>::reinit (n, n);
79  state = LAPACKSupport::matrix;
80 }
81 
82 
83 
84 template <typename number>
85 void
86 LAPACKFullMatrix<number>::grow_or_shrink (const size_type n)
87 {
88  const size_type s = std::min(std::min(this->m(), n), this->n());
89  TransposeTable<number> copy(std::move(*this));
90  this->TransposeTable<number>::reinit (n, n);
91  for (size_type i = 0; i < s; ++i)
92  for (size_type j = 0; j < s; ++j)
93  (*this)(i,j) = copy(i,j);
94 }
95 
96 
97 
98 template <typename number>
99 void
100 LAPACKFullMatrix<number>::remove_row_and_column (const size_type row, const size_type col)
101 {
102  AssertIndexRange (row,this->m());
103  AssertIndexRange (col,this->n());
104 
105  const size_type nrows = this->m()-1;
106  const size_type ncols = this->n()-1;
107 
108  TransposeTable<number> copy(std::move(*this));
109  this->TransposeTable<number>::reinit (nrows, ncols);
110 
111  for (size_type j = 0; j < ncols; ++j)
112  {
113  const size_type jj = ( j < col ? j : j+1);
114  for (size_type i = 0; i < nrows; ++i)
115  {
116  const size_type ii = ( i < row ? i : i+1);
117  (*this)(i,j) = copy(ii,jj);
118  }
119  }
120 }
121 
122 
123 
124 template <typename number>
125 void
126 LAPACKFullMatrix<number>::reinit (const size_type m,
127  const size_type n)
128 {
129  this->TransposeTable<number>::reinit (m, n);
130  state = LAPACKSupport::matrix;
131 }
132 
133 
134 template <typename number>
135 template <typename number2>
136 LAPACKFullMatrix<number> &
137 LAPACKFullMatrix<number>::operator = (const FullMatrix<number2> &M)
138 {
139  Assert (this->m() == M.m(), ExcDimensionMismatch(this->m(), M.m()));
140  Assert (this->n() == M.n(), ExcDimensionMismatch(this->n(), M.n()));
141  for (size_type i=0; i<this->m(); ++i)
142  for (size_type j=0; j<this->n(); ++j)
143  (*this)(i,j) = M(i,j);
144 
145  state = LAPACKSupport::matrix;
146  property = LAPACKSupport::general;
147  return *this;
148 }
149 
150 
151 template <typename number>
152 template <typename number2>
153 LAPACKFullMatrix<number> &
154 LAPACKFullMatrix<number>::operator = (const SparseMatrix<number2> &M)
155 {
156  Assert (this->m() == M.n(), ExcDimensionMismatch(this->m(), M.n()));
157  Assert (this->n() == M.m(), ExcDimensionMismatch(this->n(), M.m()));
158  for (size_type i=0; i<this->m(); ++i)
159  for (size_type j=0; j<this->n(); ++j)
160  (*this)(i,j) = M.el(i,j);
161 
162  state = LAPACKSupport::matrix;
163  property = LAPACKSupport::general;
164  return *this;
165 }
166 
167 
168 template <typename number>
169 LAPACKFullMatrix<number> &
170 LAPACKFullMatrix<number>::operator = (const number d)
171 {
172  (void)d;
173  Assert (d==number(0), ExcScalarAssignmentOnlyForZeroValue());
174 
175  if (this->n_elements() != 0)
176  this->reset_values();
177 
178  state = LAPACKSupport::matrix;
179  return *this;
180 }
181 
182 
183 template <typename number>
184 LAPACKFullMatrix<number> &
185 LAPACKFullMatrix<number>::operator*= (const number factor)
186 {
187  Assert(state == LAPACKSupport::matrix ||
188  state == LAPACKSupport::inverse_matrix,
189  ExcState(state));
190 
191  AssertIsFinite(factor);
192  const char type = 'G';
193  const number cfrom = 1.;
194  const types::blas_int m = this->m();
195  const types::blas_int n = this->n();
196  const types::blas_int lda = this->m();
197  types::blas_int info = 0;
198  // kl and ku will not be referenced for type = G (dense matrices).
199  const types::blas_int kl=0;
200  number *values = &this->values[0];
201 
202  lascl(&type,&kl,&kl,&cfrom,&factor,&m,&n,values,&lda,&info);
203 
204  // Negative return value implies a wrong argument. This should be internal.
205  Assert(info >= 0, ExcInternalError());
206 
207  return *this;
208 }
209 
210 
211 template <typename number>
212 LAPACKFullMatrix<number> &
213 LAPACKFullMatrix<number>::operator/= (const number factor)
214 {
215  Assert(state == LAPACKSupport::matrix ||
216  state == LAPACKSupport::inverse_matrix,
217  ExcState(state));
218 
219  AssertIsFinite(factor);
220  Assert (factor != number(0.), ExcZero() );
221 
222  const char type = 'G';
223  const number cto = 1.;
224  const types::blas_int m = this->m();
225  const types::blas_int n = this->n();
226  const types::blas_int lda = this->m();
227  types::blas_int info = 0;
228  // kl and ku will not be referenced for type = G (dense matrices).
229  const types::blas_int kl=0;
230  number *values = &this->values[0];
231 
232  lascl(&type,&kl,&kl,&factor,&cto,&m,&n,values,&lda,&info);
233 
234  // Negative return value implies a wrong argument. This should be internal.
235  Assert(info >= 0, ExcInternalError());
236 
237  return *this;
238 }
239 
240 
241 
242 template <typename number>
243 void
244 LAPACKFullMatrix<number>::add (const number a,
245  const LAPACKFullMatrix<number> &A)
246 {
247  Assert(state == LAPACKSupport::matrix ||
248  state == LAPACKSupport::inverse_matrix,
249  ExcState(state));
250 
251  Assert (m() == A.m(), ExcDimensionMismatch(m(), A.m()));
252  Assert (n() == A.n(), ExcDimensionMismatch(n(), A.n()));
253 
254  AssertIsFinite(a);
255 
256  // BLAS does not offer functions to add matrices.
257  // LAPACKFullMatrix is stored in a contiguous array
258  // ==> use BLAS 1 for adding vectors
259  const types::blas_int n = this->m() * this->n();
260  const types::blas_int inc=1;
261  number *values = &this->values[0];
262  const number *values_A = &A.values[0];
263 
264  axpy(&n,&a,values_A,&inc,values,&inc);
265 }
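
// Example sketch (not part of lapack_full_matrix.cc): add() above treats the
// contiguous column-major storage as one long vector and hands it to BLAS-1
// axpy, i.e. A.add(a, B) is the elementwise update A(i,j) += a*B(i,j).
// The helper name example_add_usage() is hypothetical.
void example_add_usage ()
{
  FullMatrix<double> fa (2, 2), fb (2, 2);
  fa (0, 0) = 1.;  fa (1, 1) = 1.;       // A starts as the identity
  fb (0, 1) = 3.;  fb (1, 0) = 5.;       // some second matrix

  LAPACKFullMatrix<double> A (2, 2), B (2, 2);
  A = fa;                                // uses operator=(const FullMatrix&) defined above
  B = fb;
  A.add (2., B);                         // now A(i,j) == fa(i,j) + 2.*fb(i,j)
}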
266 
267 
268 
269 namespace
270 {
271  template <typename number>
272  void cholesky_rank1(LAPACKFullMatrix<number> &A,
273  const number a,
274  const Vector<number> &v)
275  {
276  const typename LAPACKFullMatrix<number>::size_type N = A.n();
277  Vector<number> z(v);
278  // Cholesky update / downdate, see
279  // 6.5.4 Cholesky Updating and Downdating, Golub 2013 Matrix computations
280  // Note that potrf() is called with LAPACKSupport::L , so the
281  // factorization is stored in lower triangular part.
282  // Also see discussion here http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=2646
283  if (a > 0.)
284  {
285  // simple update via a sequence of Givens rotations.
286  // Observe that
287  //
288  //        | L^T |T   | L^T |
289  // A <--  |     |    |     |  =  L L^T + z z^T
290  //        | z^T |    | z^T |
291  //
292  // so we can get updated factor by doing a sequence of Givens
293  // rotations to make the matrix lower-triangular
294  // Also see LINPACK's dchud http://www.netlib.org/linpack/dchud.f
295  z *= std::sqrt(a);
296  for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
297  {
298  const std::array<number,3> csr = Utilities::LinearAlgebra::givens_rotation(A(k,k),z(k));
299  A(k,k) = csr[2];
300  for (typename LAPACKFullMatrix<number>::size_type i = k+1; i < N; ++i)
301  {
302  const number t = A(i,k);
303  A(i,k) = csr[0] * A(i,k) + csr[1] * z(i);
304  z(i) = -csr[1] * t + csr[0] * z(i);
305  }
306  }
307  }
308  else
309  {
310  // downdating is not always possible as we may end up with a
311  // negative definite matrix. If it is possible, then it boils
312  // down to the application of hyperbolic rotations.
313  // Observe that
314  //
315  //        | L^T |T      | L^T |
316  // A <--  |     |   S   |     |  =  L L^T - z z^T
317  //        | z^T |       | z^T |
318  //
319  //        | In  0 |
320  // S  :=  |       |
321  //        | 0  -1 |
322  //
323  // We are looking for H which is S-orthogonal (HSH^T=S) and
324  // can restore upper-triangular factor of the factorization of A above.
325  // We will use Hyperbolic rotations to do the job
326  //
327  // | c  -s |   | x1 |   | r |
328  // |       |   |    | = |   | ,  c^2 - s^2 = 1
329  // | -s  c |   | x2 |   | 0 |
330  //
331  // which has a real solution only if x2 <= x1.
332  // See also Linpack's http://www.netlib.org/linpack/dchdd.f and
333  // https://infoscience.epfl.ch/record/161468/files/cholupdate.pdf and
334  // "Analysis of a recursive Least Squares Hyperbolic Rotation Algorithm for Signal Processing", Alexander, Pan, Plemmons, 1988.
335  z *= std::sqrt(-a);
336  for (typename LAPACKFullMatrix<number>::size_type k = 0; k < N; ++k)
337  {
338  const std::array<number,3> csr = Utilities::LinearAlgebra::hyperbolic_rotation(A(k,k),z(k));
339  A(k,k) = csr[2];
340  for (typename LAPACKFullMatrix<number>::size_type i = k+1; i < N; ++i)
341  {
342  const number t = A(i,k);
343  A(i,k) = csr[0] * A(i,k) - csr[1] * z(i);
344  z(i) = -csr[1] * t + csr[0] * z(i);
345  }
346  }
347  }
348  }
349 
350 
351  template <typename number>
352  void cholesky_rank1(LAPACKFullMatrix<std::complex<number> > &/*A*/,
353  const std::complex<number> /*a*/,
354  const Vector<std::complex<number> > &/*v*/)
355  {
356  AssertThrow(false, ExcNotImplemented());
357  }
358 }
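
// Example sketch (not part of lapack_full_matrix.cc): the Givens/hyperbolic
// rotations above keep an existing Cholesky factor consistent under the
// update A <- A + a*v*v^T without refactorizing. A hypothetical usage,
// starting from a symmetric positive definite matrix:
void example_cholesky_rank1_usage ()
{
  FullMatrix<double> f (2, 2);
  f (0, 0) = 4.;  f (1, 1) = 9.;         // diagonal, hence SPD

  LAPACKFullMatrix<double> A (2, 2);
  A = f;
  A.set_property (LAPACKSupport::symmetric);
  A.compute_cholesky_factorization ();   // state becomes 'cholesky'

  Vector<double> v (2);
  v (0) = 1.;  v (1) = 2.;
  A.rank1_update (1., v);                // updates the stored factor in place
}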
359 
360 
361 
362 template <typename number>
363 void
364 LAPACKFullMatrix<number>::rank1_update (const number a,
365  const Vector<number> &v)
366 {
367  Assert(property == LAPACKSupport::symmetric,
368  ExcProperty(property));
369 
370  Assert (n() == m(), ExcInternalError());
371  Assert (m() == v.size(), ExcDimensionMismatch(m(), v.size()));
372 
373  AssertIsFinite(a);
374 
375  if (state == LAPACKSupport::matrix)
376  {
377  {
378  const types::blas_int N = this->m();
379  const char uplo = LAPACKSupport::U;
380  const types::blas_int lda = N;
381  const types::blas_int incx=1;
382 
383  syr(&uplo, &N, &a, v.begin(), &incx, this->values.begin(), &lda);
384  }
385 
386  const size_type N = this->m();
387  // FIXME: we should really only update upper or lower triangular parts
388  // of symmetric matrices and make sure the interface is consistent,
389  // for example operator(i,j) gives correct results regardless of storage.
390  for (size_type i = 0; i < N; ++i)
391  for (size_type j = 0; j < i; ++j)
392  (*this)(i,j) = (*this)(j,i);
393  }
394  else if (state == LAPACKSupport::cholesky)
395  {
396  cholesky_rank1(*this, a, v);
397  }
398  else
399  AssertThrow(false, ExcState(state));
400 }
401 
402 
403 
404 template <typename number>
405 void
406 LAPACKFullMatrix<number>::vmult (
407  Vector<number> &w,
408  const Vector<number> &v,
409  const bool adding) const
410 {
411  const types::blas_int mm = this->m();
412  const types::blas_int nn = this->n();
413  const number alpha = 1.;
414  const number beta = (adding ? 1. : 0.);
415  const number null = 0.;
416 
417  // use trmv for triangular matrices
418  if ((property == upper_triangular ||
419  property == lower_triangular) &&
420  (mm==nn) &&
421  state == matrix)
422  {
423  Assert (adding == false,
424  ExcNotImplemented());
425 
426  AssertDimension(v.size(), this->n());
427  AssertDimension(w.size(), this->m());
428 
429  const char diag = 'N';
430  const char trans = 'N';
431  const char uplo = (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
432 
433  w = v;
434 
435  const types::blas_int N = mm;
436  const types::blas_int lda = N;
437  const types::blas_int incx = 1;
438 
439  trmv (&uplo, &trans, &diag,
440  &N, &this->values[0], &lda,
441  &w[0], &incx);
442 
443  return;
444  }
445 
446  switch (state)
447  {
448  case matrix:
449  case inverse_matrix:
450  {
451  AssertDimension(v.size(), this->n());
452  AssertDimension(w.size(), this->m());
453 
454  gemv("N", &mm, &nn, &alpha, &this->values[0], &mm, v.values.get(), &one, &beta, w.values.get(), &one);
455  break;
456  }
457  case svd:
458  {
459  Threads::Mutex::ScopedLock lock (mutex);
460  AssertDimension(v.size(), this->n());
461  AssertDimension(w.size(), this->m());
462  // Compute V^T v
463  work.resize(std::max(mm,nn));
464  gemv("N", &nn, &nn, &alpha, &svd_vt->values[0], &nn, v.values.get(), &one, &null, work.data(), &one);
465  // Multiply by singular values
466  for (size_type i=0; i<wr.size(); ++i)
467  work[i] *= wr[i];
468  // Multiply with U
469  gemv("N", &mm, &mm, &alpha, &svd_u->values[0], &mm, work.data(), &one, &beta, w.values.get(), &one);
470  break;
471  }
472  case inverse_svd:
473  {
474  Threads::Mutex::ScopedLock lock (mutex);
475  AssertDimension(w.size(), this->n());
476  AssertDimension(v.size(), this->m());
477  // Compute U^T v
478  work.resize(std::max(mm,nn));
479  gemv("T", &mm, &mm, &alpha, &svd_u->values[0], &mm, v.values.get(), &one, &null, work.data(), &one);
480  // Multiply by singular values
481  for (size_type i=0; i<wr.size(); ++i)
482  work[i] *= wr[i];
483  // Multiply with V
484  gemv("T", &nn, &nn, &alpha, &svd_vt->values[0], &nn, work.data(), &one, &beta, w.values.get(), &one);
485  break;
486  }
487  default:
488  Assert (false, ExcState(state));
489  }
490 }
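
// Example sketch (not part of lapack_full_matrix.cc): in the 'svd' branch
// above, vmult() applies w = U * diag(wr) * V^T * v in three steps (gemv with
// V^T, scaling by the stored singular values, gemv with U). A hypothetical
// usage after an explicit SVD:
void example_vmult_after_svd ()
{
  FullMatrix<double> f (2, 2);
  f (0, 0) = 3.;  f (1, 1) = 2.;

  LAPACKFullMatrix<double> A (2, 2);
  A = f;
  A.compute_svd ();                      // state becomes 'svd'

  Vector<double> v (2), w (2);
  v (0) = 1.;  v (1) = 1.;
  A.vmult (w, v);                        // same w as multiplying by the original A
}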
491 
492 
493 template <typename number>
494 void
495 LAPACKFullMatrix<number>::Tvmult (
496  Vector<number> &w,
497  const Vector<number> &v,
498  const bool adding) const
499 {
500  const types::blas_int mm = this->m();
501  const types::blas_int nn = this->n();
502  const number alpha = 1.;
503  const number beta = (adding ? 1. : 0.);
504  const number null = 0.;
505 
506  // use trmv for triangular matrices
507  if ((property == upper_triangular ||
508  property == lower_triangular) &&
509  (mm==nn) &&
510  state == matrix)
511  {
512  Assert (adding == false,
513  ExcNotImplemented());
514 
515  AssertDimension(v.size(), this->n());
516  AssertDimension(w.size(), this->m());
517 
518  const char diag = 'N';
519  const char trans = 'T';
520  const char uplo = (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
521 
522  w = v;
523 
524  const types::blas_int N = mm;
525  const types::blas_int lda = N;
526  const types::blas_int incx = 1;
527 
528  trmv (&uplo, &trans, &diag,
529  &N, &this->values[0], &lda,
530  &w[0], &incx);
531 
532  return;
533  }
534 
535 
536  switch (state)
537  {
538  case matrix:
539  case inverse_matrix:
540  {
541  AssertDimension(w.size(), this->n());
542  AssertDimension(v.size(), this->m());
543 
544  gemv("T", &mm, &nn, &alpha, &this->values[0], &mm, v.values.get(), &one, &beta, w.values.get(), &one);
545  break;
546  }
547  case svd:
548  {
549  Threads::Mutex::ScopedLock lock (mutex);
550  AssertDimension(w.size(), this->n());
551  AssertDimension(v.size(), this->m());
552 
553  // Compute U^T v
554  work.resize(std::max(mm,nn));
555  gemv("T", &mm, &mm, &alpha, &svd_u->values[0], &mm, v.values.get(), &one, &null, work.data(), &one);
556  // Multiply by singular values
557  for (size_type i=0; i<wr.size(); ++i)
558  work[i] *= wr[i];
559  // Multiply with V
560  gemv("T", &nn, &nn, &alpha, &svd_vt->values[0], &nn, work.data(), &one, &beta, w.values.get(), &one);
561  break;
562  }
563  case inverse_svd:
564  {
565  Threads::Mutex::ScopedLock lock (mutex);
566  AssertDimension(v.size(), this->n());
567  AssertDimension(w.size(), this->m());
568 
569  // Compute V^T v
570  work.resize(std::max(mm,nn));
571  gemv("N", &nn, &nn, &alpha, &svd_vt->values[0], &nn, v.values.get(), &one, &null, work.data(), &one);
572  // Multiply by singular values
573  for (size_type i=0; i<wr.size(); ++i)
574  work[i] *= wr[i];
575  // Multiply with U
576  gemv("N", &mm, &mm, &alpha, &svd_u->values[0], &mm, work.data(), &one, &beta, w.values.get(), &one);
577  break;
578  }
579  default:
580  Assert (false, ExcState(state));
581  }
582 }
583 
584 
585 template <typename number>
586 void
587 LAPACKFullMatrix<number>::vmult_add (Vector<number> &w,
588  const Vector<number> &v) const
589 {
590  vmult(w, v, true);
591 }
592 
593 
594 template <typename number>
595 void
596 LAPACKFullMatrix<number>::Tvmult_add (Vector<number> &w,
597  const Vector<number> &v) const
598 {
599  Tvmult(w, v, true);
600 }
601 
602 
603 template <typename number>
604 void
605 LAPACKFullMatrix<number>::mmult (LAPACKFullMatrix<number> &C,
606  const LAPACKFullMatrix<number> &B,
607  const bool adding) const
608 {
609  Assert(state == matrix || state == inverse_matrix, ExcState(state));
610  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
611  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
612  Assert (this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
613  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
614  Assert (C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
615  const types::blas_int mm = this->m();
616  const types::blas_int nn = B.n();
617  const types::blas_int kk = this->n();
618  const number alpha = 1.;
619  const number beta = (adding ? 1. : 0.);
620 
621  gemm("N", "N", &mm, &nn, &kk, &alpha, &this->values[0], &mm, &B.values[0],
622  &kk, &beta, &C.values[0], &mm);
623 }
624 
625 
626 template <typename number>
627 void
628 LAPACKFullMatrix<number>::mmult (FullMatrix<number> &C,
629  const LAPACKFullMatrix<number> &B,
630  const bool adding) const
631 {
632  Assert(state == matrix || state == inverse_matrix, ExcState(state));
633  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
634  Assert (this->n() == B.m(), ExcDimensionMismatch(this->n(), B.m()));
635  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
636  Assert (C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
637  const types::blas_int mm = this->m();
638  const types::blas_int nn = B.n();
639  const types::blas_int kk = this->n();
640  const number alpha = 1.;
641  const number beta = (adding ? 1. : 0.);
642 
643  // since FullMatrix stores the matrix in transposed order compared to this
644  // matrix, compute B^T * A^T = (A * B)^T
645  gemm("T", "T", &nn, &mm, &kk, &alpha, &B.values[0], &kk, &this->values[0],
646  &mm, &beta, &C(0,0), &nn);
647 }
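
// Example sketch (not part of lapack_full_matrix.cc): because FullMatrix
// stores its entries row by row while this class stores them column by
// column, the gemm call above computes B^T * A^T and writes it into C's
// storage, which is exactly (A*B)^T laid out row-wise, i.e. A*B as seen by
// the FullMatrix. A hypothetical call:
void example_mmult_into_full_matrix ()
{
  FullMatrix<double> fa (2, 2), fb (2, 2), C (2, 2);
  fa (0, 1) = 1.;                        // A
  fb (1, 0) = 2.;                        // B

  LAPACKFullMatrix<double> A (2, 2), B (2, 2);
  A = fa;
  B = fb;
  A.mmult (C, B, false);                 // C(i,j) == sum_k fa(i,k)*fb(k,j)
}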
648 
649 
650 
651 template <typename number>
652 void
653 LAPACKFullMatrix<number>::Tmmult (LAPACKFullMatrix<number> &C,
654  const LAPACKFullMatrix<number> &B,
655  const Vector<number> &V,
656  const bool adding) const
657 {
658  Assert(state == matrix || state == inverse_matrix, ExcState(state));
659  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
660  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
661 
662  const LAPACKFullMatrix<number> &A = *this;
663 
664  Assert (A.m() == B.m(), ExcDimensionMismatch(A.m(), B.m()));
665  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
666  Assert (C.m() == A.n(), ExcDimensionMismatch(A.n(), C.m()));
667  Assert (V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
668 
669  const types::blas_int mm = A.n();
670  const types::blas_int nn = B.n();
671  const types::blas_int kk = B.m();
672 
673  // LAPACK does not have any triple product routines, including the case of
674  // a diagonal matrix in the middle, see
675  // https://stackoverflow.com/questions/3548069/multiplying-three-matrices-in-blas-with-the-middle-one-being-diagonal
676  // http://mathforum.org/kb/message.jspa?messageID=3546564
677 
678  Threads::Mutex::ScopedLock lock (mutex);
679  // First, get V*B into "work" array
680  work.resize(kk*nn);
681  // following http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=768#p2577
682  // do the left-multiplication manually. Note that Xscal would require first
683  // copying the input vector, as the multiplication is done in place.
684  for (types::blas_int j = 0; j < nn; ++j)
685  for (types::blas_int i = 0; i < kk; ++i)
686  {
687  Assert(j*kk+i<static_cast<types::blas_int>(work.size()), ExcInternalError());
688  work[j*kk+i]=V(i)*B(i,j);
689  }
690 
691  // Now do the standard Tmmult:
692  const number alpha = 1.;
693  const number beta = (adding ? 1. : 0.);
694 
695  gemm("T", "N", &mm, &nn, &kk, &alpha, &this->values[0], &kk, &work[0],
696  &kk, &beta, &C.values[0], &mm);
697 }
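
// Example sketch (not part of lapack_full_matrix.cc): the routine above forms
// C = A^T * diag(V) * B by scaling the rows of B into the 'work' array by
// hand (LAPACK offers no triple-product kernel) and then issuing a single
// gemm. A hypothetical call with row weights V:
void example_weighted_Tmmult ()
{
  FullMatrix<double> fa (3, 2), fb (3, 2);
  fa (0, 0) = 1.;  fa (1, 1) = 1.;  fa (2, 0) = 1.;
  fb (0, 1) = 2.;  fb (1, 0) = 4.;  fb (2, 1) = 6.;

  LAPACKFullMatrix<double> A (3, 2), B (3, 2), C (2, 2);
  A = fa;
  B = fb;

  Vector<double> V (3);
  V (0) = 1.;  V (1) = 2.;  V (2) = 3.;  // entries of the diagonal matrix

  A.Tmmult (C, B, V, false);             // C(i,j) == sum_k fa(k,i)*V(k)*fb(k,j)
}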
698 
699 
700 
701 template <typename number>
702 void
703 LAPACKFullMatrix<number>::scale_rows (const Vector<number> &V)
704 {
705  LAPACKFullMatrix<number> &A = *this;
706  Assert(state == matrix || state == inverse_matrix, ExcState(state));
707  Assert (V.size() == A.m(), ExcDimensionMismatch(A.m(), V.size()));
708 
709  const types::blas_int nn = A.n();
710  const types::blas_int kk = A.m();
711  for (types::blas_int j = 0; j < nn; ++j)
712  for (types::blas_int i = 0; i < kk; ++i)
713  A(i,j)*=V(i);
714 }
715 
716 
717 
718 template <typename number>
719 void
720 LAPACKFullMatrix<number>::Tmmult (LAPACKFullMatrix<number> &C,
721  const LAPACKFullMatrix<number> &B,
722  const bool adding) const
723 {
724  Assert(state == matrix || state == inverse_matrix, ExcState(state));
725  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
726  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
727  Assert (this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
728  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
729  Assert (C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
730  const types::blas_int mm = this->n();
731  const types::blas_int nn = B.n();
732  const types::blas_int kk = B.m();
733  const number alpha = 1.;
734  const number beta = (adding ? 1. : 0.);
735 
736  if (PointerComparison::equal (this, &B))
737  {
738  syrk(&LAPACKSupport::U,"T",&nn,&kk,&alpha,&this->values[0],&kk,&beta,&C.values[0],&nn);
739 
740  // fill-in lower triangular part
741  for (types::blas_int j = 0; j < nn; ++j)
742  for (types::blas_int i = 0; i < j; ++i)
743  C(j,i) = C(i,j);
744 
745  C.property = symmetric;
746  }
747  else
748  {
749  gemm("T", "N", &mm, &nn, &kk, &alpha, &this->values[0], &kk, &B.values[0],
750  &kk, &beta, &C.values[0], &mm);
751  }
752 }
753 
754 
755 template <typename number>
756 void
757 LAPACKFullMatrix<number>::Tmmult (FullMatrix<number> &C,
758  const LAPACKFullMatrix<number> &B,
759  const bool adding) const
760 {
761  Assert(state == matrix || state == inverse_matrix, ExcState(state));
762  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
763  Assert (this->m() == B.m(), ExcDimensionMismatch(this->m(), B.m()));
764  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
765  Assert (C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
766  const types::blas_int mm = this->n();
767  const types::blas_int nn = B.n();
768  const types::blas_int kk = B.m();
769  const number alpha = 1.;
770  const number beta = (adding ? 1. : 0.);
771 
772  // since FullMatrix stores the matrix in transposed order compared to this
773  // matrix, compute B^T * A = (A^T * B)^T
774  gemm("T", "N", &nn, &mm, &kk, &alpha, &B.values[0], &kk, &this->values[0],
775  &kk, &beta, &C(0,0), &nn);
776 }
777 
778 
779 template <typename number>
780 void
781 LAPACKFullMatrix<number>::mTmult (LAPACKFullMatrix<number> &C,
782  const LAPACKFullMatrix<number> &B,
783  const bool adding) const
784 {
785  Assert(state == matrix || state == inverse_matrix, ExcState(state));
786  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
787  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
788  Assert (this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
789  Assert (C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
790  Assert (C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
791  const types::blas_int mm = this->m();
792  const types::blas_int nn = B.m();
793  const types::blas_int kk = B.n();
794  const number alpha = 1.;
795  const number beta = (adding ? 1. : 0.);
796 
797  if (PointerComparison::equal (this, &B))
798  {
799  syrk(&LAPACKSupport::U,"N",&nn,&kk,&alpha,&this->values[0],&nn,&beta,&C.values[0],&nn);
800 
801  // fill-in lower triangular part
802  for (types::blas_int j = 0; j < nn; ++j)
803  for (types::blas_int i = 0; i < j; ++i)
804  C(j,i) = C(i,j);
805 
806  C.property = symmetric;
807  }
808  else
809  {
810  gemm("N", "T", &mm, &nn, &kk, &alpha, &this->values[0], &mm, &B.values[0],
811  &nn, &beta, &C.values[0], &mm);
812  }
813 }
814 
815 
816 
817 template <typename number>
818 void
819 LAPACKFullMatrix<number>::mTmult (FullMatrix<number> &C,
820  const LAPACKFullMatrix<number> &B,
821  const bool adding) const
822 {
823  Assert(state == matrix || state == inverse_matrix, ExcState(state));
824  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
825  Assert (this->n() == B.n(), ExcDimensionMismatch(this->n(), B.n()));
826  Assert (C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
827  Assert (C.m() == this->m(), ExcDimensionMismatch(this->m(), C.m()));
828  const types::blas_int mm = this->m();
829  const types::blas_int nn = B.m();
830  const types::blas_int kk = B.n();
831  const number alpha = 1.;
832  const number beta = (adding ? 1. : 0.);
833 
834  // since FullMatrix stores the matrix in transposed order compared to this
835  // matrix, compute B * A^T = (A * B^T)^T
836  gemm("N", "T", &nn, &mm, &kk, &alpha, &B.values[0], &nn, &this->values[0],
837  &mm, &beta, &C(0,0), &nn);
838 }
839 
840 
841 template <typename number>
842 void
843 LAPACKFullMatrix<number>::TmTmult (LAPACKFullMatrix<number> &C,
844  const LAPACKFullMatrix<number> &B,
845  const bool adding) const
846 {
847  Assert(state == matrix || state == inverse_matrix, ExcState(state));
848  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
849  Assert(C.state == matrix || C.state == inverse_matrix, ExcState(C.state));
850  Assert (this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
851  Assert (C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
852  Assert (C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
853  const types::blas_int mm = this->n();
854  const types::blas_int nn = B.m();
855  const types::blas_int kk = B.n();
856  const number alpha = 1.;
857  const number beta = (adding ? 1. : 0.);
858 
859  gemm("T", "T", &mm, &nn, &kk, &alpha, &this->values[0], &kk, &B.values[0],
860  &nn, &beta, &C.values[0], &mm);
861 }
862 
863 
864 template <typename number>
865 void
866 LAPACKFullMatrix<number>::TmTmult (FullMatrix<number> &C,
867  const LAPACKFullMatrix<number> &B,
868  const bool adding) const
869 {
870  Assert(state == matrix || state == inverse_matrix, ExcState(state));
871  Assert(B.state == matrix || B.state == inverse_matrix, ExcState(B.state));
872  Assert (this->m() == B.n(), ExcDimensionMismatch(this->m(), B.n()));
873  Assert (C.n() == B.m(), ExcDimensionMismatch(C.n(), B.m()));
874  Assert (C.m() == this->n(), ExcDimensionMismatch(this->n(), C.m()));
875  const types::blas_int mm = this->n();
876  const types::blas_int nn = B.m();
877  const types::blas_int kk = B.n();
878  const number alpha = 1.;
879  const number beta = (adding ? 1. : 0.);
880 
881  // since FullMatrix stores the matrix in transposed order compared to this
882  // matrix, compute B * A = (A^T * B^T)^T
883  gemm("N", "N", &nn, &mm, &kk, &alpha, &B.values[0], &nn, &this->values[0],
884  &kk, &beta, &C(0,0), &nn);
885 }
886 
887 
888 template <typename number>
889 void
890 LAPACKFullMatrix<number>::compute_lu_factorization ()
891 {
892  Assert(state == matrix, ExcState(state));
893  state = LAPACKSupport::unusable;
894 
895  const types::blas_int mm = this->m();
896  const types::blas_int nn = this->n();
897  number *const values = &this->values[0];
898  ipiv.resize(mm);
899  types::blas_int info = 0;
900  getrf(&mm, &nn, values, &mm, ipiv.data(), &info);
901 
902  Assert(info >= 0, ExcInternalError());
903 
904  // if info >= 0, the factorization has been completed
905  state = lu;
906 
907  AssertThrow(info == 0, LACExceptions::ExcSingular());
908 }
909 
910 
911 
912 template <typename number>
913 void
914 LAPACKFullMatrix<number>::set_property (const LAPACKSupport::Property p)
915 {
916  property = p;
917 }
918 
919 
920 
921 template <typename number>
922 number LAPACKFullMatrix<number>::l1_norm () const
923 {
924  const char type('O');
925  return norm(type);
926 }
927 
928 
929 
930 template <typename number>
931 number LAPACKFullMatrix<number>::linfty_norm () const
932 {
933  const char type('I');
934  return norm(type);
935 }
936 
937 
938 
939 template <typename number>
940 number LAPACKFullMatrix<number>::frobenius_norm () const
941 {
942  const char type('F');
943  return norm(type);
944 }
945 
946 
947 
948 template <typename number>
949 number LAPACKFullMatrix<number>::norm(const char type) const
950 {
951  Threads::Mutex::ScopedLock lock (mutex);
952 
953  Assert (state == LAPACKSupport::matrix ||
954  state == LAPACKSupport::inverse_matrix,
955  ExcMessage("norms can be called in matrix state only."));
956 
957  const types::blas_int N = this->n();
958  const types::blas_int M = this->m();
959  const number *const values = &this->values[0];
960  if (property == symmetric)
961  {
962  const types::blas_int lda = std::max<types::blas_int>(1,N);
963  const types::blas_int lwork = (type == 'I' || type == 'O') ?
964  std::max<types::blas_int>(1,N) :
965  0;
966  work.resize(lwork);
967  return lansy (&type, &LAPACKSupport::L, &N, values, &lda, work.data());
968  }
969  else
970  {
971  const types::blas_int lda = std::max<types::blas_int>(1,M);
972  const types::blas_int lwork = (type == 'I') ?
973  std::max<types::blas_int>(1,M) :
974  0;
975  work.resize(lwork);
976  return lange (&type, &M, &N, values, &lda, work.data());
977  }
978 }
979 
980 
981 
982 template <typename number>
983 number LAPACKFullMatrix<number>::trace () const
984 {
985  Assert (state == LAPACKSupport::matrix ||
986  state == LAPACKSupport::inverse_matrix,
987  ExcMessage("Trace can be called in matrix state only."));
988  Assert (this->n() == this->m(),
989  ExcDimensionMismatch(this->n(), this->m()));
990 
991  number tr = 0;
992  for (size_type i=0; i<this->m(); ++i)
993  tr += (*this)(i,i);
994 
995  return tr;
996 }
997 
998 
999 
1000 template <typename number>
1001 void
1002 LAPACKFullMatrix<number>::compute_cholesky_factorization ()
1003 {
1004  Assert(state == matrix, ExcState(state));
1005  Assert(property == symmetric, ExcProperty(property));
1006  state = LAPACKSupport::unusable;
1007 
1008  const types::blas_int mm = this->m();
1009  const types::blas_int nn = this->n();
1010  (void) mm;
1011  Assert (mm == nn, ExcDimensionMismatch(mm,nn));
1012 
1013  number *const values = &this->values[0];
1014  types::blas_int info = 0;
1015  const types::blas_int lda = std::max<types::blas_int>(1,nn);
1016  potrf (&LAPACKSupport::L, &nn, values, &lda, &info);
1017 
1018  // info < 0 : the info-th argument had an illegal value
1019  Assert(info >= 0, ExcInternalError());
1020 
1021  state = cholesky;
1022  AssertThrow(info == 0, LACExceptions::ExcSingular());
1023 }
1024 
1025 
1026 
1027 template <typename number>
1028 number
1029 LAPACKFullMatrix<number>::reciprocal_condition_number (const number a_norm) const
1030 {
1031  Threads::Mutex::ScopedLock lock (mutex);
1032  Assert(state == cholesky, ExcState(state));
1033  number rcond = 0.;
1034 
1035  const types::blas_int N = this->m();
1036  const number *values = &this->values[0];
1037  types::blas_int info = 0;
1038  const types::blas_int lda = std::max<types::blas_int>(1,N);
1039  work.resize(3*N);
1040  iwork.resize(N);
1041 
1042  // use the same uplo as in Cholesky
1043  pocon (&LAPACKSupport::L, &N, values, &lda,
1044  &a_norm, &rcond,
1045  work.data(), iwork.data(), &info);
1046 
1047  Assert(info >= 0, ExcInternalError());
1048 
1049  return rcond;
1050 }
1051 
1052 
1053 
1054 template <typename number>
1055 number
1056 LAPACKFullMatrix<number>::reciprocal_condition_number () const
1057 {
1058  Threads::Mutex::ScopedLock lock (mutex);
1059  Assert(property == upper_triangular ||
1060  property == lower_triangular,
1061  ExcProperty(property));
1062  number rcond = 0.;
1063 
1064  const types::blas_int N = this->m();
1065  const number *const values = &this->values[0];
1066  types::blas_int info = 0;
1067  const types::blas_int lda = std::max<types::blas_int>(1,N);
1068  work.resize(3*N);
1069  iwork.resize(N);
1070 
1071  const char norm = '1';
1072  const char diag = 'N';
1073  const char uplo = (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1074  trcon(&norm, &uplo, &diag,
1075  &N, values, &lda,
1076  &rcond,
1077  work.data(), iwork.data(), &info);
1078 
1079  Assert(info >= 0, ExcInternalError());
1080 
1081  return rcond;
1082 }
1083 
1084 
1085 
1086 template <typename number>
1087 void
1088 LAPACKFullMatrix<number>::compute_svd ()
1089 {
1090  Assert(state == matrix, ExcState(state));
1091  state = LAPACKSupport::unusable;
1092 
1093  const types::blas_int mm = this->m();
1094  const types::blas_int nn = this->n();
1095  number *const values = &this->values[0];
1096  wr.resize(std::max(mm,nn));
1097  std::fill(wr.begin(), wr.end(), 0.);
1098  ipiv.resize(8*mm);
1099 
1100  svd_u = std_cxx14::make_unique<LAPACKFullMatrix<number>>(mm,mm);
1101  svd_vt = std_cxx14::make_unique<LAPACKFullMatrix<number>>(nn,nn);
1102  number *const mu = &svd_u->values[0];
1103  number *const mvt = &svd_vt->values[0];
1104  types::blas_int info = 0;
1105 
1106  // First determine optimal workspace size
1107  work.resize(1);
1108  types::blas_int lwork = -1;
1109  gesdd(&LAPACKSupport::A, &mm, &nn, values, &mm,
1110  wr.data(), mu, &mm, mvt, &nn,
1111  work.data(), &lwork, ipiv.data(), &info);
1112  AssertThrow (info==0, LAPACKSupport::ExcErrorCode("gesdd", info));
1113  // Resize the work array. Add one to the size computed by LAPACK to be on
1114  // the safe side.
1115  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1116 
1117  work.resize(lwork);
1118  // Do the actual SVD.
1119  gesdd(&LAPACKSupport::A, &mm, &nn, values, &mm,
1120  wr.data(), mu, &mm, mvt, &nn,
1121  work.data(), &lwork, ipiv.data(), &info);
1122  AssertThrow (info==0, LAPACKSupport::ExcErrorCode("gesdd", info));
1123 
1124  work.resize(0);
1125  ipiv.resize(0);
1126 
1127  state = LAPACKSupport::svd;
1128 }
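
// Example sketch (not part of lapack_full_matrix.cc): compute_svd() follows
// the usual two-call LAPACK pattern, first querying the optimal workspace
// size with lwork = -1 and then running gesdd for real. A hypothetical use,
// assuming the singular_value() accessor declared in the class header:
void example_compute_svd_usage ()
{
  FullMatrix<double> f (2, 2);
  f (0, 0) = 3.;  f (1, 1) = 2.;

  LAPACKFullMatrix<double> A (2, 2);
  A = f;
  A.compute_svd ();
  const double sigma_max = A.singular_value (0);  // singular values come out sorted, largest first
  (void) sigma_max;
}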
1129 
1130 
1131 template <typename number>
1132 void
1133 LAPACKFullMatrix<number>::compute_inverse_svd (const double threshold)
1134 {
1135  if (state == LAPACKSupport::matrix)
1136  compute_svd();
1137 
1138  Assert (state==LAPACKSupport::svd, ExcState(state));
1139 
1140  const double lim = std::abs(wr[0])*threshold;
1141  for (size_type i=0; i<wr.size(); ++i)
1142  {
1143  if (std::abs(wr[i]) > lim)
1144  wr[i] = number(1.)/wr[i];
1145  else
1146  wr[i] = 0.;
1147  }
1148  state = LAPACKSupport::inverse_svd;
1149 }
1150 
1151 
1152 
1153 template <typename number>
1154 void
1155 LAPACKFullMatrix<number>::compute_inverse_svd_with_kernel (const unsigned int kernel_size)
1156 {
1157  if (state == LAPACKSupport::matrix)
1158  compute_svd();
1159 
1160  Assert (state==LAPACKSupport::svd, ExcState(state));
1161 
1162  const unsigned int n_wr = wr.size();
1163  for (size_type i=0; i<n_wr-kernel_size; ++i)
1164  wr[i] = number(1.)/wr[i];
1165  for (size_type i=n_wr-kernel_size; i<n_wr; ++i)
1166  wr[i] = 0.;
1167  state = LAPACKSupport::inverse_svd;
1168 }
1169 
1170 
1171 
1172 template <typename number>
1173 void
1174 LAPACKFullMatrix<number>::invert ()
1175 {
1176  Assert(state == matrix || state == lu || state == cholesky,
1177  ExcState(state));
1178  const types::blas_int mm = this->m();
1179  const types::blas_int nn = this->n();
1180  Assert (nn == mm, ExcNotQuadratic());
1181 
1182  number *const values = &this->values[0];
1183  types::blas_int info = 0;
1184 
1185  if (property != symmetric)
1186  {
1187  if (state == matrix)
1188  compute_lu_factorization();
1189 
1190  ipiv.resize(mm);
1191  inv_work.resize (mm);
1192  getri(&mm, values, &mm, ipiv.data(), inv_work.data(), &mm, &info);
1193  }
1194  else
1195  {
1196  if (state == matrix)
1197  compute_cholesky_factorization();
1198 
1199  const types::blas_int lda = std::max<types::blas_int>(1,nn);
1200  potri(&LAPACKSupport::L, &nn, values,&lda,&info);
1201  // the inverse is stored in the lower triangular part; copy it to the upper triangular part:
1202  for (types::blas_int i=0; i < nn; ++i)
1203  for (types::blas_int j=i+1; j < nn; ++j)
1204  this->el(i,j) = this->el(j,i);
1205  }
1206 
1207  Assert(info >= 0, ExcInternalError());
1208  AssertThrow(info == 0, LACExceptions::ExcSingular());
1209 
1210  state = inverse_matrix;
1211 }
1212 
1213 
1214 
1215 template <typename number>
1216 void
1217 LAPACKFullMatrix<number>::solve (Vector<number> &v,
1218  const bool transposed) const
1219 {
1220  Assert(this->m() == this->n(),
1221  LACExceptions::ExcNotQuadratic());
1222  AssertDimension(this->m(), v.size());
1223  const char *trans = transposed ? &T : &N;
1224  const types::blas_int nn = this->n();
1225  const number *const values = &this->values[0];
1226  const types::blas_int n_rhs = 1;
1227  types::blas_int info = 0;
1228 
1229  if (state == lu)
1230  {
1231  getrs(trans, &nn, &n_rhs, values, &nn, ipiv.data(),
1232  v.begin(), &nn, &info);
1233  }
1234  else if (state == cholesky)
1235  {
1236  potrs(&LAPACKSupport::L, &nn, &n_rhs,
1237  values, &nn, v.begin(), &nn, &info);
1238  }
1239  else if (property == upper_triangular ||
1240  property == lower_triangular)
1241  {
1242  const char uplo = (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1243 
1244  const types::blas_int lda = nn;
1245  const types::blas_int ldb = nn;
1246  trtrs (&uplo, trans, "N",
1247  &nn, &n_rhs,
1248  values, &lda,
1249  v.begin(), &ldb, &info);
1250  }
1251  else
1252  {
1253  Assert(false,
1254  ExcMessage("The matrix has to be either factorized or triangular."));
1255  }
1256 
1257  Assert(info == 0, ExcInternalError());
1258 }
1259 
1260 
1261 
1262 template <typename number>
1263 void
1264 LAPACKFullMatrix<number>::solve (LAPACKFullMatrix<number> &B,
1265  const bool transposed) const
1266 {
1267  Assert(B.state == matrix, ExcState(B.state));
1268 
1269  Assert(this->m() == this->n(),
1270  LACExceptions::ExcNotQuadratic());
1271  AssertDimension(this->m(), B.m());
1272  const char *trans = transposed ? &T : &N;
1273  const types::blas_int nn = this->n();
1274  const number *const values = &this->values[0];
1275  const types::blas_int n_rhs = B.n();
1276  types::blas_int info = 0;
1277 
1278  if (state == lu)
1279  {
1280  getrs(trans, &nn, &n_rhs, values, &nn, ipiv.data(),
1281  &B.values[0], &nn, &info);
1282  }
1283  else if (state == cholesky)
1284  {
1285  potrs(&LAPACKSupport::L, &nn, &n_rhs,
1286  values, &nn, &B.values[0], &nn, &info);
1287  }
1288  else if (property == upper_triangular ||
1289  property == lower_triangular)
1290  {
1291  const char uplo = (property == upper_triangular ? LAPACKSupport::U : LAPACKSupport::L);
1292 
1293  const types::blas_int lda = nn;
1294  const types::blas_int ldb = nn;
1295  trtrs (&uplo, trans, "N",
1296  &nn, &n_rhs,
1297  values, &lda,
1298  &B.values[0], &ldb, &info);
1299  }
1300  else
1301  {
1302  Assert(false,
1303  ExcMessage("The matrix has to be either factorized or triangular."));
1304  }
1305 
1306  Assert(info == 0, ExcInternalError());
1307 }
1308 
1309 
1310 
1311 template <typename number>
1312 void
1313 LAPACKFullMatrix<number>::apply_lu_factorization (Vector<number> &v,
1314  const bool transposed) const
1315 {
1316  solve(v,transposed);
1317 }
1318 
1319 
1320 
1321 template <typename number>
1322 void
1323 LAPACKFullMatrix<number>::apply_lu_factorization (LAPACKFullMatrix<number> &B,
1324  const bool transposed) const
1325 {
1326  solve(B,transposed);
1327 }
1328 
1329 
1330 
1331 template <typename number>
1332 number
1333 LAPACKFullMatrix<number>::determinant () const
1334 {
1335  Assert(this->m() == this->n(), LACExceptions::ExcNotQuadratic());
1336 
1337  // LAPACK doesn't offer a function to compute a matrix determinant.
1338  // This is due to the difficulty in maintaining numerical accuracy, as the
1339  // calculations are likely to overflow or underflow. See
1340  // http://www.netlib.org/lapack/faq.html#_are_there_routines_in_lapack_to_compute_determinants
1341  //
1342  // However, after a PLU decomposition one can compute this by multiplication
1343  // of the diagonal entries with one another. One must take into consideration
1344  // the number of permutations (row swaps) stored in the P matrix.
1345  //
1346  // See the implementations in the blaze library (detNxN)
1347  // https://bitbucket.org/blaze-lib/blaze
1348  // and also
1349  // https://dualm.wordpress.com/2012/01/06/computing-determinant-in-fortran/
1350  // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?p=341&#p336
1351  // for further information.
1352  Assert(state == lu, ExcState(state));
1353  Assert(ipiv.size() == this->m(), ExcInternalError());
1354  number det = 1.0;
1355  for (size_type i=0; i<this->m(); ++i)
1356  det *= ( ipiv[i] == types::blas_int(i+1) ? this->el(i,i) : -this->el(i,i) );
1357  return det;
1358 }
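
// Example sketch (not part of lapack_full_matrix.cc): as the comments above
// explain, the determinant is read off the LU factors as the product of the
// diagonal of U, with one sign flip per row swap recorded in ipiv. A
// hypothetical 2x2 check:
void example_determinant_usage ()
{
  FullMatrix<double> f (2, 2);
  f (0, 0) = 1.;  f (0, 1) = 2.;
  f (1, 0) = 3.;  f (1, 1) = 4.;

  LAPACKFullMatrix<double> A (2, 2);
  A = f;
  A.compute_lu_factorization ();         // determinant() requires the 'lu' state
  const double det = A.determinant ();   // == 1*4 - 2*3 == -2
  (void) det;
}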
1359 
1360 
1361 template <typename number>
1362 void
1363 LAPACKFullMatrix<number>::compute_eigenvalues (const bool right,
1364  const bool left)
1365 {
1366  Assert(state == matrix, ExcState(state));
1367  const types::blas_int nn = this->n();
1368  wr.resize(nn);
1369  wi.resize(nn);
1370  if (right) vr.resize(nn*nn);
1371  if (left) vl.resize(nn*nn);
1372 
1373  number *const values = &this->values[0];
1374 
1375  types::blas_int info = 0;
1376  types::blas_int lwork = 1;
1377  const char *const jobvr = (right) ? (&V) : (&N);
1378  const char *const jobvl = (left) ? (&V) : (&N);
1379 
1380  /*
1381  * The LAPACK routine xGEEV requires a sufficiently large work array; the
1382  * minimum requirement is
1383  *
1384  * work.size >= 4*nn.
1385  *
1386  * However, for better performance, a larger work array may be needed. The
1387  * first call determines the optimal work size and the second does the work.
1388  */
1389  lwork = -1;
1390  work.resize(1);
1391 
1392  geev(jobvl, jobvr, &nn, values, &nn,
1393  wr.data(), wi.data(),
1394  vl.data(), &nn, vr.data(), &nn,
1395  work.data(), &lwork, &info);
1396  // geev returns info=0 on success. Since we only queried the optimal size
1397  // for work, everything else would not be acceptable.
1398  Assert (info == 0, ExcInternalError());
1399  // Allocate working array according to suggestion (same strategy as was
1400  // noted in compute_svd).
1401  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1402 
1403  // resize workspace array
1404  work.resize((size_type ) lwork);
1405 
1406  // Finally compute the eigenvalues.
1407  geev(jobvl, jobvr, &nn, values, &nn,
1408  wr.data(), wi.data(),
1409  vl.data(), &nn, vr.data(), &nn,
1410  work.data(), &lwork, &info);
1411  // Negative return value implies a wrong argument. This should be internal.
1412 
1413  Assert (info >=0, ExcInternalError());
1414 //TODO:[GK] What if the QR method fails?
1415  if (info != 0)
1416  std::cerr << "LAPACK error in geev" << std::endl;
1417 
1418  state = LAPACKSupport::State(LAPACKSupport::eigenvalues | unusable);
1419 }
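
// Example sketch (not part of lapack_full_matrix.cc): compute_eigenvalues()
// also uses the two-call workspace-query idiom described in the comment block
// above. A hypothetical use, assuming the complex-valued eigenvalue()
// accessor declared in the class header:
void example_compute_eigenvalues_usage ()
{
  FullMatrix<double> f (2, 2);
  f (0, 0) = 2.;  f (1, 1) = 5.;

  LAPACKFullMatrix<double> A (2, 2);
  A = f;
  A.compute_eigenvalues ();              // eigenvalues only, no eigenvectors
  const std::complex<double> lambda = A.eigenvalue (0);
  (void) lambda;
}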
1420 
1421 
1422 template <typename number>
1423 void
1424 LAPACKFullMatrix<number>::compute_eigenvalues_symmetric (const number lower_bound,
1425  const number upper_bound,
1426  const number abs_accuracy,
1427  Vector<number> &eigenvalues,
1428  FullMatrix<number> &eigenvectors)
1429 {
1430  Assert(state == matrix, ExcState(state));
1431  const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
1432  Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
1433 
1434  wr.resize(nn);
1435  LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
1436 
1437  number *const values_A = &this->values[0];
1438  number *const values_eigenvectors = &matrix_eigenvectors.values[0];
1439 
1440  types::blas_int info(0),
1441  lwork(-1),
1442  n_eigenpairs(0);
1443  const char *const jobz(&V);
1444  const char *const uplo(&U);
1445  const char *const range(&V);
1446  const types::blas_int *const dummy(&one);
1447  std::vector<types::blas_int> iwork(static_cast<size_type> (5*nn));
1448  std::vector<types::blas_int> ifail(static_cast<size_type> (nn));
1449 
1450 
1451  /*
1452  * The LAPACK routine xSYEVX requires a sufficiently large work array; the
1453  * minimum requirement is
1454  *
1455  * work.size >= 8*nn.
1456  *
1457  * However, for better performance, a larger work array may be needed. The
1458  * first call determines the optimal work size and the second does the work.
1459  */
1460  work.resize(1);
1461 
1462  syevx (jobz, range,
1463  uplo, &nn, values_A, &nn,
1464  &lower_bound, &upper_bound,
1465  dummy, dummy, &abs_accuracy,
1466  &n_eigenpairs, wr.data(), values_eigenvectors,
1467  &nn, work.data(), &lwork, iwork.data(),
1468  ifail.data(), &info);
1469  // syevx returns info=0 on success. Since we only queried the optimal size
1470  // for work, everything else would not be acceptable.
1471  Assert (info == 0, ExcInternalError());
1472  // Allocate working array according to suggestion (same strategy as was noted in
1473  // compute_svd).
1474  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1475  work.resize(static_cast<size_type> (lwork));
1476 
1477  // Finally compute the eigenvalues.
1478  syevx (jobz, range,
1479  uplo, &nn, values_A, &nn,
1480  &lower_bound, &upper_bound,
1481  dummy, dummy, &abs_accuracy,
1482  &n_eigenpairs, wr.data(), values_eigenvectors,
1483  &nn, work.data(), &lwork, iwork.data(),
1484  ifail.data(), &info);
1485 
1486  // Negative return value implies a wrong argument. This should be internal.
1487  Assert (info >=0, ExcInternalError());
1488  if (info != 0)
1489  std::cerr << "LAPACK error in syevx" << std::endl;
1490 
1491  eigenvalues.reinit(n_eigenpairs);
1492  eigenvectors.reinit(nn, n_eigenpairs, true);
1493 
1494  for (size_type i=0; i < static_cast<size_type> (n_eigenpairs); ++i)
1495  {
1496  eigenvalues(i) = wr[i];
1497  size_type col_begin(i*nn);
1498  for (size_type j=0; j < static_cast<size_type> (nn); ++j)
1499  {
1500  eigenvectors(j,i) = values_eigenvectors[col_begin+j];
1501  }
1502  }
1503 
1504  state = LAPACKSupport::State(unusable);
1505 }
1506 
1507 
1508 template <typename number>
1509 void
1510 LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric (
1511  LAPACKFullMatrix<number> &B,
1512  const number lower_bound,
1513  const number upper_bound,
1514  const number abs_accuracy,
1515  Vector<number> &eigenvalues,
1516  std::vector<Vector<number> > &eigenvectors,
1517  const types::blas_int itype)
1518 {
1519  Assert(state == matrix, ExcState(state));
1520  const types::blas_int nn = (this->n() > 0 ? this->n() : 1);
1521  Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
1522  Assert(B.m() == B.n(), ExcNotQuadratic());
1523  Assert(static_cast<size_type>(nn) == B.n(),
1524  ExcDimensionMismatch (nn, B.n()));
1525 
1526  wr.resize(nn);
1527  LAPACKFullMatrix<number> matrix_eigenvectors(nn, nn);
1528 
1529  number *const values_A = &this->values[0];
1530  number *const values_B = &B.values[0];
1531  number *const values_eigenvectors = &matrix_eigenvectors.values[0];
1532 
1533  types::blas_int info(0),
1534  lwork(-1),
1535  n_eigenpairs(0);
1536  const char *const jobz(&V);
1537  const char *const uplo(&U);
1538  const char *const range(&V);
1539  const types::blas_int *const dummy(&one);
1540  iwork.resize(static_cast<size_type> (5*nn));
1541  std::vector<types::blas_int> ifail(static_cast<size_type> (nn));
1542 
1543 
1544  /*
1545  * The LAPACK routine xSYGVX requires a sufficiently large work array; the
1546  * minimum requirement is
1547  *
1548  * work.size >= 8*nn.
1549  *
1550  * However, for better performance, a larger work array may be needed. The
1551  * first call determines the optimal work size and the second does the work.
1552  */
1553  work.resize(1);
1554 
1555  sygvx (&itype, jobz, range, uplo, &nn, values_A, &nn,
1556  values_B, &nn, &lower_bound, &upper_bound,
1557  dummy, dummy, &abs_accuracy, &n_eigenpairs,
1558  wr.data(), values_eigenvectors, &nn, work.data(),
1559  &lwork, iwork.data(), ifail.data(), &info);
1560  // sygvx returns info=0 on success. Since we only queried the optimal size
1561  // for work, everything else would not be acceptable.
1562  Assert (info == 0, ExcInternalError());
1563  // Allocate working array according to suggestion (same strategy as was
1564  // noted in compute_svd).
1565  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1566 
1567  // resize workspace arrays
1568  work.resize(static_cast<size_type> (lwork));
1569 
1570  // Finally compute the generalized eigenvalues.
1571  sygvx (&itype, jobz, range, uplo, &nn, values_A, &nn,
1572  values_B, &nn, &lower_bound, &upper_bound,
1573  dummy, dummy, &abs_accuracy, &n_eigenpairs,
1574  wr.data(), values_eigenvectors, &nn, work.data(),
1575  &lwork, iwork.data(), ifail.data(), &info);
1576 
1577  // Negative return value implies a wrong argument. This should be internal.
1578  Assert (info >=0, ExcInternalError());
1579  if (info != 0)
1580  std::cerr << "LAPACK error in sygvx" << std::endl;
1581 
1582  eigenvalues.reinit(n_eigenpairs);
1583  eigenvectors.resize(n_eigenpairs);
1584 
1585  for (size_type i=0; i < static_cast<size_type> (n_eigenpairs); ++i)
1586  {
1587  eigenvalues(i) = wr[i];
1588  size_type col_begin(i*nn);
1589  eigenvectors[i].reinit(nn, true);
1590  for (size_type j=0; j < static_cast<size_type> (nn); ++j)
1591  {
1592  eigenvectors[i](j) = values_eigenvectors[col_begin+j];
1593  }
1594  }
1595 
1596  state = LAPACKSupport::State(unusable);
1597 }
1598 
1599 
1600 template <typename number>
1601 void
1602 LAPACKFullMatrix<number>::compute_generalized_eigenvalues_symmetric (
1603  LAPACKFullMatrix<number> &B,
1604  std::vector<Vector<number> > &eigenvectors,
1605  const types::blas_int itype)
1606 {
1607  Assert(state == matrix, ExcState(state));
1608  const types::blas_int nn = this->n();
1609  Assert(static_cast<size_type>(nn) == this->m(), ExcNotQuadratic());
1610  Assert(B.m() == B.n(), ExcNotQuadratic());
1611  Assert(static_cast<size_type>(nn) == B.n(),
1612  ExcDimensionMismatch (nn, B.n()));
1613  Assert(eigenvectors.size() <= static_cast<size_type>(nn),
1614  ExcMessage ("eigenvectors.size() > matrix.n()"));
1615 
1616  wr.resize(nn);
1617  wi.resize(nn); //This is set purely for consistency reasons with the
1618  //eigenvalues() function.
1619 
1620  number *const values_A = &this->values[0];
1621  number *const values_B = &B.values[0];
1622 
1623  types::blas_int info = 0;
1624  types::blas_int lwork = -1;
1625  const char *const jobz = (eigenvectors.size() > 0) ? (&V) : (&N);
1626  const char *const uplo = (&U);
1627 
1628  /*
1629  * The LAPACK routine xSYGV requires a sufficiently large work array; the
1630  * minimum requirement is
1631  *
1632  * work.size >= 3*nn - 1.
1633  *
1634  * However, for better performance, a larger work array may be needed. The
1635  * first call determines the optimal work size and the second does the work.
1636  */
1637  work.resize(1);
1638 
1639  sygv (&itype, jobz, uplo, &nn, values_A, &nn,
1640  values_B, &nn,
1641  wr.data(), work.data(), &lwork, &info);
1642  // sygv returns info=0 on success. Since we only queried the optimal size
1643  // for work, everything else would not be acceptable.
1644  Assert (info == 0, ExcInternalError());
1645  // Allocate working array according to suggestion (same strategy as was
1646  // noted in compute_svd).
1647  lwork = static_cast<types::blas_int>(std::abs(work[0]) + 1);
1648 
1649  // resize workspace array
1650  work.resize(static_cast<size_type>(lwork));
1651 
1652  // Finally compute the generalized eigenvalues.
1653  sygv (&itype, jobz, uplo, &nn, values_A, &nn,
1654  values_B, &nn,
1655  wr.data(), work.data(), &lwork, &info);
1656  // Negative return value implies a wrong argument. This should be internal.
1657 
1658  Assert (info >=0, ExcInternalError());
1659  if (info != 0)
1660  std::cerr << "LAPACK error in sygv" << std::endl;
1661 
1662  for (size_type i=0; i < eigenvectors.size(); ++i)
1663  {
1664  size_type col_begin(i*nn);
1665  eigenvectors[i].reinit(nn, true);
1666  for (size_type j=0; j < static_cast<size_type>(nn); ++j)
1667  {
1668  eigenvectors[i](j) = values_A[col_begin+j];
1669  }
1670  }
1671  state = LAPACKSupport::State(unusable);
1672 }
1673 
1674 
1675 template <typename number>
1676 void
1677 LAPACKFullMatrix<number>::print_formatted (
1678  std::ostream &out,
1679  const unsigned int precision,
1680  const bool scientific,
1681  const unsigned int width_,
1682  const char *zero_string,
1683  const double denominator,
1684  const double threshold) const
1685 {
1686  unsigned int width = width_;
1687 
1688  Assert ((!this->empty()) || (this->n()+this->m()==0),
1689  ExcInternalError());
1690  Assert (state == LAPACKSupport::matrix ||
1691  state == LAPACKSupport::inverse_matrix ||
1692  state == LAPACKSupport::cholesky,
1693  ExcState(state));
1694 
1695  // set output format, but store old
1696  // state
1697  std::ios::fmtflags old_flags = out.flags();
1698  std::streamsize old_precision = out.precision (precision);
1699 
1700  if (scientific)
1701  {
1702  out.setf (std::ios::scientific, std::ios::floatfield);
1703  if (!width)
1704  width = precision+7;
1705  }
1706  else
1707  {
1708  out.setf (std::ios::fixed, std::ios::floatfield);
1709  if (!width)
1710  width = precision+2;
1711  }
1712 
1713  for (size_type i=0; i<this->m(); ++i)
1714  {
1715  // Cholesky is stored in lower triangular, so just output this part:
1716  const size_type nc = state == LAPACKSupport::cholesky ? i+1 : this->n();
1717  for (size_type j=0; j<nc; ++j)
1718  // we might have complex numbers, so use abs also to check for nan
1719  // since there is no isnan on complex numbers
1720  if (std::isnan(std::abs((*this)(i,j))))
1721  out << std::setw(width) << (*this)(i,j) << ' ';
1722  else if (std::abs(this->el(i,j)) > threshold)
1723  out << std::setw(width)
1724  << this->el(i,j) * denominator << ' ';
1725  else
1726  out << std::setw(width) << zero_string << ' ';
1727  out << std::endl;
1728  };
1729 
1730  AssertThrow (out, ExcIO());
1731  // reset output format
1732  out.flags (old_flags);
1733  out.precision(old_precision);
1734 }
1735 
1736 
1737 //----------------------------------------------------------------------//
1738 
1739 template <typename number>
1740 void
1741 PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M)
1742 {
1743  matrix = &M;
1744  mem = nullptr;
1745 }
1746 
1747 
1748 template <typename number>
1749 void
1750 PreconditionLU<number>::initialize(const LAPACKFullMatrix<number> &M,
1751  VectorMemory<Vector<number> > &V)
1752 {
1753  matrix = &M;
1754  mem = &V;
1755 }
1756 
1757 
1758 template <typename number>
1759 void
1760 PreconditionLU<number>::vmult(Vector<number> &dst,
1761  const Vector<number> &src) const
1762 {
1763  dst = src;
1764  matrix->apply_lu_factorization(dst, false);
1765 }
1766 
1767 
1768 template <typename number>
1769 void
1770 PreconditionLU<number>::Tvmult(Vector<number> &dst,
1771  const Vector<number> &src) const
1772 {
1773  dst = src;
1774  matrix->apply_lu_factorization(dst, true);
1775 }
1776 
1777 
1778 template <typename number>
1779 void
1780 PreconditionLU<number>::vmult(BlockVector<number> &dst,
1781  const BlockVector<number> &src) const
1782 {
1783  Assert(mem != nullptr, ExcNotInitialized());
1784  Vector<number> *aux = mem->alloc();
1785  *aux = src;
1786  matrix->apply_lu_factorization(*aux, false);
1787  dst = *aux;
1788 }
1789 
1790 
1791 template <typename number>
1792 void
1793 PreconditionLU<number>::Tvmult(BlockVector<number> &dst,
1794  const BlockVector<number> &src) const
1795 {
1796  Assert(mem != nullptr, ExcNotInitialized());
1797  Vector<number> *aux = mem->alloc();
1798  *aux = src;
1799  matrix->apply_lu_factorization(*aux, true);
1800  dst = *aux;
1801 }
1802 
1803 
1804 
1805 #include "lapack_full_matrix.inst"
1806 
1807 
1808 DEAL_II_NAMESPACE_CLOSE