Reference documentation for deal.II version 9.1.1
trilinos_sparse_matrix.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2008 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/trilinos_index_access.h>
17 #include <deal.II/lac/trilinos_sparse_matrix.h>
18 
19 #ifdef DEAL_II_WITH_TRILINOS
20 
21 # include <deal.II/base/utilities.h>
22 
23 # include <deal.II/lac/dynamic_sparsity_pattern.h>
24 # include <deal.II/lac/la_parallel_vector.h>
25 # include <deal.II/lac/sparse_matrix.h>
26 # include <deal.II/lac/sparsity_pattern.h>
27 # include <deal.II/lac/sparsity_tools.h>
28 # include <deal.II/lac/trilinos_index_access.h>
29 # include <deal.II/lac/trilinos_precondition.h>
30 # include <deal.II/lac/trilinos_sparsity_pattern.h>
31 
32 # include <boost/container/small_vector.hpp>
33 
34 # ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
35 # include <EpetraExt_MatrixMatrix.h>
36 # endif
37 # include <Epetra_Export.h>
38 # include <Teuchos_RCP.hpp>
39 # include <ml_epetra_utils.h>
40 # include <ml_struct.h>
41 
42 # include <memory>
43 
44 DEAL_II_NAMESPACE_OPEN
45 
46 namespace TrilinosWrappers
47 {
48  namespace internal
49  {
50  template <typename VectorType>
51  typename VectorType::value_type *
52  begin(VectorType &V)
53  {
54  return V.begin();
55  }
56 
57  template <typename VectorType>
58  const typename VectorType::value_type *
59  begin(const VectorType &V)
60  {
61  return V.begin();
62  }
63 
64  template <typename VectorType>
65  typename VectorType::value_type *
66  end(VectorType &V)
67  {
68  return V.end();
69  }
70 
71  template <typename VectorType>
72  const typename VectorType::value_type *
73  end(const VectorType &V)
74  {
75  return V.end();
76  }
77 
78 # ifdef DEAL_II_WITH_MPI
79  template <>
80  double *
81  begin(LinearAlgebra::EpetraWrappers::Vector &V)
82  {
83  return V.trilinos_vector()[0];
84  }
85 
86  template <>
87  const double *
88  begin(const LinearAlgebra::EpetraWrappers::Vector &V)
89  {
90  return V.trilinos_vector()[0];
91  }
92 
93  template <>
94  double *
95  end(LinearAlgebra::EpetraWrappers::Vector &V)
96  {
97  return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
98  }
99 
100  template <>
101  const double *
102  end(const LinearAlgebra::EpetraWrappers::Vector &V)
103  {
104  return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
105  }
106 
107 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
108  template <typename Number>
109  Number *
110  begin(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
111  {
112  return V.trilinos_vector().getDataNonConst().get();
113  }
114 
115  template <typename Number>
116  const Number *
117  begin(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
118  {
119  return V.trilinos_vector().getData().get();
120  }
121 
122  template <typename Number>
123  Number *
124  end(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
125  {
126  return V.trilinos_vector().getDataNonConst().get() +
127  V.trilinos_vector().getLocalLength();
128  }
129 
130  template <typename Number>
131  const Number *
132  end(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
133  {
134  return V.trilinos_vector().getData().get() +
135  V.trilinos_vector().getLocalLength();
136  }
137 # endif
138 # endif
139  } // namespace internal
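// [Annotation] The begin()/end() helpers above give the matrix-vector
// product code later in this file raw pointers into vector storage
// independently of the concrete vector type. A minimal usage sketch
// (hypothetical variable names, not part of the original file):
//
//   TrilinosWrappers::MPI::Vector v;
//   const TrilinosScalar *p     = internal::begin(v);
//   const TrilinosScalar *p_end = internal::end(v);
//   // [p, p_end) spans the locally owned elements of v.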
140 
141 
142  namespace SparseMatrixIterators
143  {
144  void
145  AccessorBase::visit_present_row()
146  {
147  // if we are asked to visit the past-the-end line, then simply
148  // release all our caches and go on with life.
149  //
150  // do the same if the row we're supposed to visit is not locally
151  // owned. this is simply going to make non-locally owned rows
152  // look like they're empty
153  if ((this->a_row == matrix->m()) ||
154  (matrix->in_local_range(this->a_row) == false))
155  {
156  colnum_cache.reset();
157  value_cache.reset();
158 
159  return;
160  }
161 
162  // get a representation of the present row
163  int ncols;
164  TrilinosWrappers::types::int_type colnums = matrix->n();
165  if (value_cache.get() == nullptr)
166  {
167  value_cache =
168  std::make_shared<std::vector<TrilinosScalar>>(matrix->n());
169  colnum_cache = std::make_shared<std::vector<size_type>>(matrix->n());
170  }
171  else
172  {
173  value_cache->resize(matrix->n());
174  colnum_cache->resize(matrix->n());
175  }
176 
177  int ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy(
178  this->a_row,
179  colnums,
180  ncols,
181  value_cache->data(),
182  reinterpret_cast<TrilinosWrappers::types::int_type *>(
183  colnum_cache->data()));
184  value_cache->resize(ncols);
185  colnum_cache->resize(ncols);
186  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
187 
188  // copy it into our caches if the
189  // line isn't empty. if it is, then
190  // we've done something wrong, since
191  // we shouldn't have initialized an
192  // iterator for an empty line (what
193  // would it point to?)
194  }
195  } // namespace SparseMatrixIterators
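// [Annotation] visit_present_row() fills the column/value caches behind the
// row-wise const iterators. A sketch of the access pattern it serves,
// assuming `matrix` is a compressed TrilinosWrappers::SparseMatrix:
//
//   for (auto entry = matrix.begin(row); entry != matrix.end(row); ++entry)
//     std::cout << entry->column() << " -> " << entry->value() << '\n';
//
// Rows that are not locally owned simply appear empty, because the code
// above releases both caches for them.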
196 
197 
198  // The constructor is actually the
199  // only point where we have to check
200  // whether we build a serial or a
201  // parallel Trilinos matrix.
202  // Actually, it does not even matter
203  // how many threads there are, but
204  // only whether we use an MPI compiler or
205  // a standard compiler. So, even one
206  // thread on a configuration with
207  // MPI will still get a parallel
208  // interface.
209  SparseMatrix::SparseMatrix()
210  : column_space_map(new Epetra_Map(0, 0, Utilities::Trilinos::comm_self()))
211  , matrix(
212  new Epetra_FECrsMatrix(View, *column_space_map, *column_space_map, 0))
213  , last_action(Zero)
214  , compressed(true)
215  {
216  matrix->FillComplete();
217  }
218 
219 
220 
221  SparseMatrix::SparseMatrix(const Epetra_Map &input_map,
222  const size_type n_max_entries_per_row)
223  : column_space_map(new Epetra_Map(input_map))
224  , matrix(new Epetra_FECrsMatrix(Copy,
225  *column_space_map,
226  TrilinosWrappers::types::int_type(
227  n_max_entries_per_row),
228  false))
229  , last_action(Zero)
230  , compressed(false)
231  {}
232 
233 
234 
235  SparseMatrix::SparseMatrix(const Epetra_Map & input_map,
236  const std::vector<unsigned int> &n_entries_per_row)
237  : column_space_map(new Epetra_Map(input_map))
238  , matrix(new Epetra_FECrsMatrix(Copy,
239  *column_space_map,
240  reinterpret_cast<int *>(
241  const_cast<unsigned int *>(
242  n_entries_per_row.data())),
243  false))
244  , last_action(Zero)
245  , compressed(false)
246  {}
247 
248 
249 
250  SparseMatrix::SparseMatrix(const Epetra_Map &input_row_map,
251  const Epetra_Map &input_col_map,
252  const size_type n_max_entries_per_row)
253  : column_space_map(new Epetra_Map(input_col_map))
254  , matrix(new Epetra_FECrsMatrix(Copy,
255  input_row_map,
256  TrilinosWrappers::types::int_type(
257  n_max_entries_per_row),
258  false))
259  , last_action(Zero)
260  , compressed(false)
261  {}
262 
263 
264 
265  SparseMatrix::SparseMatrix(const Epetra_Map & input_row_map,
266  const Epetra_Map & input_col_map,
267  const std::vector<unsigned int> &n_entries_per_row)
268  : column_space_map(new Epetra_Map(input_col_map))
269  , matrix(new Epetra_FECrsMatrix(Copy,
270  input_row_map,
271  reinterpret_cast<int *>(
272  const_cast<unsigned int *>(
273  n_entries_per_row.data())),
274  false))
275  , last_action(Zero)
276  , compressed(false)
277  {}
278 
279 
280 
281  SparseMatrix::SparseMatrix(const size_type m,
282  const size_type n,
283  const unsigned int n_max_entries_per_row)
284  : column_space_map(
285  new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
286  0,
287  Utilities::Trilinos::comm_self()))
288  ,
289 
290  // on one processor only, we know how the
291  // columns of the matrix will be
292  // distributed (everything on one
293  // processor), so we can hand in this
294  // information to the constructor. we
295  // can't do so in parallel, where the
296  // information from columns is only
297  // available when entries have been added
298  matrix(new Epetra_FECrsMatrix(
299  Copy,
300  Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
301  0,
302  Utilities::Trilinos::comm_self()),
303  *column_space_map,
304  n_max_entries_per_row,
305  false))
306  , last_action(Zero)
307  , compressed(false)
308  {}
309 
310 
311 
312  SparseMatrix::SparseMatrix(const size_type m,
313  const size_type n,
314  const std::vector<unsigned int> &n_entries_per_row)
315  : column_space_map(
316  new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
317  0,
318  Utilities::Trilinos::comm_self()))
319  , matrix(new Epetra_FECrsMatrix(
320  Copy,
321  Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
322  0,
323  Utilities::Trilinos::comm_self()),
324  *column_space_map,
325  reinterpret_cast<int *>(
326  const_cast<unsigned int *>(n_entries_per_row.data())),
327  false))
328  , last_action(Zero)
329  , compressed(false)
330  {}
331 
332 
333 
334  SparseMatrix::SparseMatrix(const IndexSet & parallel_partitioning,
335  const MPI_Comm & communicator,
336  const unsigned int n_max_entries_per_row)
337  : column_space_map(new Epetra_Map(
338  parallel_partitioning.make_trilinos_map(communicator, false)))
339  , matrix(new Epetra_FECrsMatrix(Copy,
340  *column_space_map,
341  n_max_entries_per_row,
342  false))
343  , last_action(Zero)
344  , compressed(false)
345  {}
346 
347 
348 
349  SparseMatrix::SparseMatrix(const IndexSet &parallel_partitioning,
350  const MPI_Comm &communicator,
351  const std::vector<unsigned int> &n_entries_per_row)
352  : column_space_map(new Epetra_Map(
353  parallel_partitioning.make_trilinos_map(communicator, false)))
354  , matrix(new Epetra_FECrsMatrix(Copy,
355  *column_space_map,
356  reinterpret_cast<int *>(
357  const_cast<unsigned int *>(
358  n_entries_per_row.data())),
359  false))
360  , last_action(Zero)
361  , compressed(false)
362  {}
363 
364 
365 
366  SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
367  const IndexSet &col_parallel_partitioning,
368  const MPI_Comm &communicator,
369  const size_type n_max_entries_per_row)
370  : column_space_map(new Epetra_Map(
371  col_parallel_partitioning.make_trilinos_map(communicator, false)))
372  , matrix(new Epetra_FECrsMatrix(
373  Copy,
374  row_parallel_partitioning.make_trilinos_map(communicator, false),
375  n_max_entries_per_row,
376  false))
377  , last_action(Zero)
378  , compressed(false)
379  {}
380 
381 
382 
383  SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
384  const IndexSet &col_parallel_partitioning,
385  const MPI_Comm &communicator,
386  const std::vector<unsigned int> &n_entries_per_row)
387  : column_space_map(new Epetra_Map(
388  col_parallel_partitioning.make_trilinos_map(communicator, false)))
389  , matrix(new Epetra_FECrsMatrix(
390  Copy,
391  row_parallel_partitioning.make_trilinos_map(communicator, false),
392  reinterpret_cast<int *>(
393  const_cast<unsigned int *>(n_entries_per_row.data())),
394  false))
395  , last_action(Zero)
396  , compressed(false)
397  {}
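// [Annotation] A construction sketch for the IndexSet-based constructors
// above (hypothetical names): given an IndexSet `owned_rows` describing the
// locally owned rows,
//
//   TrilinosWrappers::SparseMatrix A(owned_rows, MPI_COMM_WORLD, 5);
//
// reserves space for at most 5 entries per row; the actual sparsity is
// fixed later, when entries are inserted and compress() is called.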
398 
399 
400 
401  SparseMatrix::SparseMatrix(const SparsityPattern &sparsity_pattern)
402  : column_space_map(new Epetra_Map(sparsity_pattern.domain_partitioner()))
403  , matrix(
404  new Epetra_FECrsMatrix(Copy,
405  sparsity_pattern.trilinos_sparsity_pattern(),
406  false))
407  , last_action(Zero)
408  , compressed(true)
409  {
410  Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true,
411  ExcMessage(
412  "The Trilinos sparsity pattern has not been compressed."));
413  compress(VectorOperation::insert);
414  }
415 
416 
417 
418  SparseMatrix::SparseMatrix(SparseMatrix &&other) noexcept
419  : column_space_map(std::move(other.column_space_map))
420  , matrix(std::move(other.matrix))
421  , nonlocal_matrix(std::move(other.nonlocal_matrix))
422  , nonlocal_matrix_exporter(std::move(other.nonlocal_matrix_exporter))
423  , last_action(other.last_action)
424  , compressed(other.compressed)
425  {
426  other.last_action = Zero;
427  other.compressed = false;
428  }
429 
430 
431 
432  void
433  SparseMatrix::copy_from(const SparseMatrix &rhs)
434  {
435  if (this == &rhs)
436  return;
437 
438  nonlocal_matrix.reset();
439  nonlocal_matrix_exporter.reset();
440 
441  // check whether we need to update the whole matrix layout (we have
442  // different maps or if we detect a row where the columns of the two
443  // matrices do not match)
444  bool needs_deep_copy =
445  !matrix->RowMap().SameAs(rhs.matrix->RowMap()) ||
446  !matrix->ColMap().SameAs(rhs.matrix->ColMap()) ||
447  !matrix->DomainMap().SameAs(rhs.matrix->DomainMap()) ||
448  n_nonzero_elements() != rhs.n_nonzero_elements();
449  if (!needs_deep_copy)
450  {
451  // Try to copy all the rows of the matrix one by one. In case of error
452  // (i.e., the column indices are different), we need to abort and blow
453  // away the matrix.
454  for (const auto row : locally_owned_range_indices())
455  {
456  const int row_local = matrix->RowMap().LID(
457  static_cast<TrilinosWrappers::types::int_type>(row));
458  Assert((row_local >= 0), ExcAccessToNonlocalRow(row));
459 
460  int n_entries, rhs_n_entries;
461  TrilinosScalar *value_ptr, *rhs_value_ptr;
462  int * index_ptr, *rhs_index_ptr;
463  int ierr = rhs.matrix->ExtractMyRowView(row_local,
464  rhs_n_entries,
465  rhs_value_ptr,
466  rhs_index_ptr);
467  (void)ierr;
468  Assert(ierr == 0, ExcTrilinosError(ierr));
469 
470  ierr = matrix->ExtractMyRowView(row_local,
471  n_entries,
472  value_ptr,
473  index_ptr);
474  Assert(ierr == 0, ExcTrilinosError(ierr));
475 
476  if (n_entries != rhs_n_entries ||
477  std::memcmp(static_cast<void *>(index_ptr),
478  static_cast<void *>(rhs_index_ptr),
479  sizeof(int) * n_entries) != 0)
480  {
481  needs_deep_copy = true;
482  break;
483  }
484 
485  for (int i = 0; i < n_entries; ++i)
486  value_ptr[i] = rhs_value_ptr[i];
487  }
488  }
489 
490  if (needs_deep_copy)
491  {
492  column_space_map =
493  std_cxx14::make_unique<Epetra_Map>(rhs.domain_partitioner());
494 
495  // release memory before reallocation
496  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);
497 
498  matrix->FillComplete(*column_space_map, matrix->RowMap());
499  }
500 
501  if (rhs.nonlocal_matrix.get() != nullptr)
502  nonlocal_matrix =
503  std_cxx14::make_unique<Epetra_CrsMatrix>(Copy,
504  rhs.nonlocal_matrix->Graph());
505  }
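// [Annotation] copy_from() above first attempts a cheap row-by-row value
// copy and only falls back to a deep copy when maps or column indices
// differ. A minimal sketch:
//
//   TrilinosWrappers::SparseMatrix B;
//   B.copy_from(A); // cheap if B already shares A's layout, deep copy else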
506 
507 
508 
509  namespace
510  {
511  using size_type = SparseMatrix::size_type;
512 
513  template <typename SparsityPatternType>
514  void
515  reinit_matrix(const Epetra_Map & input_row_map,
516  const Epetra_Map & input_col_map,
517  const SparsityPatternType & sparsity_pattern,
518  const bool exchange_data,
519  std::unique_ptr<Epetra_Map> & column_space_map,
520  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
521  std::unique_ptr<Epetra_CrsMatrix> & nonlocal_matrix,
522  std::unique_ptr<Epetra_Export> & nonlocal_matrix_exporter)
523  {
524  // release memory before reallocation
525  matrix.reset();
526  nonlocal_matrix.reset();
527  nonlocal_matrix_exporter.reset();
528 
529  if (input_row_map.Comm().MyPID() == 0)
530  {
531  AssertDimension(sparsity_pattern.n_rows(),
532  TrilinosWrappers::n_global_elements(input_row_map));
533  AssertDimension(sparsity_pattern.n_cols(),
534  TrilinosWrappers::n_global_elements(input_col_map));
535  }
536 
537  column_space_map = std_cxx14::make_unique<Epetra_Map>(input_col_map);
538 
539  // if we want to exchange data, build a usual Trilinos sparsity pattern
540  // and let that handle the exchange. otherwise, manually create a
541  // CrsGraph, which consumes considerably less memory because it can set
542  // the correct number of indices right from the start
543  if (exchange_data)
544  {
545  SparsityPattern trilinos_sparsity;
546  trilinos_sparsity.reinit(input_row_map,
547  input_col_map,
548  sparsity_pattern,
549  exchange_data);
550  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(
551  Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);
552 
553  return;
554  }
555 
556  const size_type first_row = TrilinosWrappers::min_my_gid(input_row_map),
557  last_row =
558  TrilinosWrappers::max_my_gid(input_row_map) + 1;
559  std::vector<int> n_entries_per_row(last_row - first_row);
560 
561  for (size_type row = first_row; row < last_row; ++row)
562  n_entries_per_row[row - first_row] = sparsity_pattern.row_length(row);
563 
564  // The deal.II notion of a sparsity pattern corresponds to the Epetra
565  // concept of a Graph. Hence, we generate a graph by copying the
566  // sparsity pattern into it, and then build up the matrix from the
567  // graph. This is considerably faster than directly filling elements
568  // into the matrix. Moreover, it consumes less memory, since the
569  // internal reordering is done on ints only, and we can leave the
570  // doubles aside.
571 
572  // for more than one processor, need to specify only row map first and
573  // let the matrix entries decide about the column map (which says which
574  // columns are present in the matrix, not to be confused with the
575  // col_map that tells how the domain dofs of the matrix will be
576  // distributed). for only one processor, we can directly assign the
577  // columns as well. Compare this with bug # 4123 in the Sandia Bugzilla.
578  std::unique_ptr<Epetra_CrsGraph> graph;
579  if (input_row_map.Comm().NumProc() > 1)
580  graph = std_cxx14::make_unique<Epetra_CrsGraph>(
581  Copy, input_row_map, n_entries_per_row.data(), true);
582  else
583  graph = std_cxx14::make_unique<Epetra_CrsGraph>(
584  Copy, input_row_map, input_col_map, n_entries_per_row.data(), true);
585 
586  // This function assumes that the sparsity pattern sits on all
587  // processors (completely). The parallel version uses an Epetra graph
588  // that is already distributed.
589 
590  // now insert the indices
591  std::vector<TrilinosWrappers::types::int_type> row_indices;
592 
593  for (size_type row = first_row; row < last_row; ++row)
594  {
595  const int row_length = sparsity_pattern.row_length(row);
596  if (row_length == 0)
597  continue;
598 
599  row_indices.resize(row_length, -1);
600  {
601  typename SparsityPatternType::iterator p =
602  sparsity_pattern.begin(row);
603  for (size_type col = 0; p != sparsity_pattern.end(row); ++p, ++col)
604  row_indices[col] = p->column();
605  }
606  graph->Epetra_CrsGraph::InsertGlobalIndices(row,
607  row_length,
608  row_indices.data());
609  }
610 
611  // Finally, optimize the graph structure (sort indices, make memory
612  // contiguous, etc). note that the documentation of the function indeed
613  // states that we first need to provide the column (domain) map and then
614  // the row (range) map
615  graph->FillComplete(input_col_map, input_row_map);
616  graph->OptimizeStorage();
617 
618  // check whether we got the number of columns right.
619  AssertDimension(sparsity_pattern.n_cols(),
620  TrilinosWrappers::n_global_cols(*graph));
621  (void)n_global_cols;
622 
623  // And now finally generate the matrix.
624  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
625  }
626 
627 
628 
629  // for the non-local graph, we need to circumvent the problem that some
630  // processors will not add into the non-local graph at all: We do not want
631  // to insert dummy elements on >5000 processors because that gets very
632  // slow. Thus, we set the IndicesAreGlobal flag of Epetra_CrsGraph to the
633  // correct value. Since the setter is protected, we need to expose it by
634  // deriving a class from Epetra_CrsGraph for the purpose of creating the
635  // data structure
636  class Epetra_CrsGraphMod : public Epetra_CrsGraph
637  {
638  public:
639  Epetra_CrsGraphMod(const Epetra_Map &row_map,
640  const int * n_entries_per_row)
641  : Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
642  {}
643 
644  void
645  SetIndicesAreGlobal()
646  {
647  this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
648  }
649  };
650 
651 
652  // specialization for DynamicSparsityPattern which can provide us with
653  // more information about the non-locally owned rows
654  template <>
655  void
656  reinit_matrix(const Epetra_Map & input_row_map,
657  const Epetra_Map & input_col_map,
658  const DynamicSparsityPattern & sparsity_pattern,
659  const bool exchange_data,
660  std::unique_ptr<Epetra_Map> & column_space_map,
661  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
662  std::unique_ptr<Epetra_CrsMatrix> & nonlocal_matrix,
663  std::unique_ptr<Epetra_Export> & nonlocal_matrix_exporter)
664  {
665  matrix.reset();
666  nonlocal_matrix.reset();
667  nonlocal_matrix_exporter.reset();
668 
669  AssertDimension(sparsity_pattern.n_rows(),
670  TrilinosWrappers::n_global_elements(input_row_map));
671  AssertDimension(sparsity_pattern.n_cols(),
672  TrilinosWrappers::n_global_elements(input_col_map));
673 
674  column_space_map = std_cxx14::make_unique<Epetra_Map>(input_col_map);
675 
676  IndexSet relevant_rows(sparsity_pattern.row_index_set());
677  // serial case
678  if (relevant_rows.size() == 0)
679  {
680  relevant_rows.set_size(
681  TrilinosWrappers::n_global_elements(input_row_map));
682  relevant_rows.add_range(
683  0, TrilinosWrappers::n_global_elements(input_row_map));
684  }
685  relevant_rows.compress();
686  Assert(relevant_rows.n_elements() >=
687  static_cast<unsigned int>(input_row_map.NumMyElements()),
688  ExcMessage(
689  "Locally relevant rows of sparsity pattern must contain "
690  "all locally owned rows"));
691 
692  // check whether the relevant rows correspond to exactly the same map as
693  // the owned rows. In that case, do not create the nonlocal graph and
694  // fill the columns by demand
695  bool have_ghost_rows = false;
696  {
697  std::vector<dealii::types::global_dof_index> indices;
698  relevant_rows.fill_index_vector(indices);
699  Epetra_Map relevant_map(
700  TrilinosWrappers::types::int_type(-1),
701  TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
702  (indices.empty() ?
703  nullptr :
704  reinterpret_cast<TrilinosWrappers::types::int_type *>(
705  indices.data())),
706  0,
707  input_row_map.Comm());
708  if (relevant_map.SameAs(input_row_map))
709  have_ghost_rows = false;
710  else
711  have_ghost_rows = true;
712  }
713 
714  const unsigned int n_rows = relevant_rows.n_elements();
715  std::vector<TrilinosWrappers::types::int_type> ghost_rows;
716  std::vector<int> n_entries_per_row(input_row_map.NumMyElements());
717  std::vector<int> n_entries_per_ghost_row;
718  for (unsigned int i = 0, own = 0; i < n_rows; ++i)
719  {
720  const TrilinosWrappers::types::int_type global_row =
721  relevant_rows.nth_index_in_set(i);
722  if (input_row_map.MyGID(global_row))
723  n_entries_per_row[own++] = sparsity_pattern.row_length(global_row);
724  else if (sparsity_pattern.row_length(global_row) > 0)
725  {
726  ghost_rows.push_back(global_row);
727  n_entries_per_ghost_row.push_back(
728  sparsity_pattern.row_length(global_row));
729  }
730  }
731 
732  Epetra_Map off_processor_map(-1,
733  ghost_rows.size(),
734  (ghost_rows.size() > 0) ?
735  (ghost_rows.data()) :
736  nullptr,
737  0,
738  input_row_map.Comm());
739 
740  std::unique_ptr<Epetra_CrsGraph> graph;
741  std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
742  if (input_row_map.Comm().NumProc() > 1)
743  {
744  graph = std_cxx14::make_unique<Epetra_CrsGraph>(
745  Copy,
746  input_row_map,
747  (n_entries_per_row.size() > 0) ? (n_entries_per_row.data()) :
748  nullptr,
749  exchange_data ? false : true);
750  if (have_ghost_rows == true)
751  nonlocal_graph = std_cxx14::make_unique<Epetra_CrsGraphMod>(
752  off_processor_map, n_entries_per_ghost_row.data());
753  }
754  else
755  graph = std_cxx14::make_unique<Epetra_CrsGraph>(
756  Copy,
757  input_row_map,
758  input_col_map,
759  (n_entries_per_row.size() > 0) ? (n_entries_per_row.data()) : nullptr,
760  true);
761 
762  // now insert the indices, select between the right matrix
763  std::vector<TrilinosWrappers::types::int_type> row_indices;
764 
765  for (unsigned int i = 0; i < n_rows; ++i)
766  {
767  const TrilinosWrappers::types::int_type global_row =
768  relevant_rows.nth_index_in_set(i);
769  const int row_length = sparsity_pattern.row_length(global_row);
770  if (row_length == 0)
771  continue;
772 
773  row_indices.resize(row_length, -1);
774  for (int col = 0; col < row_length; ++col)
775  row_indices[col] = sparsity_pattern.column_number(global_row, col);
776 
777  if (input_row_map.MyGID(global_row))
778  graph->InsertGlobalIndices(global_row,
779  row_length,
780  row_indices.data());
781  else
782  {
783  Assert(nonlocal_graph.get() != nullptr, ExcInternalError());
784  nonlocal_graph->InsertGlobalIndices(global_row,
785  row_length,
786  row_indices.data());
787  }
788  }
789 
790  // finalize nonlocal graph and create nonlocal matrix
791  if (nonlocal_graph.get() != nullptr)
792  {
793  // must make sure the IndicesAreGlobal flag is set on all processors
794  // because some processors might not call InsertGlobalIndices (and
795  // we do not want to insert dummy indices on all processors for
796  // large-scale simulations due to the bad impact on performance)
797  nonlocal_graph->SetIndicesAreGlobal();
798  Assert(nonlocal_graph->IndicesAreGlobal() == true,
799  ExcInternalError());
800  nonlocal_graph->FillComplete(input_col_map, input_row_map);
801  nonlocal_graph->OptimizeStorage();
802 
803  // insert data from nonlocal graph into the final sparsity pattern
804  if (exchange_data)
805  {
806  Epetra_Export exporter(nonlocal_graph->RowMap(), input_row_map);
807  int ierr = graph->Export(*nonlocal_graph, exporter, Add);
808  (void)ierr;
809  Assert(ierr == 0, ExcTrilinosError(ierr));
810  }
811 
812  nonlocal_matrix =
813  std_cxx14::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);
814  }
815 
816  graph->FillComplete(input_col_map, input_row_map);
817  graph->OptimizeStorage();
818 
819  AssertDimension(sparsity_pattern.n_cols(),
820  TrilinosWrappers::n_global_cols(*graph));
821 
822  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
823  }
824  } // namespace
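// [Annotation] The reinit_matrix() overloads above are the common back end
// of all public reinit() variants below; the DynamicSparsityPattern
// specialization additionally sets up the nonlocal graph so that rows
// owned by other processors can be written to directly.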
825 
826 
827 
828  template <typename SparsityPatternType>
829  void
830  SparseMatrix::reinit(const SparsityPatternType &sparsity_pattern)
831  {
832  const Epetra_Map rows(static_cast<TrilinosWrappers::types::int_type>(
833  sparsity_pattern.n_rows()),
834  0,
835  Utilities::Trilinos::comm_self());
836  const Epetra_Map columns(static_cast<TrilinosWrappers::types::int_type>(
837  sparsity_pattern.n_cols()),
838  0,
839  Utilities::Trilinos::comm_self());
840 
841  reinit_matrix(rows,
842  columns,
843  sparsity_pattern,
844  false,
845  column_space_map,
846  matrix,
847  nonlocal_matrix,
848  nonlocal_matrix_exporter);
849  }
850 
851 
852 
853  template <typename SparsityPatternType>
854  void
855  SparseMatrix::reinit(const Epetra_Map & input_map,
856  const SparsityPatternType &sparsity_pattern,
857  const bool exchange_data)
858  {
859  reinit_matrix(input_map,
860  input_map,
861  sparsity_pattern,
862  exchange_data,
863  column_space_map,
864  matrix,
865  nonlocal_matrix,
866  nonlocal_matrix_exporter);
867  }
868 
869 
870 
871  template <typename SparsityPatternType>
872  inline void
873  SparseMatrix::reinit(const IndexSet & row_parallel_partitioning,
874  const IndexSet & col_parallel_partitioning,
875  const SparsityPatternType &sparsity_pattern,
876  const MPI_Comm & communicator,
877  const bool exchange_data)
878  {
879  Epetra_Map row_map =
880  row_parallel_partitioning.make_trilinos_map(communicator, false);
881  Epetra_Map col_map =
882  col_parallel_partitioning.make_trilinos_map(communicator, false);
883  reinit_matrix(row_map,
884  col_map,
885  sparsity_pattern,
886  exchange_data,
887  column_space_map,
888  matrix,
889  nonlocal_matrix,
890  nonlocal_matrix_exporter);
891 
892  // In the end, the matrix needs to be compressed in order to be really
893  // ready.
894  last_action = Zero;
895  compress(VectorOperation::insert);
896  }
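// [Annotation] A typical call sequence for this reinit() (hypothetical
// names): given IndexSets `owned_rows`, `owned_cols` and a
// DynamicSparsityPattern `dsp`,
//
//   A.reinit(owned_rows, owned_cols, dsp, mpi_communicator);
//   // ... fill entries with add()/set(), then:
//   A.compress(VectorOperation::add);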
897 
898 
899 
900  template <typename SparsityPatternType>
901  inline void
902  SparseMatrix::reinit(const Epetra_Map & row_map,
903  const Epetra_Map & col_map,
904  const SparsityPatternType &sparsity_pattern,
905  const bool exchange_data)
906  {
907  reinit_matrix(row_map,
908  col_map,
909  sparsity_pattern,
910  exchange_data,
911  column_space_map,
912  matrix,
913  nonlocal_matrix,
914  nonlocal_matrix_exporter);
915 
916  // In the end, the matrix needs to be compressed in order to be really
917  // ready.
918  last_action = Zero;
919  compress(VectorOperation::insert);
920  }
921 
922 
923 
924  void
925  SparseMatrix::reinit(const SparsityPattern &sparsity_pattern)
926  {
927  matrix.reset();
928  nonlocal_matrix_exporter.reset();
929 
930  // reinit with a (parallel) Trilinos sparsity pattern.
931  column_space_map =
932  std_cxx14::make_unique<Epetra_Map>(sparsity_pattern.domain_partitioner());
933  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(
934  Copy, sparsity_pattern.trilinos_sparsity_pattern(), false);
935 
936  if (sparsity_pattern.nonlocal_graph.get() != nullptr)
937  nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>(
938  Copy, *sparsity_pattern.nonlocal_graph);
939  else
940  nonlocal_matrix.reset();
941 
942  last_action = Zero;
943  compress(VectorOperation::insert);
944  }
945 
946 
947 
948  void
949  SparseMatrix::reinit(const SparseMatrix &sparse_matrix)
950  {
951  if (this == &sparse_matrix)
952  return;
953 
954  column_space_map =
955  std_cxx14::make_unique<Epetra_Map>(sparse_matrix.domain_partitioner());
956  matrix.reset();
957  nonlocal_matrix_exporter.reset();
958  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(
959  Copy, sparse_matrix.trilinos_sparsity_pattern(), false);
960 
961  if (sparse_matrix.nonlocal_matrix != nullptr)
962  nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>(
963  Copy, sparse_matrix.nonlocal_matrix->Graph());
964  else
965  nonlocal_matrix.reset();
966 
967  last_action = Zero;
968  compress(VectorOperation::insert);
969  }
970 
971 
972 
973  template <typename number>
974  inline void
975  SparseMatrix::reinit(
976  const IndexSet & row_parallel_partitioning,
977  const IndexSet & col_parallel_partitioning,
978  const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
979  const MPI_Comm & communicator,
980  const double drop_tolerance,
981  const bool copy_values,
982  const ::dealii::SparsityPattern * use_this_sparsity)
983  {
984  if (copy_values == false)
985  {
986  // in case we do not copy values, just
987  // call the other function.
988  if (use_this_sparsity == nullptr)
989  reinit(row_parallel_partitioning,
990  col_parallel_partitioning,
991  dealii_sparse_matrix.get_sparsity_pattern(),
992  communicator,
993  false);
994  else
995  reinit(row_parallel_partitioning,
996  col_parallel_partitioning,
997  *use_this_sparsity,
998  communicator,
999  false);
1000  return;
1001  }
1002 
1003  const size_type n_rows = dealii_sparse_matrix.m();
1004 
1005  AssertDimension(row_parallel_partitioning.size(), n_rows);
1006  AssertDimension(col_parallel_partitioning.size(), dealii_sparse_matrix.n());
1007 
1008  const ::dealii::SparsityPattern &sparsity_pattern =
1009  (use_this_sparsity != nullptr) ?
1010  *use_this_sparsity :
1011  dealii_sparse_matrix.get_sparsity_pattern();
1012 
1013  if (matrix.get() == nullptr || m() != n_rows ||
1014  n_nonzero_elements() != sparsity_pattern.n_nonzero_elements())
1015  {
1016  reinit(row_parallel_partitioning,
1017  col_parallel_partitioning,
1018  sparsity_pattern,
1019  communicator,
1020  false);
1021  }
1022 
1023  // fill the values. the same as above: go through all rows of the
1024  // matrix, and then all columns. since the sparsity patterns of the
1025  // input matrix and the specified sparsity pattern might be different,
1026  // need to go through the row for both these sparsity structures
1027  // simultaneously in order to really set the correct values.
1028  size_type maximum_row_length = matrix->MaxNumEntries();
1029  std::vector<size_type> row_indices(maximum_row_length);
1030  std::vector<TrilinosScalar> values(maximum_row_length);
1031 
1032  for (size_type row = 0; row < n_rows; ++row)
1033  // see if the row is locally stored on this processor
1034  if (row_parallel_partitioning.is_element(row) == true)
1035  {
1036  ::dealii::SparsityPattern::iterator select_index =
1037  sparsity_pattern.begin(row);
1038  typename ::dealii::SparseMatrix<number>::const_iterator it =
1039  dealii_sparse_matrix.begin(row);
1040  size_type col = 0;
1041  if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
1042  {
1043  // optimized diagonal
1044  AssertDimension(it->column(), row);
1045  if (std::fabs(it->value()) > drop_tolerance)
1046  {
1047  values[col] = it->value();
1048  row_indices[col++] = it->column();
1049  }
1050  ++select_index;
1051  ++it;
1052  }
1053 
1054  while (it != dealii_sparse_matrix.end(row) &&
1055  select_index != sparsity_pattern.end(row))
1056  {
1057  while (select_index->column() < it->column() &&
1058  select_index != sparsity_pattern.end(row))
1059  ++select_index;
1060  while (it->column() < select_index->column() &&
1061  it != dealii_sparse_matrix.end(row))
1062  ++it;
1063 
1064  if (it == dealii_sparse_matrix.end(row))
1065  break;
1066  if (std::fabs(it->value()) > drop_tolerance)
1067  {
1068  values[col] = it->value();
1069  row_indices[col++] = it->column();
1070  }
1071  ++select_index;
1072  ++it;
1073  }
1074  set(row,
1075  col,
1076  reinterpret_cast<size_type *>(row_indices.data()),
1077  values.data(),
1078  false);
1079  }
1080  compress(VectorOperation::insert);
1081  }
1082 
1083 
1084 
1085  template <typename number>
1086  void
1087  SparseMatrix::reinit(
1088  const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
1089  const double drop_tolerance,
1090  const bool copy_values,
1091  const ::dealii::SparsityPattern * use_this_sparsity)
1092  {
1093  reinit(complete_index_set(dealii_sparse_matrix.m()),
1094  complete_index_set(dealii_sparse_matrix.n()),
1095  dealii_sparse_matrix,
1096  MPI_COMM_SELF,
1097  drop_tolerance,
1098  copy_values,
1099  use_this_sparsity);
1100  }
1101 
1102 
1103 
1104  template <typename number>
1105  void
1106  SparseMatrix::reinit(
1107  const Epetra_Map & input_map,
1108  const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
1109  const double drop_tolerance,
1110  const bool copy_values,
1111  const ::dealii::SparsityPattern * use_this_sparsity)
1112  {
1113  reinit(IndexSet(input_map),
1114  IndexSet(input_map),
1115  dealii_sparse_matrix,
1116  MPI_COMM_SELF,
1117  drop_tolerance,
1118  copy_values,
1119  use_this_sparsity);
1120  }
1121 
1122 
1123 
1124  template <typename number>
1125  void
1126  SparseMatrix::reinit(
1127  const Epetra_Map & input_row_map,
1128  const Epetra_Map & input_col_map,
1129  const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
1130  const double drop_tolerance,
1131  const bool copy_values,
1132  const ::dealii::SparsityPattern * use_this_sparsity)
1133  {
1134  reinit(IndexSet(input_row_map),
1135  IndexSet(input_col_map),
1136  dealii_sparse_matrix,
1137  MPI_COMM_SELF,
1138  drop_tolerance,
1139  copy_values,
1140  use_this_sparsity);
1141  }
1142 
1143 
1144 
1145  void
1146  SparseMatrix::reinit(const Epetra_CrsMatrix &input_matrix,
1147  const bool copy_values)
1148  {
1149  Assert(input_matrix.Filled() == true,
1150  ExcMessage("Input CrsMatrix has not called FillComplete()!"));
1151 
1152  column_space_map =
1153  std_cxx14::make_unique<Epetra_Map>(input_matrix.DomainMap());
1154 
1155  const Epetra_CrsGraph *graph = &input_matrix.Graph();
1156 
1157  nonlocal_matrix.reset();
1158  nonlocal_matrix_exporter.reset();
1159  matrix.reset();
1160  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
1161 
1162  matrix->FillComplete(*column_space_map, input_matrix.RangeMap(), true);
1163 
1164  if (copy_values == true)
1165  {
1166  // point to the first data entry in the two
1167  // matrices and copy the content
1168  const TrilinosScalar *in_values = input_matrix[0];
1169  TrilinosScalar * values = (*matrix)[0];
1170  const size_type my_nonzeros = input_matrix.NumMyNonzeros();
1171  std::memcpy(values, in_values, my_nonzeros * sizeof(TrilinosScalar));
1172  }
1173 
1174  last_action = Zero;
1175  compress(VectorOperation::insert);
1176  }
1177 
1178 
1179 
1180  void
1181  SparseMatrix::compress(::dealii::VectorOperation::values operation)
1182  {
1183  Epetra_CombineMode mode = last_action;
1184  if (last_action == Zero)
1185  {
1186  if ((operation == ::dealii::VectorOperation::add) ||
1187  (operation == ::dealii::VectorOperation::unknown))
1188  mode = Add;
1189  else if (operation == ::dealii::VectorOperation::insert)
1190  mode = Insert;
1191  else
1192  Assert(
1193  false,
1194  ExcMessage(
1195  "compress() can only be called with VectorOperation add, insert, or unknown"));
1196  }
1197  else
1198  {
1199  Assert(((last_action == Add) &&
1200  (operation != ::dealii::VectorOperation::insert)) ||
1201  ((last_action == Insert) &&
1202  (operation != ::dealii::VectorOperation::add)),
1203  ExcMessage("Operation and argument to compress() do not match"));
1204  }
1205 
1206  // flush buffers
1207  int ierr;
1208  if (nonlocal_matrix.get() != nullptr && mode == Add)
1209  {
1210  // do only export in case of an add() operation, otherwise the owning
1211  // processor must have set the correct entry
1212  nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
1213  if (nonlocal_matrix_exporter.get() == nullptr)
1214  nonlocal_matrix_exporter =
1215  std_cxx14::make_unique<Epetra_Export>(nonlocal_matrix->RowMap(),
1216  matrix->RowMap());
1217  ierr =
1218  matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
1219  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1220  ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
1221  nonlocal_matrix->PutScalar(0);
1222  }
1223  else
1224  ierr =
1225  matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true, mode);
1226 
1227  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1228 
1229  ierr = matrix->OptimizeStorage();
1230  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1231 
1232  last_action = Zero;
1233 
1234  compressed = true;
1235  }
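// [Annotation] compress() must be called collectively on all MPI ranks, and
// add() and set() epochs may not be mixed, which is what the Assert above
// enforces. A minimal sketch (hypothetical indices i, j):
//
//   A.add(i, j, 1.0);
//   A.compress(VectorOperation::add);    // finish the "add" epoch
//   A.set(i, j, 2.0);
//   A.compress(VectorOperation::insert); // finish the "insert" epoch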
1236 
1237 
1238 
1239  void
1240  SparseMatrix::clear()
1241  {
1242  // When we clear the matrix, reset
1243  // the pointer and generate an
1244  // empty matrix.
1245  column_space_map =
1246  std_cxx14::make_unique<Epetra_Map>(0,
1247  0,
1248  Utilities::Trilinos::comm_self());
1249  matrix =
1250  std_cxx14::make_unique<Epetra_FECrsMatrix>(View, *column_space_map, 0);
1251  nonlocal_matrix.reset();
1252  nonlocal_matrix_exporter.reset();
1253 
1254  matrix->FillComplete();
1255 
1256  compressed = true;
1257  }
1258 
1259 
1260 
1261  void
1262  SparseMatrix::clear_row(const size_type row,
1263  const TrilinosScalar new_diag_value)
1264  {
1265  Assert(matrix->Filled() == true, ExcMatrixNotCompressed());
1266 
1267  // Only do this on the rows owned
1268  // locally on this processor.
1269  int local_row =
1270  matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1271  if (local_row >= 0)
1272  {
1273  TrilinosScalar *values;
1274  int * col_indices;
1275  int num_entries;
1276  const int ierr =
1277  matrix->ExtractMyRowView(local_row, num_entries, values, col_indices);
1278  (void)ierr;
1279 
1280  Assert(ierr == 0, ExcTrilinosError(ierr));
1281 
1282  const std::ptrdiff_t diag_index =
1283  std::find(col_indices, col_indices + num_entries, local_row) -
1284  col_indices;
1285 
1286  for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
1287  if (diag_index != j || new_diag_value == 0)
1288  values[j] = 0.;
1289 
1290  if (diag_index != num_entries && std::fabs(values[diag_index]) == 0.0 &&
1291  new_diag_value != 0.0)
1292  values[diag_index] = new_diag_value;
1293  }
1294  }
1295 
1296 
1297 
1298  void
1299  SparseMatrix::clear_rows(const std::vector<size_type> &rows,
1300  const TrilinosScalar new_diag_value)
1301  {
1302  for (const auto row : rows)
1303  clear_row(row, new_diag_value);
1304  }
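// [Annotation] clear_row() is typically used to eliminate rows subject to
// Dirichlet constraints after assembly, e.g.
//
//   A.clear_row(constrained_dof, 1.0); // zero the row, 1.0 on the diagonal
//
// Only locally owned rows are modified, and the matrix must already be
// compressed (see the Assert at the top of clear_row()).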
1305 
1306 
1307 
1308  TrilinosScalar
1309  SparseMatrix::operator()(const size_type i, const size_type j) const
1310  {
1311  // Extract local indices in
1312  // the matrix.
1313  int trilinos_i =
1314  matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1315  trilinos_j =
1316  matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1317  TrilinosScalar value = 0.;
1318 
1319  // If the data is not on the
1320  // present processor, we throw
1321  // an exception. This is one of
1322  // the two tiny differences to
1323  // the el(i,j) call, which does
1324  // not throw any assertions.
1325  if (trilinos_i == -1)
1326  {
1327  Assert(false,
1328  ExcAccessToNonlocalElement(
1329  i, j, local_range().first, local_range().second));
1330  }
1331  else
1332  {
1333  // Check whether the matrix has
1334  // already been transformed to local
1335  // indices.
1336  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1337 
1338  // Prepare pointers for extraction
1339  // of a view of the row.
1340  int nnz_present = matrix->NumMyEntries(trilinos_i);
1341  int nnz_extracted;
1342  int * col_indices;
1343  TrilinosScalar *values;
1344 
1345  // Generate the view and make
1346  // sure that we have not generated
1347  // an error.
1348  // TODO Check that col_indices are int and not long long
1349  int ierr = matrix->ExtractMyRowView(trilinos_i,
1350  nnz_extracted,
1351  values,
1352  col_indices);
1353  (void)ierr;
1354  Assert(ierr == 0, ExcTrilinosError(ierr));
1355 
1356  Assert(nnz_present == nnz_extracted,
1357  ExcDimensionMismatch(nnz_present, nnz_extracted));
1358 
1359  // Search the index where we
1360  // look for the value, and then
1361  // finally get it.
1362  const std::ptrdiff_t local_col_index =
1363  std::find(col_indices, col_indices + nnz_present, trilinos_j) -
1364  col_indices;
1365 
1366  // This is actually the only
1367  // difference to the el(i,j)
1368  // function, which means that
1369  // we throw an exception in
1370  // this case instead of just
1371  // returning zero for an
1372  // element that is not present
1373  // in the sparsity pattern.
1374  if (local_col_index == nnz_present)
1375  {
1376  Assert(false, ExcInvalidIndex(i, j));
1377  }
1378  else
1379  value = values[local_col_index];
1380  }
1381 
1382  return value;
1383  }
1384 
1385 
1386 
1387  TrilinosScalar
1388  SparseMatrix::el(const size_type i, const size_type j) const
1389  {
1390  // Extract local indices in
1391  // the matrix.
1392  int trilinos_i =
1393  matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1394  trilinos_j =
1395  matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1396  TrilinosScalar value = 0.;
1397 
1398  // If the data is not on the
1399  // present processor, we can't
1400  // continue. Just print out zero
1401  // as discussed in the
1402  // documentation of this
1403  // function. if you want error
1404  // checking, use operator().
1405  if ((trilinos_i == -1) || (trilinos_j == -1))
1406  return 0.;
1407  else
1408  {
1409  // Check whether the matrix
1410  // already is transformed to
1411  // local indices.
1412  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1413 
1414  // Prepare pointers for extraction
1415  // of a view of the row.
1416  int nnz_present = matrix->NumMyEntries(trilinos_i);
1417  int nnz_extracted;
1418  int * col_indices;
1419  TrilinosScalar *values;
1420 
1421  // Generate the view and make
1422  // sure that we have not generated
1423  // an error.
1424  int ierr = matrix->ExtractMyRowView(trilinos_i,
1425  nnz_extracted,
1426  values,
1427  col_indices);
1428  (void)ierr;
1429  Assert(ierr == 0, ExcTrilinosError(ierr));
1430 
1431  Assert(nnz_present == nnz_extracted,
1432  ExcDimensionMismatch(nnz_present, nnz_extracted));
1433 
1434  // Search the index where we
1435  // look for the value, and then
1436  // finally get it.
1437  const std::ptrdiff_t local_col_index =
1438  std::find(col_indices, col_indices + nnz_present, trilinos_j) -
1439  col_indices;
1440 
1441  // This is actually the only
1442  // difference to the () function
1443  // querying (i,j), where we throw an
1444  // exception instead of just
1445  // returning zero for an element
1446  // that is not present in the
1447  // sparsity pattern.
1448  if (local_col_index == nnz_present)
1449  value = 0;
1450  else
1451  value = values[local_col_index];
1452  }
1453 
1454  return value;
1455  }
1456 
1457 
1458 
1459  TrilinosScalar
1460  SparseMatrix::diag_element(const size_type i) const
1461  {
1462  Assert(m() == n(), ExcNotQuadratic());
1463 
1464 # ifdef DEBUG
1465  // use operator() in debug mode because
1466  // it checks if this is a valid element
1467  // (in parallel)
1468  return operator()(i, i);
1469 # else
1470  // Trilinos doesn't seem to have a
1471  // more efficient way to access the
1472  // diagonal than by just using the
1473  // standard el(i,j) function.
1474  return el(i, i);
1475 # endif
1476  }
1477 
1478 
1479 
1480  unsigned int
1481  SparseMatrix::row_length(const size_type row) const
1482  {
1483  Assert(row < m(), ExcInternalError());
1484 
1485  // get a representation of the
1486  // present row
1487  int ncols = -1;
1488  int local_row =
1489  matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1490  Assert((local_row >= 0), ExcAccessToNonlocalRow(row));
1491 
1492  // on the processor who owns this
1493  // row, we'll have a non-negative
1494  // value.
1495  if (local_row >= 0)
1496  {
1497  int ierr = matrix->NumMyRowEntries(local_row, ncols);
1498  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1499  }
1500 
1501  return static_cast<unsigned int>(ncols);
1502  }
1503 
1504 
1505 
1506  void
1507  SparseMatrix::set(const std::vector<size_type> & row_indices,
1508  const std::vector<size_type> & col_indices,
1509  const FullMatrix<TrilinosScalar> &values,
1510  const bool elide_zero_values)
1511  {
1512  Assert(row_indices.size() == values.m(),
1513  ExcDimensionMismatch(row_indices.size(), values.m()));
1514  Assert(col_indices.size() == values.n(),
1515  ExcDimensionMismatch(col_indices.size(), values.n()));
1516 
1517  for (size_type i = 0; i < row_indices.size(); ++i)
1518  set(row_indices[i],
1519  col_indices.size(),
1520  col_indices.data(),
1521  &values(i, 0),
1522  elide_zero_values);
1523  }
1524 
1525 
1526 
1527  void
1528  SparseMatrix::set(const size_type row,
1529  const std::vector<size_type> & col_indices,
1530  const std::vector<TrilinosScalar> &values,
1531  const bool elide_zero_values)
1532  {
1533  Assert(col_indices.size() == values.size(),
1534  ExcDimensionMismatch(col_indices.size(), values.size()));
1535 
1536  set(row,
1537  col_indices.size(),
1538  col_indices.data(),
1539  values.data(),
1540  elide_zero_values);
1541  }
1542 
1543 
1544 
1545  template <>
1546  void
1547  SparseMatrix::set<TrilinosScalar>(const size_type row,
1548  const size_type n_cols,
1549  const size_type * col_indices,
1550  const TrilinosScalar *values,
1551  const bool elide_zero_values)
1552  {
1553  AssertIndexRange(row, this->m());
1554 
1555  int ierr;
1556  if (last_action == Add)
1557  {
1558  ierr =
1559  matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true);
1560 
1561  Assert(ierr == 0, ExcTrilinosError(ierr));
1562  }
1563 
1564  last_action = Insert;
1565 
1566  const TrilinosWrappers::types::int_type *col_index_ptr;
1567  const TrilinosScalar * col_value_ptr;
1568  const TrilinosWrappers::types::int_type trilinos_row = row;
1569  TrilinosWrappers::types::int_type n_columns;
1570 
1571  boost::container::small_vector<TrilinosScalar, 200> local_value_array(
1572  n_cols);
1573  boost::container::small_vector<TrilinosWrappers::types::int_type, 200>
1574  local_index_array(n_cols);
1575 
1576  // If we don't elide zeros, the pointers are already available... need to
1577  // cast to non-const pointers as that is the format taken by Trilinos (but
1578  // we will not modify const data)
1579  if (elide_zero_values == false)
1580  {
1581  col_index_ptr =
1582  reinterpret_cast<const TrilinosWrappers::types::int_type *>(
1583  col_indices);
1584  col_value_ptr = values;
1585  n_columns = n_cols;
1586  }
1587  else
1588  {
1589  // Otherwise, extract nonzero values in each row and get the
1590  // respective indices.
1591  col_index_ptr = local_index_array.data();
1592  col_value_ptr = local_value_array.data();
1593 
1594  n_columns = 0;
1595  for (size_type j = 0; j < n_cols; ++j)
1596  {
1597  const double value = values[j];
1598  AssertIsFinite(value);
1599  if (value != 0)
1600  {
1601  local_index_array[n_columns] = col_indices[j];
1602  local_value_array[n_columns] = value;
1603  n_columns++;
1604  }
1605  }
1606 
1607  AssertIndexRange(n_columns, n_cols + 1);
1608  }
1609 
1610 
1611  // If the calling matrix owns the row to which we want to insert values,
1612  // we can directly call the Epetra_CrsMatrix input function, which is much
1613  // faster than the Epetra_FECrsMatrix function. We distinguish between two
1614  // cases: the first one is when the matrix is not filled (i.e., it is
1615  // possible to add new elements to the sparsity pattern), and the second
1616  // one is when the pattern is already fixed. In the former case, we add
1617  // the possibility to insert new values, and in the second we just replace
1618  // data.
1619  if (matrix->RowMap().MyGID(
1620  static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1621  {
1622  if (matrix->Filled() == false)
1623  {
1624  ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
1625  row, static_cast<int>(n_columns), col_value_ptr, col_index_ptr);
1626 
1627  // When inserting elements, we do not want to create exceptions in
1628  // the case when inserting non-local data (since that's what we
1629  // want to do right now).
1630  if (ierr > 0)
1631  ierr = 0;
1632  }
1633  else
1634  ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row,
1635  n_columns,
1636  col_value_ptr,
1637  col_index_ptr);
1638  }
1639  else
1640  {
1641  // When we're at off-processor data, we have to stick with the
1642  // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
1643  // we call it is the fastest one (any other will lead to repeated
1644  // allocation and deallocation of memory in order to call the function
1645  // we already use, which is very inefficient if writing one element at
1646  // a time).
1647  compressed = false;
1648 
1649  if (matrix->Filled() == false)
1650  {
1651  ierr = matrix->InsertGlobalValues(1,
1652  &trilinos_row,
1653  n_columns,
1654  col_index_ptr,
1655  &col_value_ptr,
1656  Epetra_FECrsMatrix::ROW_MAJOR);
1657  if (ierr > 0)
1658  ierr = 0;
1659  }
1660  else
1661  ierr = matrix->ReplaceGlobalValues(1,
1662  &trilinos_row,
1663  n_columns,
1664  col_index_ptr,
1665  &col_value_ptr,
1666  Epetra_FECrsMatrix::ROW_MAJOR);
1667  // use the FECrsMatrix facilities for set even in the case when we
1668  // have explicitly set the off-processor rows because that only works
1669  // properly when adding elements, not when setting them (since we want
1670  // to only touch elements that have been set explicitly, and there is
1671  // no way on the receiving processor to identify them otherwise)
1672  }
1673 
1674  Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1675  AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
1676  }
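// [Annotation] Note that set() flushes pending add() data via
// GlobalAssemble() and switches last_action to Insert; writing into a row
// owned by another processor works, but leaves the matrix uncompressed
// until the next compress(VectorOperation::insert).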
1677 
1678 
1679 
1680  void
1681  SparseMatrix::add(const std::vector<size_type> & indices,
1682  const FullMatrix<TrilinosScalar> &values,
1683  const bool elide_zero_values)
1684  {
1685  Assert(indices.size() == values.m(),
1686  ExcDimensionMismatch(indices.size(), values.m()));
1687  Assert(values.m() == values.n(), ExcNotQuadratic());
1688 
1689  for (size_type i = 0; i < indices.size(); ++i)
1690  add(indices[i],
1691  indices.size(),
1692  indices.data(),
1693  &values(i, 0),
1694  elide_zero_values);
1695  }
1696 
1697 
1698 
1699  void
1700  SparseMatrix::add(const std::vector<size_type> & row_indices,
1701  const std::vector<size_type> & col_indices,
1702  const FullMatrix<TrilinosScalar> &values,
1703  const bool elide_zero_values)
1704  {
1705  Assert(row_indices.size() == values.m(),
1706  ExcDimensionMismatch(row_indices.size(), values.m()));
1707  Assert(col_indices.size() == values.n(),
1708  ExcDimensionMismatch(col_indices.size(), values.n()));
1709 
1710  for (size_type i = 0; i < row_indices.size(); ++i)
1711  add(row_indices[i],
1712  col_indices.size(),
1713  col_indices.data(),
1714  &values(i, 0),
1715  elide_zero_values);
1716  }
1717 
1718 
1719 
1720  void
1721  SparseMatrix::add(const size_type row,
1722  const std::vector<size_type> & col_indices,
1723  const std::vector<TrilinosScalar> &values,
1724  const bool elide_zero_values)
1725  {
1726  Assert(col_indices.size() == values.size(),
1727  ExcDimensionMismatch(col_indices.size(), values.size()));
1728 
1729  add(row,
1730  col_indices.size(),
1731  col_indices.data(),
1732  values.data(),
1733  elide_zero_values);
1734  }
1735 
1736 
1737 
1738  void
1739  SparseMatrix::add(const size_type row,
1740  const size_type n_cols,
1741  const size_type * col_indices,
1742  const TrilinosScalar *values,
1743  const bool elide_zero_values,
1744  const bool /*col_indices_are_sorted*/)
1745  {
1746  AssertIndexRange(row, this->m());
1747  int ierr;
1748  if (last_action == Insert)
1749  {
1750  // TODO: this could lead to a deadlock when only one processor
1751  // calls GlobalAssemble.
1752  ierr =
1753  matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), false);
1754 
1755  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1756  }
1757 
1758  last_action = Add;
1759 
1760  const TrilinosWrappers::types::int_type *col_index_ptr;
1761  const TrilinosScalar * col_value_ptr;
1762  const TrilinosWrappers::types::int_type trilinos_row = row;
1763  TrilinosWrappers::types::int_type n_columns;
1764 
1765  boost::container::small_vector<TrilinosScalar, 100> local_value_array(
1766  n_cols);
1767  boost::container::small_vector<TrilinosWrappers::types::int_type, 100>
1768  local_index_array(n_cols);
1769 
1770  // If we don't elide zeros, the pointers are already available... need to
1771  // cast to non-const pointers as that is the format taken by Trilinos (but
1772  // we will not modify const data)
1773  if (elide_zero_values == false)
1774  {
1775  col_index_ptr =
1776  reinterpret_cast<const TrilinosWrappers::types::int_type *>(
1777  col_indices);
1778  col_value_ptr = values;
1779  n_columns = n_cols;
1780 # ifdef DEBUG
1781  for (size_type j = 0; j < n_cols; ++j)
1782  AssertIsFinite(values[j]);
1783 # endif
1784  }
1785  else
1786  {
1787  // Otherwise, extract nonzero values in each row and the corresponding
1788  // index.
1789  col_index_ptr = local_index_array.data();
1790  col_value_ptr = local_value_array.data();
1791 
1792  n_columns = 0;
1793  for (size_type j = 0; j < n_cols; ++j)
1794  {
1795  const double value = values[j];
1796 
1797  AssertIsFinite(value);
1798  if (value != 0)
1799  {
1800  local_index_array[n_columns] = col_indices[j];
1801  local_value_array[n_columns] = value;
1802  ++n_columns;
1803  }
1804  }
1805 
1806  AssertIndexRange(n_columns, n_cols + 1);
1807  }
1808  // Exit early if there is nothing to do
1809  if (n_columns == 0)
1810  {
1811  return;
1812  }
1813 
1814  // If the calling processor owns the row to which we want to add values, we
1815  // can directly call the Epetra_CrsMatrix input function, which is much
1816  // faster than the Epetra_FECrsMatrix function.
1817  if (matrix->RowMap().MyGID(
1818  static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1819  {
1820  ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row,
1821  n_columns,
1822  col_value_ptr,
1823  col_index_ptr);
1824  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1825  }
1826  else if (nonlocal_matrix.get() != nullptr)
1827  {
1828  compressed = false;
1829  // this is the case when we have explicitly set the off-processor rows
1830  // and want to create a separate matrix object for them (to retain
1831  // thread-safety)
1832  Assert(nonlocal_matrix->RowMap().LID(
1833  static_cast<TrilinosWrappers::types::int_type>(row)) != -1,
1834  ExcMessage("Attempted to write into off-processor matrix row "
1835  "that has not be specified as being writable upon "
1836  "initialization"));
1837  ierr = nonlocal_matrix->SumIntoGlobalValues(row,
1838  n_columns,
1839  col_value_ptr,
1840  col_index_ptr);
1841  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1842  }
1843  else
1844  {
1845  // When we're at off-processor data, we have to stick with the
1846  // standard SumIntoGlobalValues function. Nevertheless, the way we
1847  // call it is the fastest one (any other will lead to repeated
1848  // allocation and deallocation of memory in order to call the function
1849  // we already use, which is very inefficient if writing one element at
1850  // a time).
1851  compressed = false;
1852 
1853  ierr = matrix->SumIntoGlobalValues(1,
1854  &trilinos_row,
1855  n_columns,
1856  col_index_ptr,
1857  &col_value_ptr,
1858  Epetra_FECrsMatrix::ROW_MAJOR);
1859  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1860  }
1861 
1862 # ifdef DEBUG
1863  if (ierr > 0)
1864  {
1865  std::cout << "------------------------------------------" << std::endl;
1866  std::cout << "Got error " << ierr << " in row " << row << " of proc "
1867  << matrix->RowMap().Comm().MyPID()
1868  << " when trying to add the columns:" << std::endl;
1869  for (TrilinosWrappers::types::int_type i = 0; i < n_columns; ++i)
1870  std::cout << col_index_ptr[i] << " ";
1871  std::cout << std::endl << std::endl;
1872  std::cout << "Matrix row "
1873  << (matrix->RowMap().MyGID(
1874  static_cast<TrilinosWrappers::types::int_type>(row)) ==
1875  false ?
1876  "(nonlocal part)" :
1877  "")
1878  << " has the following indices:" << std::endl;
1879  std::vector<TrilinosWrappers::types::int_type> indices;
1880  const Epetra_CrsGraph * graph =
1881  (nonlocal_matrix.get() != nullptr &&
1882  matrix->RowMap().MyGID(
1883  static_cast<TrilinosWrappers::types::int_type>(row)) == false) ?
1884  &nonlocal_matrix->Graph() :
1885  &matrix->Graph();
1886 
1887  indices.resize(graph->NumGlobalIndices(row));
1888  int n_indices = 0;
1889  graph->ExtractGlobalRowCopy(row,
1890  indices.size(),
1891  n_indices,
1892  indices.data());
1893  AssertDimension(n_indices, indices.size());
1894 
1895  for (TrilinosWrappers::types::int_type i = 0; i < n_indices; ++i)
1896  std::cout << indices[i] << " ";
1897  std::cout << std::endl << std::endl;
1898  Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1899  }
1900 # endif
1901  Assert(ierr >= 0, ExcTrilinosError(ierr));
1902  }
1903 
1904 
1905 
1906  SparseMatrix &
1907  SparseMatrix::operator=(const double d)
1908  {
1909  Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
1910  compress(
1911  ::VectorOperation::unknown); // TODO: why do we do this? Should we
1912  // not check for is_compressed?
1913 
1914  const int ierr = matrix->PutScalar(d);
1915  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1916  if (nonlocal_matrix.get() != nullptr)
1917  nonlocal_matrix->PutScalar(d);
1918 
1919  return *this;
1920  }
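 // An illustrative usage sketch (not part of the library source):
 // assigning 0.0 zeroes all stored entries while keeping the sparsity
 // pattern intact; assigning any other scalar trips the assertion above.
 //
 //   TrilinosWrappers::SparseMatrix A;
 //   // ... reinit() and assemble A ...
 //   A = 0.0; // reset values, keep pattern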
1921 
1922 
1923 
1924  void
1925  SparseMatrix::add(const TrilinosScalar factor, const SparseMatrix &rhs)
1926  {
1927  AssertDimension(rhs.m(), m());
1928  AssertDimension(rhs.n(), n());
1929  AssertDimension(rhs.local_range().first, local_range().first);
1930  AssertDimension(rhs.local_range().second, local_range().second);
1931  Assert(matrix->RowMap().SameAs(rhs.matrix->RowMap()),
1932  ExcMessage("Can only add matrices with same distribution of rows"));
1933  Assert(matrix->Filled() && rhs.matrix->Filled(),
1934  ExcMessage("Addition of matrices only allowed if matrices are "
1935  "filled, i.e., compress() has been called"));
1936 
1937  const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());
1938 
1939  for (const auto row : locally_owned_range_indices())
1940  {
1941  const int row_local = matrix->RowMap().LID(
1942  static_cast<TrilinosWrappers::types::int_type>(row));
1943  Assert((row_local >= 0), ExcAccessToNonlocalRow(row));
1944 
1945  // First get a view to the matrix columns of both matrices. Note that
1946  // the data is in local index spaces so we need to be careful not only
1947  // to compare column indices in case they are derived from the same
1948  // map.
1949  int n_entries, rhs_n_entries;
1950  TrilinosScalar *value_ptr, *rhs_value_ptr;
1951  int * index_ptr, *rhs_index_ptr;
1952  int ierr = rhs.matrix->ExtractMyRowView(row_local,
1953  rhs_n_entries,
1954  rhs_value_ptr,
1955  rhs_index_ptr);
1956  (void)ierr;
1957  Assert(ierr == 0, ExcTrilinosError(ierr));
1958 
1959  ierr =
1960  matrix->ExtractMyRowView(row_local, n_entries, value_ptr, index_ptr);
1961  Assert(ierr == 0, ExcTrilinosError(ierr));
1962  bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
1963  if (!expensive_checks)
1964  {
1965  // check if the column indices are the same. If yes, can simply
1966  // copy over the data.
1967  expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
1968  static_cast<void *>(rhs_index_ptr),
1969  sizeof(int) * n_entries) != 0;
1970  if (!expensive_checks)
1971  for (int i = 0; i < n_entries; ++i)
1972  value_ptr[i] += rhs_value_ptr[i] * factor;
1973  }
1974  // Now to the expensive case where we need to check all column indices
1975  // against each other (transformed into global index space) and where
1976  // we need to make sure that all entries we are about to add into the
1977  // lhs matrix actually exist
1978  if (expensive_checks)
1979  {
1980  for (int i = 0; i < rhs_n_entries; ++i)
1981  {
1982  if (rhs_value_ptr[i] == 0.)
1983  continue;
1984  const TrilinosWrappers::types::int_type rhs_global_col =
1985  global_column_index(*rhs.matrix, rhs_index_ptr[i]);
1986  int local_col = matrix->ColMap().LID(rhs_global_col);
1987  int *local_index = Utilities::lower_bound(index_ptr,
1988  index_ptr + n_entries,
1989  local_col);
1990  Assert(local_index != index_ptr + n_entries &&
1991  *local_index == local_col,
1992  ExcMessage(
1993  "Adding the entries from the other matrix "
1994  "failed, because the sparsity pattern "
1995  "of that matrix includes more elements than the "
1996  "calling matrix, which is not allowed."));
1997  value_ptr[local_index - index_ptr] += factor * rhs_value_ptr[i];
1998  }
1999  }
2000  }
2001  }
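 // A usage sketch (illustrative, assuming A and B share the row
 // distribution and B's sparsity pattern is contained in A's, as the
 // assertions above require):
 //
 //   A.compress(VectorOperation::add);
 //   B.compress(VectorOperation::add);
 //   A.add(2.0, B); // A := A + 2*B, processed row by row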
2002 
2003 
2004 
2005  void
2006  SparseMatrix::transpose()
2007  {
2008  // This only flips a flag that tells
2009  // Trilinos that any vmult operation
2010  // should be done with the
2011  // transpose. However, the matrix
2012  // structure is not reset.
2013  int ierr;
2014 
2015  if (!matrix->UseTranspose())
2016  {
2017  ierr = matrix->SetUseTranspose(true);
2018  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2019  }
2020  else
2021  {
2022  ierr = matrix->SetUseTranspose(false);
2023  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2024  }
2025  }
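 // Illustrative note: since only the flag is flipped, a subsequent
 // vmult() applies the transpose, and calling transpose() again
 // restores the original behavior:
 //
 //   A.transpose();
 //   A.vmult(dst, src); // now computes dst = A^T * src
 //   A.transpose();     // flip back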
2026 
2027 
2028 
2029  SparseMatrix &
2030  SparseMatrix::operator*=(const TrilinosScalar a)
2031  {
2032  const int ierr = matrix->Scale(a);
2033  Assert(ierr == 0, ExcTrilinosError(ierr));
2034  (void)ierr; // removes -Wunused-variable in optimized mode
2035 
2036  return *this;
2037  }
2038 
2039 
2040 
2041  SparseMatrix &
2042  SparseMatrix::operator/=(const TrilinosScalar a)
2043  {
2044  Assert(a != 0, ExcDivideByZero());
2045 
2046  const TrilinosScalar factor = 1. / a;
2047 
2048  const int ierr = matrix->Scale(factor);
2049  Assert(ierr == 0, ExcTrilinosError(ierr));
2050  (void)ierr; // removes -Wunused-variable in optimized mode
2051 
2052  return *this;
2053  }
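 // Minimal sketch of the two scaling operators just defined:
 //
 //   A *= 2.0; // every stored entry is doubled
 //   A /= 4.0; // implemented as multiplication by 1/4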
2054 
2055 
2056 
2057  TrilinosScalar
2058  SparseMatrix::l1_norm() const
2059  {
2060  Assert(matrix->Filled(), ExcMatrixNotCompressed());
2061  return matrix->NormOne();
2062  }
2063 
2064 
2065 
2066  TrilinosScalar
2067  SparseMatrix::linfty_norm() const
2068  {
2069  Assert(matrix->Filled(), ExcMatrixNotCompressed());
2070  return matrix->NormInf();
2071  }
2072 
2073 
2074 
2075  TrilinosScalar
2076  SparseMatrix::frobenius_norm() const
2077  {
2078  Assert(matrix->Filled(), ExcMatrixNotCompressed());
2079  return matrix->NormFrobenius();
2080  }
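 // Hedged summary of the three norms above, as defined by Epetra for a
 // matrix with entries a_ij:
 //
 //   l1_norm()        : max over columns j of sum_i |a_ij|
 //   linfty_norm()    : max over rows i of sum_j |a_ij|
 //   frobenius_norm() : sqrt(sum over all i,j of a_ij^2)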
2081 
2082 
2083 
2084  namespace internal
2085  {
2086  namespace SparseMatrixImplementation
2087  {
2088  template <typename VectorType>
2089  inline void
2090  check_vector_map_equality(const Epetra_CrsMatrix &,
2091  const VectorType &,
2092  const VectorType &)
2093  {}
2094 
2095  inline void
2096  check_vector_map_equality(const Epetra_CrsMatrix & m,
2097  const TrilinosWrappers::MPI::Vector &in,
2098  const TrilinosWrappers::MPI::Vector &out)
2099  {
2100  Assert(in.trilinos_partitioner().SameAs(m.DomainMap()) == true,
2101  ExcMessage(
2102  "Column map of matrix does not fit with vector map!"));
2103  Assert(out.trilinos_partitioner().SameAs(m.RangeMap()) == true,
2104  ExcMessage("Row map of matrix does not fit with vector map!"));
2105  (void)m;
2106  (void)in;
2107  (void)out;
2108  }
2109  } // namespace SparseMatrixImplementation
2110  } // namespace internal
2111 
2112 
2113  template <typename VectorType>
2114  typename std::enable_if<
2115  std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2116  SparseMatrix::vmult(VectorType &dst, const VectorType &src) const
2117  {
2118  Assert(&src != &dst, ExcSourceEqualsDestination());
2119  Assert(matrix->Filled(), ExcMatrixNotCompressed());
2120  (void)src;
2121  (void)dst;
2122 
2123  internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
2124  src,
2125  dst);
2126  const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
2127  AssertDimension(dst_local_size, matrix->RangeMap().NumMyPoints());
2128  const size_type src_local_size = internal::end(src) - internal::begin(src);
2129  AssertDimension(src_local_size, matrix->DomainMap().NumMyPoints());
2130 
2131  Epetra_MultiVector tril_dst(
2132  View, matrix->RangeMap(), internal::begin(dst), dst_local_size, 1);
2133  Epetra_MultiVector tril_src(View,
2134  matrix->DomainMap(),
2135  const_cast<TrilinosScalar *>(
2136  internal::begin(src)),
2137  src_local_size,
2138  1);
2139 
2140  const int ierr = matrix->Multiply(false, tril_src, tril_dst);
2141  Assert(ierr == 0, ExcTrilinosError(ierr));
2142  (void)ierr; // removes -Wunused-variable in optimized mode
2143  }
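 // A usage sketch (assumes the vectors are laid out according to the
 // matrix' domain and range maps, as the checks above require):
 //
 //   TrilinosWrappers::MPI::Vector src(A.locally_owned_domain_indices(),
 //                                     mpi_communicator);
 //   TrilinosWrappers::MPI::Vector dst(A.locally_owned_range_indices(),
 //                                     mpi_communicator);
 //   A.vmult(dst, src); // dst = A * src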
2144 
2145 
2146 
2147  template <typename VectorType>
2148  typename std::enable_if<
2149  !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2150  SparseMatrix::vmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2151  {
2152  AssertThrow(false, ExcNotImplemented());
2153  }
2154 
2155 
2156 
2157  template <typename VectorType>
2158  typename std::enable_if<
2159  std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2160  SparseMatrix::Tvmult(VectorType &dst, const VectorType &src) const
2161  {
2162  Assert(&src != &dst, ExcSourceEqualsDestination());
2163  Assert(matrix->Filled(), ExcMatrixNotCompressed());
2164 
2165  internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
2166  dst,
2167  src);
2168  const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
2169  AssertDimension(dst_local_size, matrix->DomainMap().NumMyPoints());
2170  const size_type src_local_size = internal::end(src) - internal::begin(src);
2171  AssertDimension(src_local_size, matrix->RangeMap().NumMyPoints());
2172 
2173  Epetra_MultiVector tril_dst(
2174  View, matrix->DomainMap(), internal::begin(dst), dst_local_size, 1);
2175  Epetra_MultiVector tril_src(View,
2176  matrix->RangeMap(),
2177  const_cast<double *>(internal::begin(src)),
2178  src_local_size,
2179  1);
2180 
2181  const int ierr = matrix->Multiply(true, tril_src, tril_dst);
2182  Assert(ierr == 0, ExcTrilinosError(ierr));
2183  (void)ierr; // removes -Wunused-variable in optimized mode
2184  }
2185 
2186 
2187 
2188  template <typename VectorType>
2189  typename std::enable_if<
2190  !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2191  SparseMatrix::Tvmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2192  {
2193  AssertThrow(false, ExcNotImplemented());
2194  }
2195 
2196 
2197 
2198  template <typename VectorType>
2199  void
2200  SparseMatrix::vmult_add(VectorType &dst, const VectorType &src) const
2201  {
2202  Assert(&src != &dst, ExcSourceEqualsDestination());
2203 
2204  // Reinit a temporary vector with the fast argument set, i.e., without
2205  // zeroing the entries (to save time).
2206  VectorType tmp_vector;
2207  tmp_vector.reinit(dst, true);
2208  vmult(tmp_vector, src);
2209  dst += tmp_vector;
2210  }
2211 
2212 
2213 
2214  template <typename VectorType>
2215  void
2216  SparseMatrix::Tvmult_add(VectorType &dst, const VectorType &src) const
2217  {
2218  Assert(&src != &dst, ExcSourceEqualsDestination());
2219 
2220  // Reinit a temporary vector with the fast argument set, i.e., without
2221  // zeroing the entries (to save time).
2222  VectorType tmp_vector;
2223  tmp_vector.reinit(dst, true);
2224  Tvmult(tmp_vector, src);
2225  dst += tmp_vector;
2226  }
2227 
2228 
2229 
2230  TrilinosScalar
2231  SparseMatrix::matrix_norm_square(const MPI::Vector &v) const
2232  {
2233  Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2234 
2235  MPI::Vector temp_vector;
2236  temp_vector.reinit(v, true);
2237 
2238  vmult(temp_vector, v);
2239  return temp_vector * v;
2240  }
2241 
2242 
2243 
2244  TrilinosScalar
2245  SparseMatrix::matrix_scalar_product(const MPI::Vector &u,
2246  const MPI::Vector &v) const
2247  {
2248  Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2249 
2250  MPI::Vector temp_vector;
2251  temp_vector.reinit(v, true);
2252 
2253  vmult(temp_vector, v);
2254  return u * temp_vector;
2255  }
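 // Note: matrix_norm_square() above returns v^T A v (meaningful for
 // symmetric matrices), and matrix_scalar_product() the general bilinear
 // form u^T A v; both require a square matrix, as asserted.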
2256 
2257 
2258 
2259  TrilinosScalar
2260  SparseMatrix::residual(MPI::Vector &dst,
2261  const MPI::Vector &x,
2262  const MPI::Vector &b) const
2263  {
2264  vmult(dst, x);
2265  dst -= b;
2266  dst *= -1.;
2267 
2268  return dst.l2_norm();
2269  }
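 // Note: the body computes dst = -(A x - b) = b - A x, so the returned
 // value is the l2 norm of the residual b - A x.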
2270 
2271 
2272 
2273  namespace internals
2274  {
2275  using size_type = ::types::global_dof_index;
2276 
2277  void
2278  perform_mmult(const SparseMatrix &inputleft,
2279  const SparseMatrix &inputright,
2280  SparseMatrix & result,
2281  const MPI::Vector & V,
2282  const bool transpose_left)
2283  {
2284  const bool use_vector = (V.size() == inputright.m());
2285  if (transpose_left == false)
2286  {
2287  Assert(inputleft.n() == inputright.m(),
2288  ExcDimensionMismatch(inputleft.n(), inputright.m()));
2289  Assert(inputleft.domain_partitioner().SameAs(
2290  inputright.range_partitioner()),
2291  ExcMessage("Parallel partitioning of A and B does not fit."));
2292  }
2293  else
2294  {
2295  Assert(inputleft.m() == inputright.m(),
2296  ExcDimensionMismatch(inputleft.m(), inputright.m()));
2297  Assert(inputleft.range_partitioner().SameAs(
2298  inputright.range_partitioner()),
2299  ExcMessage("Parallel partitioning of A and B does not fit."));
2300  }
2301 
2302  result.clear();
2303 
2304  // create a suitable operator B: in case
2305  // we do not use a vector, all we need to
2306  // do is to set the pointer. Otherwise,
2307  // we insert the data from B, but
2308  // multiply each row with the respective
2309  // vector element.
2310  Teuchos::RCP<Epetra_CrsMatrix> mod_B;
2311  if (use_vector == false)
2312  {
2313  mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>(
2314  &inputright.trilinos_matrix()),
2315  false);
2316  }
2317  else
2318  {
2319  mod_B = Teuchos::rcp(
2320  new Epetra_CrsMatrix(Copy, inputright.trilinos_sparsity_pattern()),
2321  true);
2322  mod_B->FillComplete(inputright.domain_partitioner(),
2323  inputright.range_partitioner());
2324  Assert(inputright.local_range() == V.local_range(),
2325  ExcMessage("Parallel distribution of matrix B and vector V "
2326  "does not match."));
2327 
2328  const int local_N = inputright.local_size();
2329  for (int i = 0; i < local_N; ++i)
2330  {
2331  int N_entries = -1;
2332  double *new_data, *B_data;
2333  mod_B->ExtractMyRowView(i, N_entries, new_data);
2334  inputright.trilinos_matrix().ExtractMyRowView(i,
2335  N_entries,
2336  B_data);
2337  double value = V.trilinos_vector()[0][i];
2338  for (TrilinosWrappers::types::int_type j = 0; j < N_entries; ++j)
2339  new_data[j] = value * B_data[j];
2340  }
2341  }
2342 
2343 
2344  SparseMatrix tmp_result(transpose_left ?
2345  inputleft.locally_owned_domain_indices() :
2346  inputleft.locally_owned_range_indices(),
2347  inputright.locally_owned_domain_indices(),
2348  inputleft.get_mpi_communicator());
2349 
2350 # ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
2351  EpetraExt::MatrixMatrix::Multiply(inputleft.trilinos_matrix(),
2352  transpose_left,
2353  *mod_B,
2354  false,
2355  const_cast<Epetra_CrsMatrix &>(
2356  tmp_result.trilinos_matrix()));
2357 # else
2358  Assert("false", ExcMessage("This function requires EpetraExt."));
2359 # endif
2360  result.reinit(tmp_result.trilinos_matrix());
2361  }
2362  } // namespace internals
2363 
2364 
2365  void
2366  SparseMatrix::mmult(SparseMatrix &C,
2367  const SparseMatrix &B,
2368  const MPI::Vector & V) const
2369  {
2370  internals::perform_mmult(*this, B, C, V, false);
2371  }
2372 
2373 
2374 
2375  void
2376  SparseMatrix::Tmmult(SparseMatrix &C,
2377  const SparseMatrix &B,
2378  const MPI::Vector & V) const
2379  {
2380  internals::perform_mmult(*this, B, C, V, true);
2381  }
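 // A usage sketch (illustrative): with the optional vector V, the right
 // factor is scaled row-wise, i.e. C = A * diag(V) * B, and Tmmult()
 // computes C = A^T * diag(V) * B; omitting V gives the plain products.
 //
 //   TrilinosWrappers::SparseMatrix C;
 //   A.mmult(C, B);     // C = A * B
 //   A.Tmmult(C, B, V); // C = A^T * diag(V) * B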
2382 
2383 
2384 
2385  void
2386  SparseMatrix::write_ascii()
2387  {
2388  Assert(false, ExcNotImplemented());
2389  }
2390 
2391 
2392 
2393  // As of now, no particularly neat
2394  // output is generated in case of
2395  // multiple processors.
2396  void
2397  SparseMatrix::print(std::ostream &out,
2398  const bool print_detailed_trilinos_information) const
2399  {
2400  if (print_detailed_trilinos_information == true)
2401  out << *matrix;
2402  else
2403  {
2404  double *values;
2405  int * indices;
2406  int num_entries;
2407 
2408  for (int i = 0; i < matrix->NumMyRows(); ++i)
2409  {
2410  matrix->ExtractMyRowView(i, num_entries, values, indices);
2411  for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
2412  out << "(" << TrilinosWrappers::global_row_index(*matrix, i)
2413  << ","
2415  << ") " << values[j] << std::endl;
2416  }
2417  }
2418 
2419  AssertThrow(out, ExcIO());
2420  }
2421 
2422 
2423 
2424  SparseMatrix::size_type
2425  SparseMatrix::memory_consumption() const
2426  {
2427  size_type static_memory =
2428  sizeof(*this) + sizeof(*matrix) + sizeof(*matrix->Graph().DataPtr());
2429  return (
2430  (sizeof(TrilinosScalar) + sizeof(TrilinosWrappers::types::int_type)) *
2431  matrix->NumMyNonzeros() +
2432  sizeof(int) * local_size() + static_memory);
2433  }
2434 
2435 
2436 
2437  const Epetra_Map &
2438  SparseMatrix::domain_partitioner() const
2439  {
2440  return matrix->DomainMap();
2441  }
2442 
2443 
2444 
2445  const Epetra_Map &
2446  SparseMatrix::range_partitioner() const
2447  {
2448  return matrix->RangeMap();
2449  }
2450 
2451 
2452 
2453  const Epetra_Map &
2454  SparseMatrix::row_partitioner() const
2455  {
2456  return matrix->RowMap();
2457  }
2458 
2459 
2460 
2461  const Epetra_Map &
2462  SparseMatrix::col_partitioner() const
2463  {
2464  return matrix->ColMap();
2465  }
2466 
2467 
2468 
2469  MPI_Comm
2470  SparseMatrix::get_mpi_communicator() const
2471  {
2472 # ifdef DEAL_II_WITH_MPI
2473 
2474  const Epetra_MpiComm *mpi_comm =
2475  dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
2476  Assert(mpi_comm != nullptr, ExcInternalError());
2477  return mpi_comm->Comm();
2478 # else
2479 
2480  return MPI_COMM_SELF;
2481 
2482 # endif
2483  }
2484 } // namespace TrilinosWrappers
2485 
2486 
2487 namespace TrilinosWrappers
2488 {
2489  namespace internal
2490  {
2491  namespace
2492  {
2493 # ifndef DEAL_II_WITH_MPI
2494  Epetra_Map
2495  make_serial_Epetra_map(const IndexSet &serial_partitioning)
2496  {
2497  // See IndexSet::make_trilinos_map
2498  return Epetra_Map(
2499  TrilinosWrappers::types::int_type(serial_partitioning.size()),
2500  TrilinosWrappers::types::int_type(serial_partitioning.n_elements()),
2501  0,
2502  Epetra_SerialComm());
2503  }
2504 # endif
2505  } // namespace
2506 
2507  namespace LinearOperatorImplementation
2508  {
2509  TrilinosPayload::TrilinosPayload()
2510  : use_transpose(false)
2511  ,
2512 # ifdef DEAL_II_WITH_MPI
2513  communicator(MPI_COMM_SELF)
2514  , domain_map(IndexSet().make_trilinos_map(communicator.Comm()))
2515  , range_map(IndexSet().make_trilinos_map(communicator.Comm()))
2516 # else
2517  domain_map(internal::make_serial_Epetra_map(IndexSet()))
2518  , range_map(internal::make_serial_Epetra_map(IndexSet()))
2519 # endif
2520  {
2521  vmult = [](Range &, const Domain &) {
2522  Assert(false,
2523  ExcMessage("Uninitialized TrilinosPayload::vmult called "
2524  "(Default constructor)"));
2525  };
2526 
2527  Tvmult = [](Domain &, const Range &) {
2528  Assert(false,
2529  ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
2530  "(Default constructor)"));
2531  };
2532 
2533  inv_vmult = [](Domain &, const Range &) {
2534  Assert(false,
2535  ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2536  "(Default constructor)"));
2537  };
2538 
2539  inv_Tvmult = [](Range &, const Domain &) {
2540  Assert(false,
2541  ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2542  "(Default constructor)"));
2543  };
2544  }
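 // Context sketch (hedged): these payloads back the LinearOperator
 // machinery and are normally obtained through a linear_operator()
 // wrapper rather than constructed directly, e.g. (illustrative names):
 //
 //   using VectorType = TrilinosWrappers::MPI::Vector;
 //   const auto op_A  = TrilinosWrappers::linear_operator<VectorType>(A);
 //   op_A.vmult(dst, src); // dispatches to the payload's vmult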
2545 
2546 
2547 
2548  TrilinosPayload::TrilinosPayload(
2549  const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2550  const TrilinosWrappers::SparseMatrix &matrix)
2551  : use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
2552  ,
2553 # ifdef DEAL_II_WITH_MPI
2554  communicator(matrix_exemplar.get_mpi_communicator())
2555  , domain_map(
2556  matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
2557  communicator.Comm()))
2558  , range_map(
2559  matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
2560  communicator.Comm()))
2561 # else
2562  domain_map(internal::make_serial_Epetra_map(
2563  matrix_exemplar.locally_owned_domain_indices()))
2564  , range_map(internal::make_serial_Epetra_map(
2565  matrix_exemplar.locally_owned_range_indices()))
2566 # endif
2567  {
2568  vmult = [&matrix_exemplar, &matrix](Range & tril_dst,
2569  const Domain &tril_src) {
2570  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2571  Assert(&tril_src != &tril_dst,
2572  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2573  Assert(matrix.trilinos_matrix().Filled(),
2574  TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2575  internal::check_vector_map_equality(
2576  matrix_exemplar.trilinos_matrix(),
2577  tril_src,
2578  tril_dst,
2579  matrix_exemplar.trilinos_matrix().UseTranspose());
2580  internal::check_vector_map_equality(
2581  matrix.trilinos_matrix(),
2582  tril_src,
2583  tril_dst,
2584  matrix.trilinos_matrix().UseTranspose());
2585 
2586  const int ierr = matrix.trilinos_matrix().Apply(tril_src, tril_dst);
2587  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2588  };
2589 
2590  Tvmult = [&matrix_exemplar, &matrix](Domain & tril_dst,
2591  const Range &tril_src) {
2592  // Duplicated from TrilinosWrappers::SparseMatrix::Tvmult
2593  Assert(&tril_src != &tril_dst,
2594  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2595  Assert(matrix.trilinos_matrix().Filled(),
2596  TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2597  internal::check_vector_map_equality(
2598  matrix_exemplar.trilinos_matrix(),
2599  tril_src,
2600  tril_dst,
2601  !matrix_exemplar.trilinos_matrix().UseTranspose());
2602  internal::check_vector_map_equality(
2603  matrix.trilinos_matrix(),
2604  tril_src,
2605  tril_dst,
2606  !matrix.trilinos_matrix().UseTranspose());
2607 
2608  Epetra_CrsMatrix &tril_mtrx_non_const =
2609  const_cast<Epetra_CrsMatrix &>(matrix.trilinos_matrix());
2610  tril_mtrx_non_const.SetUseTranspose(
2611  !matrix.trilinos_matrix().UseTranspose());
2612  const int ierr = matrix.trilinos_matrix().Apply(tril_src, tril_dst);
2613  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2614  tril_mtrx_non_const.SetUseTranspose(
2615  !matrix.trilinos_matrix().UseTranspose());
2616  };
2617 
2618  inv_vmult = [](Domain &, const Range &) {
2619  Assert(false,
2620  ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2621  "(Matrix constructor with matrix exemplar)"));
2622  };
2623 
2624  inv_Tvmult = [](Range &, const Domain &) {
2625  Assert(false,
2626  ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2627  "(Matrix constructor with matrix exemplar)"));
2628  };
2629  }
2630 
2631 
2632 
2633  TrilinosPayload::TrilinosPayload(
2634  const TrilinosWrappers::SparseMatrix & matrix_exemplar,
2635  const TrilinosWrappers::PreconditionBase &preconditioner)
2636  : use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
2637  ,
2638 # ifdef DEAL_II_WITH_MPI
2639  communicator(matrix_exemplar.get_mpi_communicator())
2640  , domain_map(
2641  matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
2642  communicator.Comm()))
2643  , range_map(
2644  matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
2645  communicator.Comm()))
2646 # else
2647  domain_map(internal::make_serial_Epetra_map(
2648  matrix_exemplar.locally_owned_domain_indices()))
2649  , range_map(internal::make_serial_Epetra_map(
2650  matrix_exemplar.locally_owned_range_indices()))
2651 # endif
2652  {
2653  vmult = [&matrix_exemplar, &preconditioner](Range & tril_dst,
2654  const Domain &tril_src) {
2655  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2656  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2657  Assert(&tril_src != &tril_dst,
2658  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2659  internal::check_vector_map_equality(
2660  matrix_exemplar.trilinos_matrix(),
2661  tril_src,
2662  tril_dst,
2663  matrix_exemplar.trilinos_matrix().UseTranspose());
2664  internal::check_vector_map_equality(
2665  preconditioner.trilinos_operator(),
2666  tril_src,
2667  tril_dst,
2668  preconditioner.trilinos_operator().UseTranspose());
2669 
2670  const int ierr =
2671  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2672  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2673  };
2674 
2675  Tvmult = [&matrix_exemplar, &preconditioner](Domain & tril_dst,
2676  const Range &tril_src) {
2677  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2678  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2679  Assert(&tril_src != &tril_dst,
2680  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2681  internal::check_vector_map_equality(
2682  matrix_exemplar.trilinos_matrix(),
2683  tril_src,
2684  tril_dst,
2685  !matrix_exemplar.trilinos_matrix().UseTranspose());
2686  internal::check_vector_map_equality(
2687  preconditioner.trilinos_operator(),
2688  tril_src,
2689  tril_dst,
2690  !preconditioner.trilinos_operator().UseTranspose());
2691 
2692  preconditioner.trilinos_operator().SetUseTranspose(
2693  !preconditioner.trilinos_operator().UseTranspose());
2694  const int ierr =
2695  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2696  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2697  preconditioner.trilinos_operator().SetUseTranspose(
2698  !preconditioner.trilinos_operator().UseTranspose());
2699  };
2700 
2701  inv_vmult = [&matrix_exemplar, &preconditioner](Domain & tril_dst,
2702  const Range &tril_src) {
2703  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2704  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2705  Assert(&tril_src != &tril_dst,
2706  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2707  internal::check_vector_map_equality(
2708  matrix_exemplar.trilinos_matrix(),
2709  tril_src,
2710  tril_dst,
2711  !matrix_exemplar.trilinos_matrix().UseTranspose());
2712  internal::check_vector_map_equality(
2713  preconditioner.trilinos_operator(),
2714  tril_src,
2715  tril_dst,
2716  !preconditioner.trilinos_operator().UseTranspose());
2717 
2718  const int ierr =
2719  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2720  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2721  };
2722 
2723  inv_Tvmult = [&matrix_exemplar,
2724  &preconditioner](Range & tril_dst,
2725  const Domain &tril_src) {
2726  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2727  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2728  Assert(&tril_src != &tril_dst,
2729  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2730  internal::check_vector_map_equality(
2731  matrix_exemplar.trilinos_matrix(),
2732  tril_src,
2733  tril_dst,
2734  matrix_exemplar.trilinos_matrix().UseTranspose());
2735  internal::check_vector_map_equality(
2736  preconditioner.trilinos_operator(),
2737  tril_src,
2738  tril_dst,
2739  preconditioner.trilinos_operator().UseTranspose());
2740 
2741  preconditioner.trilinos_operator().SetUseTranspose(
2742  !preconditioner.trilinos_operator().UseTranspose());
2743  const int ierr =
2744  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2745  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2746  preconditioner.trilinos_operator().SetUseTranspose(
2747  !preconditioner.trilinos_operator().UseTranspose());
2748  };
2749  }
2750 
2751 
2752 
2753  TrilinosPayload::TrilinosPayload(
2754  const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
2755  const TrilinosWrappers::PreconditionBase &preconditioner)
2756  : use_transpose(
2757  preconditioner_exemplar.trilinos_operator().UseTranspose())
2758  ,
2759 # ifdef DEAL_II_WITH_MPI
2760  communicator(preconditioner_exemplar.get_mpi_communicator())
2761  , domain_map(preconditioner_exemplar.locally_owned_domain_indices()
2762  .make_trilinos_map(communicator.Comm()))
2763  , range_map(preconditioner_exemplar.locally_owned_range_indices()
2764  .make_trilinos_map(communicator.Comm()))
2765 # else
2766  domain_map(internal::make_serial_Epetra_map(
2767  preconditioner_exemplar.locally_owned_domain_indices()))
2768  , range_map(internal::make_serial_Epetra_map(
2769  preconditioner_exemplar.locally_owned_range_indices()))
2770 # endif
2771  {
2772  vmult = [&preconditioner_exemplar,
2773  &preconditioner](Range &tril_dst, const Domain &tril_src) {
2774  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2775  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2776  Assert(&tril_src != &tril_dst,
2777  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2778  internal::check_vector_map_equality(
2779  preconditioner_exemplar.trilinos_operator(),
2780  tril_src,
2781  tril_dst,
2782  preconditioner_exemplar.trilinos_operator().UseTranspose());
2783  internal::check_vector_map_equality(
2784  preconditioner.trilinos_operator(),
2785  tril_src,
2786  tril_dst,
2787  preconditioner.trilinos_operator().UseTranspose());
2788 
2789  const int ierr =
2790  preconditioner.trilinos_operator().Apply(tril_src, tril_dst);
2791  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2792  };
2793 
2794  Tvmult = [&preconditioner_exemplar,
2795  &preconditioner](Domain &tril_dst, const Range &tril_src) {
2796  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2797  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2798  Assert(&tril_src != &tril_dst,
2799  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2800  internal::check_vector_map_equality(
2801  preconditioner_exemplar.trilinos_operator(),
2802  tril_src,
2803  tril_dst,
2804  !preconditioner_exemplar.trilinos_operator().UseTranspose());
2805  internal::check_vector_map_equality(
2806  preconditioner.trilinos_operator(),
2807  tril_src,
2808  tril_dst,
2809  !preconditioner.trilinos_operator().UseTranspose());
2810 
2811  preconditioner.trilinos_operator().SetUseTranspose(
2812  !preconditioner.trilinos_operator().UseTranspose());
2813  const int ierr =
2814  preconditioner.trilinos_operator().Apply(tril_src, tril_dst);
2815  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2816  preconditioner.trilinos_operator().SetUseTranspose(
2817  !preconditioner.trilinos_operator().UseTranspose());
2818  };
2819 
2820  inv_vmult = [&preconditioner_exemplar,
2821  &preconditioner](Domain &tril_dst, const Range &tril_src) {
2822  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2823  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2824  Assert(&tril_src != &tril_dst,
2825  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2826  internal::check_vector_map_equality(
2827  preconditioner_exemplar.trilinos_operator(),
2828  tril_src,
2829  tril_dst,
2830  !preconditioner_exemplar.trilinos_operator().UseTranspose());
2831  internal::check_vector_map_equality(
2832  preconditioner.trilinos_operator(),
2833  tril_src,
2834  tril_dst,
2835  !preconditioner.trilinos_operator().UseTranspose());
2836 
2837  const int ierr =
2838  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2839  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2840  };
2841 
2842  inv_Tvmult = [&preconditioner_exemplar,
2843  &preconditioner](Range & tril_dst,
2844  const Domain &tril_src) {
2845  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2846  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2847  Assert(&tril_src != &tril_dst,
2848  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2849  internal::check_vector_map_equality(
2850  preconditioner_exemplar.trilinos_operator(),
2851  tril_src,
2852  tril_dst,
2853  preconditioner_exemplar.trilinos_operator().UseTranspose());
2854  internal::check_vector_map_equality(
2855  preconditioner.trilinos_operator(),
2856  tril_src,
2857  tril_dst,
2858  preconditioner.trilinos_operator().UseTranspose());
2859 
2860  preconditioner.trilinos_operator().SetUseTranspose(
2861  !preconditioner.trilinos_operator().UseTranspose());
2862  const int ierr =
2863  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2864  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2865  preconditioner.trilinos_operator().SetUseTranspose(
2866  !preconditioner.trilinos_operator().UseTranspose());
2867  };
2868  }
2869 
2870 
2871 
2872  TrilinosPayload::TrilinosPayload(const TrilinosPayload &payload)
2873  : vmult(payload.vmult)
2874  , Tvmult(payload.Tvmult)
2875  , inv_vmult(payload.inv_vmult)
2876  , inv_Tvmult(payload.inv_Tvmult)
2877  , use_transpose(payload.use_transpose)
2878  , communicator(payload.communicator)
2879  , domain_map(payload.domain_map)
2880  , range_map(payload.range_map)
2881  {}
2882 
2883 
2884 
2885  // Composite copy constructor
2886  // This is required for PackagedOperations
2887  TrilinosPayload::TrilinosPayload(const TrilinosPayload &first_op,
2888  const TrilinosPayload &second_op)
2889  : use_transpose(false)
2890  , // The combination of operators provides the exact
2891  // definition of the operation
2892  communicator(first_op.communicator)
2893  , domain_map(second_op.domain_map)
2894  , range_map(first_op.range_map)
2895  {}
2896 
2897 
2898 
2899  TrilinosPayload
2900  TrilinosPayload::identity_payload() const
2901  {
2902  TrilinosPayload return_op(*this);
2903 
2904  return_op.vmult = [](Range &tril_dst, const Range &tril_src) {
2905  tril_dst = tril_src;
2906  };
2907 
2908  return_op.Tvmult = [](Range &tril_dst, const Range &tril_src) {
2909  tril_dst = tril_src;
2910  };
2911 
2912  return_op.inv_vmult = [](Range &tril_dst, const Range &tril_src) {
2913  tril_dst = tril_src;
2914  };
2915 
2916  return_op.inv_Tvmult = [](Range &tril_dst, const Range &tril_src) {
2917  tril_dst = tril_src;
2918  };
2919 
2920  return return_op;
2921  }
2922 
2923 
2924 
2925  TrilinosPayload
2926  TrilinosPayload::null_payload() const
2927  {
2928  TrilinosPayload return_op(*this);
2929 
2930  return_op.vmult = [](Range &tril_dst, const Domain &) {
2931  const int ierr = tril_dst.PutScalar(0.0);
2932 
2933  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2934  };
2935 
2936  return_op.Tvmult = [](Domain &tril_dst, const Range &) {
2937  const int ierr = tril_dst.PutScalar(0.0);
2938 
2939  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2940  };
2941 
2942  return_op.inv_vmult = [](Domain &tril_dst, const Range &) {
2943  AssertThrow(false,
2944  ExcMessage("Cannot compute inverse of null operator"));
2945 
2946  const int ierr = tril_dst.PutScalar(0.0);
2947 
2948  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2949  };
2950 
2951  return_op.inv_Tvmult = [](Range &tril_dst, const Domain &) {
2952  AssertThrow(false,
2953  ExcMessage("Cannot compute inverse of null operator"));
2954 
2955  const int ierr = tril_dst.PutScalar(0.0);
2956 
2957  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2958  };
2959 
2960  return return_op;
2961  }
2962 
2963 
2964 
2965  TrilinosPayload
2966  TrilinosPayload::transpose_payload() const
2967  {
2968  TrilinosPayload return_op(*this);
2969  return_op.transpose();
2970  return return_op;
2971  }
2972 
2973 
2974 
2975  IndexSet
2976  TrilinosPayload::locally_owned_domain_indices() const
2977  {
2978  return IndexSet(domain_map);
2979  }
2980 
2981 
2982 
2983  IndexSet
2984  TrilinosPayload::locally_owned_range_indices() const
2985  {
2986  return IndexSet(range_map);
2987  }
2988 
2989 
2990 
2991  MPI_Comm
2992  TrilinosPayload::get_mpi_communicator() const
2993  {
2994 # ifdef DEAL_II_WITH_MPI
2995  return communicator.Comm();
2996 # else
2997  return MPI_COMM_SELF;
2998 # endif
2999  }
3000 
3001 
3002 
3003  void
3004  TrilinosPayload::transpose()
3005  {
3006  SetUseTranspose(!use_transpose);
3007  }
3008 
3009 
3010 
3011  bool
3012  TrilinosPayload::UseTranspose() const
3013  {
3014  return use_transpose;
3015  }
3016 
3017 
3018 
3019  int
3020  TrilinosPayload::SetUseTranspose(bool UseTranspose)
3021  {
3022  if (use_transpose != UseTranspose)
3023  {
3024  use_transpose = UseTranspose;
3025  std::swap(domain_map, range_map);
3026  vmult.swap(Tvmult);
3027  inv_vmult.swap(inv_Tvmult);
3028  }
3029  return 0;
3030  }
3031 
3032 
3033 
3034  int
3035  TrilinosPayload::Apply(const VectorType &X, VectorType &Y) const
3036  {
3037  // The transposedness of the operations is taken care of
3038  // when we hit the transpose flag.
3039  vmult(Y, X);
3040  return 0;
3041  }
3042 
3043 
3044 
3045  int
3046  TrilinosPayload::ApplyInverse(const VectorType &Y, VectorType &X) const
3047  {
3048  // The transposedness of the operations is taken care of
3049  // when we hit the transpose flag.
3050  inv_vmult(X, Y);
3051  return 0;
3052  }
3053 
3054 
3055 
3056  const char *
3057  TrilinosPayload::Label() const
3058  {
3059  return "TrilinosPayload";
3060  }
3061 
3062 
3063 
3064  const Epetra_Comm &
3065  TrilinosPayload::Comm() const
3066  {
3067  return communicator;
3068  }
3069 
3070 
3071 
3072  const Epetra_Map &
3073  TrilinosPayload::OperatorDomainMap() const
3074  {
3075  return domain_map;
3076  }
3077 
3078 
3079 
3080  const Epetra_Map &
3081  TrilinosPayload::OperatorRangeMap() const
3082  {
3083  return range_map;
3084  }
3085 
3086 
3087 
3088  bool
3089  TrilinosPayload::HasNormInf() const
3090  {
3091  return false;
3092  }
3093 
3094 
3095 
3096  double
3097  TrilinosPayload::NormInf() const
3098  {
3099  AssertThrow(false, ExcNotImplemented());
3100  return 0.0;
3101  }
3102 
3103 
3104 
3105  TrilinosPayload
3106  operator+(const TrilinosPayload &first_op,
3107  const TrilinosPayload &second_op)
3108  {
3109  using Domain = typename TrilinosPayload::Domain;
3110  using Range = typename TrilinosPayload::Range;
3111  using Intermediate = typename TrilinosPayload::VectorType;
3112  using GVMVectorType = TrilinosWrappers::MPI::Vector;
3113 
3114  Assert(first_op.locally_owned_domain_indices() ==
3115  second_op.locally_owned_domain_indices(),
3116  ExcMessage(
3117  "Operators are set to work on incompatible IndexSets."));
3118  Assert(first_op.locally_owned_range_indices() ==
3119  second_op.locally_owned_range_indices(),
3120  ExcMessage(
3121  "Operators are set to work on incompatible IndexSets."));
3122 
3123  TrilinosPayload return_op(first_op, second_op);
3124 
3125  // Capture by copy so the payloads are always valid
3126  return_op.vmult = [first_op, second_op](Range & tril_dst,
3127  const Domain &tril_src) {
3128  // Duplicated from LinearOperator::operator*
3129  // TODO: Template the constructor on GrowingVectorMemory vector type?
3130  GrowingVectorMemory<GVMVectorType> vector_memory;
3131  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3132 
3133  // Initialize intermediate vector
3134  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3135  i->reinit(IndexSet(first_op_init_map),
3136  first_op.get_mpi_communicator(),
3137  /*bool omit_zeroing_entries =*/true);
3138 
3139  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3140  const size_type i_local_size = i->end() - i->begin();
3141  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3142  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3143  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3144  (void)second_op_init_map;
3145  Intermediate tril_int(View,
3146  first_op_init_map,
3147  const_cast<TrilinosScalar *>(i->begin()),
3148  i_local_size,
3149  1);
3150 
3151  // These operators may themselves be transposed or not, so we let them
3152  // decide what the intended outcome is
3153  second_op.Apply(tril_src, tril_int);
3154  first_op.Apply(tril_src, tril_dst);
3155  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3156  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3157  };
3158 
3159  return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
3160  const Range &tril_src) {
3161  // Duplicated from LinearOperator::operator*
3162  // TODO: Template the constructor on GrowingVectorMemory vector type?
3163  GrowingVectorMemory<GVMVectorType> vector_memory;
3164  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3165 
3166  // These operators may themselves be transposed or not, so we let them
3167  // decide what the intended outcome is
3168  // We must first transpose the operators to get the right IndexSets
3169  // for the input, intermediate and result vectors
3170  const_cast<TrilinosPayload &>(first_op).transpose();
3171  const_cast<TrilinosPayload &>(second_op).transpose();
3172 
3173  // Initialize intermediate vector
3174  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3175  i->reinit(IndexSet(first_op_init_map),
3176  first_op.get_mpi_communicator(),
3177  /*bool omit_zeroing_entries =*/true);
3178 
3179  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3180  const size_type i_local_size = i->end() - i->begin();
3181  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3182  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3183  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3184  (void)second_op_init_map;
3185  Intermediate tril_int(View,
3186  first_op_init_map,
3187  const_cast<TrilinosScalar *>(i->begin()),
3188  i_local_size,
3189  1);
3190 
3191  // These operators may themselves be transposed or not, so we let them
3192  // decide what the intended outcome is
3193  second_op.Apply(tril_src, tril_int);
3194  first_op.Apply(tril_src, tril_dst);
3195  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3196  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3197 
3198  // Reset transpose flag
3199  const_cast<TrilinosPayload &>(first_op).transpose();
3200  const_cast<TrilinosPayload &>(second_op).transpose();
3201  };
3202 
3203  return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
3204  const Range &tril_src) {
3205  // Duplicated from LinearOperator::operator*
3206  // TODO: Template the constructor on GrowingVectorMemory vector type?
3207  GrowingVectorMemory<GVMVectorType> vector_memory;
3208  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3209 
3210  // Initialize intermediate vector
3211  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3212  i->reinit(IndexSet(first_op_init_map),
3213  first_op.get_mpi_communicator(),
3214  /*bool omit_zeroing_entries =*/true);
3215 
3216  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3217  const size_type i_local_size = i->end() - i->begin();
3218  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3219  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3220  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3221  (void)second_op_init_map;
3222  Intermediate tril_int(View,
3223  first_op_init_map,
3224  const_cast<TrilinosScalar *>(i->begin()),
3225  i_local_size,
3226  1);
3227 
3228  // These operators may themselves be transposed or not, so we let them
3229  // decide what the intended outcome is
3230  second_op.ApplyInverse(tril_src, tril_int);
3231  first_op.ApplyInverse(tril_src, tril_dst);
3232  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3233  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3234  };
3235 
3236  return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
3237  const Domain &tril_src) {
3238  // Duplicated from LinearOperator::operator*
3239  // TODO: Template the constructor on GrowingVectorMemory vector type?
3240  GrowingVectorMemory<GVMVectorType> vector_memory;
3241  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3242 
3243  // These operators may themselves be transposed or not, so we let them
3244  // decide what the intended outcome is
3245  // We must first transpose the operators to get the right IndexSets
3246  // for the input, intermediate and result vectors
3247  const_cast<TrilinosPayload &>(first_op).transpose();
3248  const_cast<TrilinosPayload &>(second_op).transpose();
3249 
3250  // Initialize intermediate vector
3251  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3252  i->reinit(IndexSet(first_op_init_map),
3253  first_op.get_mpi_communicator(),
3254  /*bool omit_zeroing_entries =*/true);
3255 
3256  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3257  const size_type i_local_size = i->end() - i->begin();
3258  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3259  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3260  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3261  (void)second_op_init_map;
3262  Intermediate tril_int(View,
3263  first_op_init_map,
3264  const_cast<TrilinosScalar *>(i->begin()),
3265  i_local_size,
3266  1);
3267 
3268  // These operators may themselves be transposed or not, so we let them
3269  // decide what the intended outcome is
3270  second_op.ApplyInverse(tril_src, tril_int);
3271  first_op.ApplyInverse(tril_src, tril_dst);
3272  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3273  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3274 
3275  // Reset transpose flag
3276  const_cast<TrilinosPayload &>(first_op).transpose();
3277  const_cast<TrilinosPayload &>(second_op).transpose();
3278  };
3279 
3280  return return_op;
3281  }
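 // Hedged usage note: this payload addition is what backs sums of
 // LinearOperators, e.g.
 //
 //   const auto op_sum = op_A + op_B; // (A + B) x = A x + B x
 //
 // where, as in the lambdas above, both operators are applied to the
 // source vector and the results are accumulated via Epetra's Update().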
3282 
3283 
3284 
3285  TrilinosPayload operator*(const TrilinosPayload &first_op,
3286  const TrilinosPayload &second_op)
3287  {
3288  using Domain = typename TrilinosPayload::Domain;
3289  using Range = typename TrilinosPayload::Range;
3290  using Intermediate = typename TrilinosPayload::VectorType;
3291  using GVMVectorType = TrilinosWrappers::MPI::Vector;
3292 
3293  AssertThrow(first_op.locally_owned_domain_indices() ==
3294  second_op.locally_owned_range_indices(),
3295  ExcMessage(
3296  "Operators are set to work on incompatible IndexSets."));
3297 
3298  TrilinosPayload return_op(first_op, second_op);
3299 
3300  // Capture by copy so the payloads are always valid
3301  return_op.vmult = [first_op, second_op](Range & tril_dst,
3302  const Domain &tril_src) {
3303  // Duplicated from LinearOperator::operator*
3304  // TODO: Template the constructor on GrowingVectorMemory vector type?
3305  GrowingVectorMemory<GVMVectorType> vector_memory;
3306  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3307 
3308  // Initialize intermediate vector
3309  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3310  i->reinit(IndexSet(first_op_init_map),
3311  first_op.get_mpi_communicator(),
3312  /*bool omit_zeroing_entries =*/true);
3313 
3314  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3315  const size_type i_local_size = i->end() - i->begin();
3316  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3317  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3318  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3319  (void)second_op_init_map;
3320  Intermediate tril_int(View,
3321  first_op_init_map,
3322  const_cast<TrilinosScalar *>(i->begin()),
3323  i_local_size,
3324  1);
3325 
3326  // These operators may themselves be transposed or not, so we let them
3327  // decide what the intended outcome is
3328  second_op.Apply(tril_src, tril_int);
3329  first_op.Apply(tril_int, tril_dst);
3330  };
3331 
3332  return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
3333  const Range &tril_src) {
3334  // Duplicated from LinearOperator::operator*
3335  // TODO: Template the constructor on GrowingVectorMemory vector type?
3336  GrowingVectorMemory<GVMVectorType> vector_memory;
3337  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3338 
3339  // These operators may themselves be transposed or not, so we let them
3340  // decide what the intended outcome is
3341  // We must first transpose the operators to get the right IndexSets
3342  // for the input, intermediate and result vectors
3343  const_cast<TrilinosPayload &>(first_op).transpose();
3344  const_cast<TrilinosPayload &>(second_op).transpose();
3345 
3346  // Initialize intermediate vector
3347  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3348  i->reinit(IndexSet(first_op_init_map),
3349  first_op.get_mpi_communicator(),
3350  /*bool omit_zeroing_entries =*/true);
3351 
3352  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3353  const size_type i_local_size = i->end() - i->begin();
3354  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3355  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3356  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3357  (void)second_op_init_map;
3358  Intermediate tril_int(View,
3359  first_op_init_map,
3360  const_cast<TrilinosScalar *>(i->begin()),
3361  i_local_size,
3362  1);
3363 
3364  // Apply the operators in the reverse order to vmult
3365  first_op.Apply(tril_src, tril_int);
3366  second_op.Apply(tril_int, tril_dst);
3367 
3368  // Reset transpose flag
3369  const_cast<TrilinosPayload &>(first_op).transpose();
3370  const_cast<TrilinosPayload &>(second_op).transpose();
3371  };
3372 
3373  return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
3374  const Range &tril_src) {
3375  // Duplicated from LinearOperator::operator*
3376  // TODO: Template the constructor on GrowingVectorMemory vector type?
3377  GrowingVectorMemory<GVMVectorType> vector_memory;
3378  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3379 
3380  // Initialize intermediate vector
3381  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3382  i->reinit(IndexSet(first_op_init_map),
3383  first_op.get_mpi_communicator(),
3384  /*bool omit_zeroing_entries =*/true);
3385 
3386  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3387  const size_type i_local_size = i->end() - i->begin();
3388  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3389  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3390  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3391  (void)second_op_init_map;
3392  Intermediate tril_int(View,
3393  first_op_init_map,
3394  const_cast<TrilinosScalar *>(i->begin()),
3395  i_local_size,
3396  1);
3397 
3398  // Apply the operators in the reverse order to vmult
3399  // and the same order as Tvmult
3400  first_op.ApplyInverse(tril_src, tril_int);
3401  second_op.ApplyInverse(tril_int, tril_dst);
3402  };
3403 
3404  return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
3405  const Domain &tril_src) {
3406  // Duplicated from LinearOperator::operator*
3407  // TODO: Template the constructor on GrowingVectorMemory vector type?
3408  GrowingVectorMemory<GVMVectorType> vector_memory;
3409  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3410 
3411  // These operators may themselves be transposed or not, so we let them
3412  // decide what the intended outcome is
3413  // We must first transpose the operators to get the right IndexSets
3414  // for the input, intermediate and result vectors
3415  const_cast<TrilinosPayload &>(first_op).transpose();
3416  const_cast<TrilinosPayload &>(second_op).transpose();
3417 
3418  // Initialize intermediate vector
3419  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3420  i->reinit(IndexSet(first_op_init_map),
3421  first_op.get_mpi_communicator(),
3422  /*bool omit_zeroing_entries =*/true);
3423 
3424  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3425  const size_type i_local_size = i->end() - i->begin();
3426  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3427  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3428  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3429  (void)second_op_init_map;
3430  Intermediate tril_int(View,
3431  first_op_init_map,
3432  const_cast<TrilinosScalar *>(i->begin()),
3433  i_local_size,
3434  1);
3435 
3436  // These operators may themselves be transposed or not, so we let them
3437  // decide what the intended outcome is
3438  // Apply the operators in the reverse order to Tvmult
3439  // and the same order as vmult
3440  second_op.ApplyInverse(tril_src, tril_int);
3441  first_op.ApplyInverse(tril_int, tril_dst);
3442 
3443  // Reset transpose flag
3444  const_cast<TrilinosPayload &>(first_op).transpose();
3445  const_cast<TrilinosPayload &>(second_op).transpose();
3446  };
3447 
3448  return return_op;
3449  }
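 // Hedged usage note: payload multiplication backs operator composition,
 //
 //   const auto op_prod = op_A * op_B; // (A B) x = A (B x)
 //
 // where second_op acts first and first_op is then applied to the
 // intermediate vector, mirroring the lambdas above.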
3450 
3451  } // namespace LinearOperatorImplementation
3452  } /* namespace internal */
3453 } /* namespace TrilinosWrappers */
3454 
3455 
3456 
3457 // explicit instantiations
3458 # include "trilinos_sparse_matrix.inst"
3459 
3460 
3461 // TODO: put these instantiations into generic file
3462 namespace TrilinosWrappers
3463 {
3464  template void
3465  SparseMatrix::reinit(const ::SparsityPattern &);
3466 
3467  template void
3468  SparseMatrix::reinit(const DynamicSparsityPattern &);
3469 
3470  template void
3471  SparseMatrix::reinit(const Epetra_Map &,
3472  const ::SparsityPattern &,
3473  const bool);
3474  template void
3475  SparseMatrix::reinit(const Epetra_Map &,
3476  const DynamicSparsityPattern &,
3477  const bool);
3478  template void
3479  SparseMatrix::reinit(const Epetra_Map &,
3480  const Epetra_Map &,
3481  const ::SparsityPattern &,
3482  const bool);
3483  template void
3484  SparseMatrix::reinit(const Epetra_Map &,
3485  const Epetra_Map &,
3486  const DynamicSparsityPattern &,
3487  const bool);
3488  template void
3489  SparseMatrix::reinit(const IndexSet &,
3490  const IndexSet &,
3491  const ::SparsityPattern &,
3492  const MPI_Comm &,
3493  const bool);
3494 
3495  template void
3496  SparseMatrix::reinit(const IndexSet &,
3497  const IndexSet &,
3498  const DynamicSparsityPattern &,
3499  const MPI_Comm &,
3500  const bool);
3501 
3502  template void
3503  SparseMatrix::vmult(MPI::Vector &, const MPI::Vector &) const;
3504 
3505  template void
3506  SparseMatrix::vmult(::Vector<double> &,
3507  const ::Vector<double> &) const;
3508 
3509  template void
3510  SparseMatrix::vmult(
3511  ::LinearAlgebra::distributed::Vector<double> &,
3512  const ::LinearAlgebra::distributed::Vector<double> &) const;
3513 
3514 # ifdef DEAL_II_WITH_MPI
3515 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3516  template void
3517  SparseMatrix::vmult(
3518  ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3519  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3520 
3521  template void
3522  SparseMatrix::vmult(
3523  ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3524  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3525 # endif
3526 
3527  template void
3528  SparseMatrix::vmult(
3529  ::LinearAlgebra::EpetraWrappers::Vector &,
3530  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3531 # endif
3532 
3533  template void
3534  SparseMatrix::Tvmult(MPI::Vector &, const MPI::Vector &) const;
3535 
3536  template void
3537  SparseMatrix::Tvmult(::Vector<double> &,
3538  const ::Vector<double> &) const;
3539 
3540  template void
3541  SparseMatrix::Tvmult(
3542  ::LinearAlgebra::distributed::Vector<double> &,
3543  const ::LinearAlgebra::distributed::Vector<double> &) const;
3544 
3545 # ifdef DEAL_II_WITH_MPI
3546 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3547  template void
3548  SparseMatrix::Tvmult(
3549  ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3550  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3551 
3552  template void
3553  SparseMatrix::Tvmult(
3554  ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3555  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3556 # endif
3557 
3558  template void
3559  SparseMatrix::Tvmult(
3560  ::LinearAlgebra::EpetraWrappers::Vector &,
3561  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3562 # endif
3563 
3564  template void
3565  SparseMatrix::vmult_add(MPI::Vector &, const MPI::Vector &) const;
3566 
3567  template void
3568  SparseMatrix::vmult_add(::Vector<double> &,
3569  const ::Vector<double> &) const;
3570 
3571  template void
3572  SparseMatrix::vmult_add(
3573  ::LinearAlgebra::distributed::Vector<double> &,
3574  const ::LinearAlgebra::distributed::Vector<double> &) const;
3575 
3576 # ifdef DEAL_II_WITH_MPI
3577 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3578  template void
3579  SparseMatrix::vmult_add(
3580  ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3581  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3582 
3583  template void
3584  SparseMatrix::vmult_add(
3585  ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3586  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3587 # endif
3588 
3589  template void
3590  SparseMatrix::vmult_add(
3591  ::LinearAlgebra::EpetraWrappers::Vector &,
3592  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3593 # endif
3594 
3595  template void
3596  SparseMatrix::Tvmult_add(MPI::Vector &, const MPI::Vector &) const;
3597 
3598  template void
3599  SparseMatrix::Tvmult_add(::Vector<double> &,
3600  const ::Vector<double> &) const;
3601 
3602  template void
3603  SparseMatrix::Tvmult_add(
3604  ::LinearAlgebra::distributed::Vector<double> &,
3605  const ::LinearAlgebra::distributed::Vector<double> &) const;
3606 
3607 # ifdef DEAL_II_WITH_MPI
3608 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3609  template void
3610  SparseMatrix::Tvmult_add(
3611  ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3612  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3613 
3614  template void
3615  SparseMatrix::Tvmult_add(
3616  ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3617  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3618 # endif
3619 
3620  template void
3621  SparseMatrix::Tvmult_add(
3622  ::LinearAlgebra::EpetraWrappers::Vector &,
3623  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3624 # endif
3625 } // namespace TrilinosWrappers
3626 
3627 DEAL_II_NAMESPACE_CLOSE
3628 
3629 #endif // DEAL_II_WITH_TRILINOS