trilinos_sparse_matrix.cc

// ---------------------------------------------------------------------
//
// Copyright (C) 2008 - 2022 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/lac/trilinos_sparse_matrix.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/base/index_set.h>
#  include <deal.II/base/utilities.h>

#  include <deal.II/lac/dynamic_sparsity_pattern.h>
#  include <deal.II/lac/full_matrix.h>
#  include <deal.II/lac/sparse_matrix.h>
#  include <deal.II/lac/sparsity_pattern.h>
#  include <deal.II/lac/trilinos_epetra_vector.h>
#  include <deal.II/lac/trilinos_index_access.h>
#  include <deal.II/lac/trilinos_sparsity_pattern.h>
#  include <deal.II/lac/trilinos_tpetra_vector.h>
#  include <deal.II/lac/trilinos_vector.h>

#  include <boost/container/small_vector.hpp>

#  ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
#    include <EpetraExt_MatrixMatrix.h>
#  endif
#  include <Epetra_Export.h>
#  include <Teuchos_RCP.hpp>
#  include <ml_epetra_utils.h>
#  include <ml_struct.h>

#  include <memory>

DEAL_II_NAMESPACE_OPEN

namespace TrilinosWrappers
{
  namespace internal
  {
    template <typename VectorType>
    typename VectorType::value_type *
    begin(VectorType &V)
    {
      return V.begin();
    }

    template <typename VectorType>
    const typename VectorType::value_type *
    begin(const VectorType &V)
    {
      return V.begin();
    }

    template <typename VectorType>
    typename VectorType::value_type *
    end(VectorType &V)
    {
      return V.end();
    }

    template <typename VectorType>
    const typename VectorType::value_type *
    end(const VectorType &V)
    {
      return V.end();
    }

    template <>
    double *
    begin(LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0];
    }

    template <>
    const double *
    begin(const LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0];
    }

    template <>
    double *
    end(LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
    }

    template <>
    const double *
    end(const LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
    }

#  ifdef DEAL_II_TRILINOS_WITH_TPETRA
    template <typename Number>
    Number *
    begin(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
    {
      return V.trilinos_vector().getDataNonConst().get();
    }

    template <typename Number>
    const Number *
    begin(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
    {
      return V.trilinos_vector().getData().get();
    }

    template <typename Number>
    Number *
    end(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
    {
      return V.trilinos_vector().getDataNonConst().get() +
             V.trilinos_vector().getLocalLength();
    }

    template <typename Number>
    const Number *
    end(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
    {
      return V.trilinos_vector().getData().get() +
             V.trilinos_vector().getLocalLength();
    }
#  endif
  } // namespace internal
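
  // These helpers give the algorithms below a uniform raw-pointer view of
  // the locally owned elements of all supported vector types. A minimal
  // usage sketch (illustration only; `v` is assumed to be any of the vector
  // types handled above):
  //
  //   template <typename VectorType>
  //   TrilinosScalar
  //   local_sum(const VectorType &v)
  //   {
  //     TrilinosScalar sum = 0.;
  //     for (const auto *p = internal::begin(v); p != internal::end(v); ++p)
  //       sum += *p;
  //     return sum;
  //   }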


  namespace SparseMatrixIterators
  {
    void
    AccessorBase::visit_present_row()
    {
      // if we are asked to visit the past-the-end line, then simply
      // release all our caches and go on with life.
      //
      // do the same if the row we're supposed to visit is not locally
      // owned. this is simply going to make non-locally owned rows
      // look like they're empty
      if ((this->a_row == matrix->m()) ||
          (matrix->in_local_range(this->a_row) == false))
        {
          colnum_cache.reset();
          value_cache.reset();

          return;
        }

      // get a representation of the present row
      int                               ncols;
      TrilinosWrappers::types::int_type colnums = matrix->n();
      if (value_cache.get() == nullptr)
        {
          value_cache =
            std::make_shared<std::vector<TrilinosScalar>>(matrix->n());
          colnum_cache = std::make_shared<std::vector<size_type>>(matrix->n());
        }
      else
        {
          value_cache->resize(matrix->n());
          colnum_cache->resize(matrix->n());
        }

      int ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy(
        this->a_row,
        colnums,
        ncols,
        value_cache->data(),
        reinterpret_cast<TrilinosWrappers::types::int_type *>(
          colnum_cache->data()));
      value_cache->resize(ncols);
      colnum_cache->resize(ncols);
      AssertThrow(ierr == 0, ExcTrilinosError(ierr));

      // copy it into our caches if the line isn't empty. if it is, then
      // we've done something wrong, since we shouldn't have initialized an
      // iterator for an empty line (what would it point to?)
    }
  } // namespace SparseMatrixIterators
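
  // Usage sketch (illustration only): the cached row representation built
  // above is what makes a read-only loop over a compressed matrix work;
  // `A` is assumed to be a filled TrilinosWrappers::SparseMatrix.
  //
  //   for (auto it = A.begin(); it != A.end(); ++it)
  //     std::cout << "A(" << it->row() << ',' << it->column()
  //               << ") = " << it->value() << std::endl;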


  // The constructor is actually the only point where we have to check
  // whether we build a serial or a parallel Trilinos matrix. Actually, it
  // does not even matter how many threads there are, but only if we use an
  // MPI compiler or a standard compiler. So, even one thread on a
  // configuration with MPI will still get a parallel interface.
  SparseMatrix::SparseMatrix()
    : column_space_map(new Epetra_Map(0, 0, Utilities::Trilinos::comm_self()))
    , matrix(
        new Epetra_FECrsMatrix(View, *column_space_map, *column_space_map, 0))
    , last_action(Zero)
    , compressed(true)
  {
    matrix->FillComplete();
  }


  SparseMatrix::SparseMatrix(const size_type    m,
                             const size_type    n,
                             const unsigned int n_max_entries_per_row)
    : column_space_map(
        new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
                       0,
                       Utilities::Trilinos::comm_self()))
    ,
    // on one processor only, we know how the columns of the matrix will be
    // distributed (everything on one processor), so we can hand in this
    // information to the constructor. we can't do so in parallel, where the
    // information from columns is only available when entries have been
    // added
    matrix(new Epetra_FECrsMatrix(
      Copy,
      Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
                 0,
                 Utilities::Trilinos::comm_self()),
      *column_space_map,
      n_max_entries_per_row,
      false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const size_type m,
                             const size_type n,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(
        new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
                       0,
                       Utilities::Trilinos::comm_self()))
    , matrix(new Epetra_FECrsMatrix(
        Copy,
        Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
                   0,
                   Utilities::Trilinos::comm_self()),
        *column_space_map,
        reinterpret_cast<int *>(
          const_cast<unsigned int *>(n_entries_per_row.data())),
        false))
    , last_action(Zero)
    , compressed(false)
  {}


  SparseMatrix::SparseMatrix(const IndexSet &   parallel_partitioning,
                             const MPI_Comm &   communicator,
                             const unsigned int n_max_entries_per_row)
    : column_space_map(new Epetra_Map(
        parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(Copy,
                                    *column_space_map,
                                    n_max_entries_per_row,
                                    false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet &parallel_partitioning,
                             const MPI_Comm &communicator,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(new Epetra_Map(
        parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(Copy,
                                    *column_space_map,
                                    reinterpret_cast<int *>(
                                      const_cast<unsigned int *>(
                                        n_entries_per_row.data())),
                                    false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
                             const IndexSet &col_parallel_partitioning,
                             const MPI_Comm &communicator,
                             const size_type n_max_entries_per_row)
    : column_space_map(new Epetra_Map(
        col_parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(
        Copy,
        row_parallel_partitioning.make_trilinos_map(communicator, false),
        n_max_entries_per_row,
        false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
                             const IndexSet &col_parallel_partitioning,
                             const MPI_Comm &communicator,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(new Epetra_Map(
        col_parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(
        Copy,
        row_parallel_partitioning.make_trilinos_map(communicator, false),
        reinterpret_cast<int *>(
          const_cast<unsigned int *>(n_entries_per_row.data())),
        false))
    , last_action(Zero)
    , compressed(false)
  {}
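
  // A minimal parallel construction sketch (illustration only; assumes an
  // IndexSet `locally_owned` describing the row partition and an MPI
  // communicator `mpi_communicator`):
  //
  //   TrilinosWrappers::SparseMatrix A(locally_owned,
  //                                    mpi_communicator,
  //                                    /*n_max_entries_per_row=*/5);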


  SparseMatrix::SparseMatrix(const SparsityPattern &sparsity_pattern)
    : column_space_map(new Epetra_Map(sparsity_pattern.domain_partitioner()))
    , matrix(
        new Epetra_FECrsMatrix(Copy,
                               sparsity_pattern.trilinos_sparsity_pattern(),
                               false))
    , last_action(Zero)
    , compressed(true)
  {
    Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true,
           ExcMessage(
             "The Trilinos sparsity pattern has not been compressed."));
    compress(VectorOperation::insert);
  }



  SparseMatrix::SparseMatrix(SparseMatrix &&other) noexcept
    : column_space_map(std::move(other.column_space_map))
    , matrix(std::move(other.matrix))
    , nonlocal_matrix(std::move(other.nonlocal_matrix))
    , nonlocal_matrix_exporter(std::move(other.nonlocal_matrix_exporter))
    , last_action(other.last_action)
    , compressed(other.compressed)
  {
    other.last_action = Zero;
    other.compressed  = false;
  }


  void
  SparseMatrix::copy_from(const SparseMatrix &rhs)
  {
    if (this == &rhs)
      return;

    nonlocal_matrix.reset();
    nonlocal_matrix_exporter.reset();

    // check whether we need to update the whole matrix layout (we have
    // different maps or if we detect a row where the columns of the two
    // matrices do not match)
    bool needs_deep_copy =
      !matrix->RowMap().SameAs(rhs.matrix->RowMap()) ||
      !matrix->ColMap().SameAs(rhs.matrix->ColMap()) ||
      !matrix->DomainMap().SameAs(rhs.matrix->DomainMap()) ||
      n_nonzero_elements() != rhs.n_nonzero_elements();
    if (!needs_deep_copy)
      {
        // Try to copy all the rows of the matrix one by one. In case of error
        // (i.e., the column indices are different), we need to abort and blow
        // away the matrix.
        for (const auto row : locally_owned_range_indices())
          {
            const int row_local = matrix->RowMap().LID(
              static_cast<TrilinosWrappers::types::int_type>(row));
            Assert((row_local >= 0), ExcAccessToNonlocalRow(row));

            int             n_entries, rhs_n_entries;
            TrilinosScalar *value_ptr, *rhs_value_ptr;
            int *           index_ptr, *rhs_index_ptr;
            int             ierr = rhs.matrix->ExtractMyRowView(row_local,
                                                    rhs_n_entries,
                                                    rhs_value_ptr,
                                                    rhs_index_ptr);
            (void)ierr;
            Assert(ierr == 0, ExcTrilinosError(ierr));

            ierr = matrix->ExtractMyRowView(row_local,
                                            n_entries,
                                            value_ptr,
                                            index_ptr);
            Assert(ierr == 0, ExcTrilinosError(ierr));

            if (n_entries != rhs_n_entries ||
                std::memcmp(static_cast<void *>(index_ptr),
                            static_cast<void *>(rhs_index_ptr),
                            sizeof(int) * n_entries) != 0)
              {
                needs_deep_copy = true;
                break;
              }

            for (int i = 0; i < n_entries; ++i)
              value_ptr[i] = rhs_value_ptr[i];
          }
      }

    if (needs_deep_copy)
      {
        column_space_map =
          std::make_unique<Epetra_Map>(rhs.trilinos_matrix().DomainMap());

        // release memory before reallocation
        matrix = std::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);

        matrix->FillComplete(*column_space_map, matrix->RowMap());
      }

    if (rhs.nonlocal_matrix.get() != nullptr)
      nonlocal_matrix =
        std::make_unique<Epetra_CrsMatrix>(Copy, rhs.nonlocal_matrix->Graph());
  }
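
  // Illustration only: when the two layouts already match, the loop above
  // reduces to a plain value copy; otherwise the structure is rebuilt:
  //
  //   TrilinosWrappers::SparseMatrix B;
  //   B.copy_from(A); // reuses B's structure whenever it matches A's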


  namespace
  {
    using size_type = SparseMatrix::size_type;

    template <typename SparsityPatternType>
    void
    reinit_matrix(const IndexSet &             row_parallel_partitioning,
                  const IndexSet &             column_parallel_partitioning,
                  const SparsityPatternType &  sparsity_pattern,
                  const bool                   exchange_data,
                  const MPI_Comm &             communicator,
                  std::unique_ptr<Epetra_Map> &column_space_map,
                  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
                  std::unique_ptr<Epetra_CrsMatrix> &  nonlocal_matrix,
                  std::unique_ptr<Epetra_Export> &nonlocal_matrix_exporter)
    {
      // release memory before reallocation
      matrix.reset();
      nonlocal_matrix.reset();
      nonlocal_matrix_exporter.reset();

      column_space_map = std::make_unique<Epetra_Map>(
        column_parallel_partitioning.make_trilinos_map(communicator, false));

      if (column_space_map->Comm().MyPID() == 0)
        {
          AssertDimension(sparsity_pattern.n_rows(),
                          row_parallel_partitioning.size());
          AssertDimension(sparsity_pattern.n_cols(),
                          column_parallel_partitioning.size());
        }

      Epetra_Map row_space_map =
        row_parallel_partitioning.make_trilinos_map(communicator, false);

      // if we want to exchange data, build a usual Trilinos sparsity pattern
      // and let that handle the exchange. otherwise, manually create a
      // CrsGraph, which consumes considerably less memory because it can set
      // the correct number of indices right from the start
      if (exchange_data)
        {
          SparsityPattern trilinos_sparsity;
          trilinos_sparsity.reinit(row_parallel_partitioning,
                                   column_parallel_partitioning,
                                   sparsity_pattern,
                                   communicator,
                                   exchange_data);
          matrix = std::make_unique<Epetra_FECrsMatrix>(
            Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);

          return;
        }

      const size_type first_row = TrilinosWrappers::min_my_gid(row_space_map),
                      last_row =
                        TrilinosWrappers::max_my_gid(row_space_map) + 1;
      std::vector<int> n_entries_per_row(last_row - first_row);

      for (size_type row = first_row; row < last_row; ++row)
        n_entries_per_row[row - first_row] = sparsity_pattern.row_length(row);

      // The deal.II notation of a sparsity pattern corresponds to the Epetra
      // concept of a graph. Hence, we generate a graph by copying the
      // sparsity pattern into it, and then build up the matrix from the
      // graph. This is considerably faster than directly filling elements
      // into the matrix. Moreover, it consumes less memory, since the
      // internal reordering is done on ints only, and we can leave the
      // doubles aside.

      // for more than one processor, need to specify only row map first and
      // let the matrix entries decide about the column map (which says which
      // columns are present in the matrix, not to be confused with the
      // col_map that tells how the domain dofs of the matrix will be
      // distributed). for only one processor, we can directly assign the
      // columns as well. Compare this with bug # 4123 in the Sandia Bugzilla.
      std::unique_ptr<Epetra_CrsGraph> graph;
      if (row_space_map.Comm().NumProc() > 1)
        graph = std::make_unique<Epetra_CrsGraph>(Copy,
                                                  row_space_map,
                                                  n_entries_per_row.data(),
                                                  true);
      else
        graph = std::make_unique<Epetra_CrsGraph>(Copy,
                                                  row_space_map,
                                                  *column_space_map,
                                                  n_entries_per_row.data(),
                                                  true);

      // This function assumes that the sparsity pattern sits on all
      // processors (completely). The parallel version uses an Epetra graph
      // that is already distributed.

      // now insert the indices
      std::vector<TrilinosWrappers::types::int_type> row_indices;

      for (size_type row = first_row; row < last_row; ++row)
        {
          const int row_length = sparsity_pattern.row_length(row);
          if (row_length == 0)
            continue;

          row_indices.resize(row_length, -1);
          {
            typename SparsityPatternType::iterator p =
              sparsity_pattern.begin(row);
            for (size_type col = 0; p != sparsity_pattern.end(row); ++p, ++col)
              row_indices[col] = p->column();
          }
          graph->Epetra_CrsGraph::InsertGlobalIndices(row,
                                                      row_length,
                                                      row_indices.data());
        }

      // Eventually, optimize the graph structure (sort indices, make memory
      // contiguous, etc). note that the documentation of the function indeed
      // states that we first need to provide the column (domain) map and then
      // the row (range) map
      graph->FillComplete(*column_space_map, row_space_map);
      graph->OptimizeStorage();

      // check whether we got the number of columns right.
      const TrilinosWrappers::types::int_type n_global_cols =
        TrilinosWrappers::n_global_cols(*graph);
      AssertDimension(sparsity_pattern.n_cols(), n_global_cols);
      (void)n_global_cols;

      // And now finally generate the matrix.
      matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
    }


    // for the non-local graph, we need to circumvent the problem that some
    // processors will not add into the non-local graph at all: We do not want
    // to insert dummy elements on >5000 processors because that gets very
    // slow. Thus, we set the flag in Epetra_CrsGraph that states that the
    // indices are global. Since the respective function is protected, we need
    // to expose it by deriving a class from Epetra_CrsGraph for the purpose
    // of creating the data structure
    class Epetra_CrsGraphMod : public Epetra_CrsGraph
    {
    public:
      Epetra_CrsGraphMod(const Epetra_Map &row_map,
                         const int *       n_entries_per_row)
        : Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
      {}

      void
      SetIndicesAreGlobal()
      {
        this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
      }
    };
597 
598 
599 
600  // specialization for DynamicSparsityPattern which can provide us with
601  // more information about the non-locally owned rows
602  template <>
603  void
604  reinit_matrix(const IndexSet & row_parallel_partitioning,
605  const IndexSet & column_parallel_partitioning,
606  const DynamicSparsityPattern &sparsity_pattern,
607  const bool exchange_data,
608  const MPI_Comm & communicator,
609  std::unique_ptr<Epetra_Map> & column_space_map,
610  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
611  std::unique_ptr<Epetra_CrsMatrix> & nonlocal_matrix,
612  std::unique_ptr<Epetra_Export> & nonlocal_matrix_exporter)
613  {
614  matrix.reset();
615  nonlocal_matrix.reset();
616  nonlocal_matrix_exporter.reset();
617 
618  column_space_map = std::make_unique<Epetra_Map>(
619  column_parallel_partitioning.make_trilinos_map(communicator, false));
620 
621  AssertDimension(sparsity_pattern.n_rows(),
622  row_parallel_partitioning.size());
623  AssertDimension(sparsity_pattern.n_cols(),
624  column_parallel_partitioning.size());
625 
626  Epetra_Map row_space_map =
627  row_parallel_partitioning.make_trilinos_map(communicator, false);
628 
629  IndexSet relevant_rows(sparsity_pattern.row_index_set());
630  // serial case
631  if (relevant_rows.size() == 0)
632  {
633  relevant_rows.set_size(
634  TrilinosWrappers::n_global_elements(row_space_map));
635  relevant_rows.add_range(
636  0, TrilinosWrappers::n_global_elements(row_space_map));
637  }
638  relevant_rows.compress();
639  Assert(relevant_rows.n_elements() >=
640  static_cast<unsigned int>(row_space_map.NumMyElements()),
641  ExcMessage(
642  "Locally relevant rows of sparsity pattern must contain "
643  "all locally owned rows"));
644 
645  // check whether the relevant rows correspond to exactly the same map as
646  // the owned rows. In that case, do not create the nonlocal graph and
647  // fill the columns by demand
648  const bool have_ghost_rows = [&]() {
649  const std::vector<::types::global_dof_index> indices =
650  relevant_rows.get_index_vector();
651  Epetra_Map relevant_map(
653  TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
654  (indices.empty() ?
655  nullptr :
656  reinterpret_cast<const TrilinosWrappers::types::int_type *>(
657  indices.data())),
658  0,
659  row_space_map.Comm());
660  return !relevant_map.SameAs(row_space_map);
661  }();
662 
663  const unsigned int n_rows = relevant_rows.n_elements();
664  std::vector<TrilinosWrappers::types::int_type> ghost_rows;
665  std::vector<int> n_entries_per_row(row_space_map.NumMyElements());
666  std::vector<int> n_entries_per_ghost_row;
667  for (unsigned int i = 0, own = 0; i < n_rows; ++i)
668  {
669  const TrilinosWrappers::types::int_type global_row =
670  relevant_rows.nth_index_in_set(i);
671  if (row_space_map.MyGID(global_row))
672  n_entries_per_row[own++] = sparsity_pattern.row_length(global_row);
673  else if (sparsity_pattern.row_length(global_row) > 0)
674  {
675  ghost_rows.push_back(global_row);
676  n_entries_per_ghost_row.push_back(
677  sparsity_pattern.row_length(global_row));
678  }
679  }
680 
681  Epetra_Map off_processor_map(-1,
682  ghost_rows.size(),
683  (ghost_rows.size() > 0) ?
684  (ghost_rows.data()) :
685  nullptr,
686  0,
687  row_space_map.Comm());
688 
689  std::unique_ptr<Epetra_CrsGraph> graph;
690  std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
691  if (row_space_map.Comm().NumProc() > 1)
692  {
693  graph =
694  std::make_unique<Epetra_CrsGraph>(Copy,
695  row_space_map,
696  (n_entries_per_row.size() > 0) ?
697  (n_entries_per_row.data()) :
698  nullptr,
699  exchange_data ? false : true);
700  if (have_ghost_rows == true)
701  nonlocal_graph = std::make_unique<Epetra_CrsGraphMod>(
702  off_processor_map, n_entries_per_ghost_row.data());
703  }
704  else
705  graph =
706  std::make_unique<Epetra_CrsGraph>(Copy,
707  row_space_map,
708  *column_space_map,
709  (n_entries_per_row.size() > 0) ?
710  (n_entries_per_row.data()) :
711  nullptr,
712  true);
713 
714  // now insert the indices, select between the right matrix
715  std::vector<TrilinosWrappers::types::int_type> row_indices;
716 
717  for (unsigned int i = 0; i < n_rows; ++i)
718  {
719  const TrilinosWrappers::types::int_type global_row =
720  relevant_rows.nth_index_in_set(i);
721  const int row_length = sparsity_pattern.row_length(global_row);
722  if (row_length == 0)
723  continue;
724 
725  row_indices.resize(row_length, -1);
726  for (int col = 0; col < row_length; ++col)
727  row_indices[col] = sparsity_pattern.column_number(global_row, col);
728 
729  if (row_space_map.MyGID(global_row))
730  graph->InsertGlobalIndices(global_row,
731  row_length,
732  row_indices.data());
733  else
734  {
735  Assert(nonlocal_graph.get() != nullptr, ExcInternalError());
736  nonlocal_graph->InsertGlobalIndices(global_row,
737  row_length,
738  row_indices.data());
739  }
740  }
741 
742  // finalize nonlocal graph and create nonlocal matrix
743  if (nonlocal_graph.get() != nullptr)
744  {
745  // must make sure the IndicesAreGlobal flag is set on all processors
746  // because some processors might not call InsertGlobalIndices (and
747  // we do not want to insert dummy indices on all processors for
748  // large-scale simulations due to the bad impact on performance)
749  nonlocal_graph->SetIndicesAreGlobal();
750  Assert(nonlocal_graph->IndicesAreGlobal() == true,
751  ExcInternalError());
752  nonlocal_graph->FillComplete(*column_space_map, row_space_map);
753  nonlocal_graph->OptimizeStorage();
754 
755  // insert data from nonlocal graph into the final sparsity pattern
756  if (exchange_data)
757  {
758  Epetra_Export exporter(nonlocal_graph->RowMap(), row_space_map);
759  int ierr = graph->Export(*nonlocal_graph, exporter, Add);
760  (void)ierr;
761  Assert(ierr == 0, ExcTrilinosError(ierr));
762  }
763 
764  nonlocal_matrix =
765  std::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);
766  }
767 
768  graph->FillComplete(*column_space_map, row_space_map);
769  graph->OptimizeStorage();
770 
771  AssertDimension(sparsity_pattern.n_cols(),
773 
774  matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
775  }
776  } // namespace
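
  // Illustration only: the specialization above is what a typical parallel
  // setup exercises, e.g. (assuming IndexSets `owned_rows` and
  // `relevant_rows` obtained from a distributed DoFHandler):
  //
  //   DynamicSparsityPattern dsp(relevant_rows);
  //   // ... fill dsp, e.g. via DoFTools::make_sparsity_pattern ...
  //   TrilinosWrappers::SparseMatrix A;
  //   A.reinit(owned_rows, owned_rows, dsp, mpi_communicator,
  //            /*exchange_data=*/true);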


  template <typename SparsityPatternType>
  void
  SparseMatrix::reinit(const SparsityPatternType &sparsity_pattern)
  {
    reinit_matrix(complete_index_set(sparsity_pattern.n_rows()),
                  complete_index_set(sparsity_pattern.n_cols()),
                  sparsity_pattern,
                  false,
                  MPI_COMM_SELF,
                  column_space_map,
                  matrix,
                  nonlocal_matrix,
                  nonlocal_matrix_exporter);
  }



  template <typename SparsityPatternType>
  inline std::enable_if_t<
    !std::is_same<SparsityPatternType, dealii::SparseMatrix<double>>::value>
  SparseMatrix::reinit(const IndexSet &           row_parallel_partitioning,
                       const IndexSet &           col_parallel_partitioning,
                       const SparsityPatternType &sparsity_pattern,
                       const MPI_Comm &           communicator,
                       const bool                 exchange_data)
  {
    reinit_matrix(row_parallel_partitioning,
                  col_parallel_partitioning,
                  sparsity_pattern,
                  exchange_data,
                  communicator,
                  column_space_map,
                  matrix,
                  nonlocal_matrix,
                  nonlocal_matrix_exporter);

    // In the end, the matrix needs to be compressed in order to be really
    // ready.
    last_action = Zero;
    compress(VectorOperation::insert);
  }


  void
  SparseMatrix::reinit(const SparsityPattern &sparsity_pattern)
  {
    matrix.reset();
    nonlocal_matrix_exporter.reset();

    // reinit with a (parallel) Trilinos sparsity pattern.
    column_space_map =
      std::make_unique<Epetra_Map>(sparsity_pattern.domain_partitioner());
    matrix = std::make_unique<Epetra_FECrsMatrix>(
      Copy, sparsity_pattern.trilinos_sparsity_pattern(), false);

    if (sparsity_pattern.nonlocal_graph.get() != nullptr)
      nonlocal_matrix =
        std::make_unique<Epetra_CrsMatrix>(Copy,
                                           *sparsity_pattern.nonlocal_graph);
    else
      nonlocal_matrix.reset();

    last_action = Zero;
    compress(VectorOperation::insert);
  }



  void
  SparseMatrix::reinit(const SparseMatrix &sparse_matrix)
  {
    if (this == &sparse_matrix)
      return;

    column_space_map = std::make_unique<Epetra_Map>(
      sparse_matrix.trilinos_matrix().DomainMap());
    matrix.reset();
    nonlocal_matrix_exporter.reset();
    matrix = std::make_unique<Epetra_FECrsMatrix>(
      Copy, sparse_matrix.trilinos_sparsity_pattern(), false);

    if (sparse_matrix.nonlocal_matrix != nullptr)
      nonlocal_matrix = std::make_unique<Epetra_CrsMatrix>(
        Copy, sparse_matrix.nonlocal_matrix->Graph());
    else
      nonlocal_matrix.reset();

    last_action = Zero;
    compress(VectorOperation::insert);
  }


  template <typename number>
  inline void
  SparseMatrix::reinit(
    const IndexSet &                    row_parallel_partitioning,
    const IndexSet &                    col_parallel_partitioning,
    const dealii::SparseMatrix<number> &dealii_sparse_matrix,
    const MPI_Comm &                    communicator,
    const double                        drop_tolerance,
    const bool                          copy_values,
    const dealii::SparsityPattern *     use_this_sparsity)
  {
    if (copy_values == false)
      {
        // in case we do not copy values, just call the other function.
        if (use_this_sparsity == nullptr)
          reinit(row_parallel_partitioning,
                 col_parallel_partitioning,
                 dealii_sparse_matrix.get_sparsity_pattern(),
                 communicator,
                 false);
        else
          reinit(row_parallel_partitioning,
                 col_parallel_partitioning,
                 *use_this_sparsity,
                 communicator,
                 false);
        return;
      }

    const size_type n_rows = dealii_sparse_matrix.m();

    AssertDimension(row_parallel_partitioning.size(), n_rows);
    AssertDimension(col_parallel_partitioning.size(), dealii_sparse_matrix.n());

    const dealii::SparsityPattern &sparsity_pattern =
      (use_this_sparsity != nullptr) ?
        *use_this_sparsity :
        dealii_sparse_matrix.get_sparsity_pattern();

    if (matrix.get() == nullptr || m() != n_rows ||
        n_nonzero_elements() != sparsity_pattern.n_nonzero_elements())
      {
        reinit(row_parallel_partitioning,
               col_parallel_partitioning,
               sparsity_pattern,
               communicator,
               false);
      }

    // fill the values. the same as above: go through all rows of the
    // matrix, and then all columns. since the sparsity patterns of the
    // input matrix and the specified sparsity pattern might be different,
    // we need to go through the rows of both sparsity structures
    // simultaneously in order to really set the correct values.
    size_type                   maximum_row_length = matrix->MaxNumEntries();
    std::vector<size_type>      row_indices(maximum_row_length);
    std::vector<TrilinosScalar> values(maximum_row_length);

    for (size_type row = 0; row < n_rows; ++row)
      // see if the row is locally stored on this processor
      if (row_parallel_partitioning.is_element(row) == true)
        {
          dealii::SparsityPattern::iterator select_index =
            sparsity_pattern.begin(row);
          typename dealii::SparseMatrix<number>::const_iterator it =
            dealii_sparse_matrix.begin(row);
          size_type col = 0;
          if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
            {
              // optimized diagonal
              AssertDimension(it->column(), row);
              if (std::fabs(it->value()) > drop_tolerance)
                {
                  values[col]        = it->value();
                  row_indices[col++] = it->column();
                }
              ++select_index;
              ++it;
            }

          while (it != dealii_sparse_matrix.end(row) &&
                 select_index != sparsity_pattern.end(row))
            {
              while (select_index->column() < it->column() &&
                     select_index != sparsity_pattern.end(row))
                ++select_index;
              while (it->column() < select_index->column() &&
                     it != dealii_sparse_matrix.end(row))
                ++it;

              if (it == dealii_sparse_matrix.end(row))
                break;
              if (std::fabs(it->value()) > drop_tolerance)
                {
                  values[col]        = it->value();
                  row_indices[col++] = it->column();
                }
              ++select_index;
              ++it;
            }
          set(row,
              col,
              reinterpret_cast<size_type *>(row_indices.data()),
              values.data(),
              false);
        }
    compress(VectorOperation::insert);
  }



  template <typename number>
  void
  SparseMatrix::reinit(
    const dealii::SparseMatrix<number> &dealii_sparse_matrix,
    const double                        drop_tolerance,
    const bool                          copy_values,
    const dealii::SparsityPattern *     use_this_sparsity)
  {
    reinit(complete_index_set(dealii_sparse_matrix.m()),
           complete_index_set(dealii_sparse_matrix.n()),
           dealii_sparse_matrix,
           MPI_COMM_SELF,
           drop_tolerance,
           copy_values,
           use_this_sparsity);
  }
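
  // Illustration only: copying a serial deal.II matrix into a Trilinos
  // matrix while dropping entries of magnitude below 1e-12 (`a` is assumed
  // to be an assembled dealii::SparseMatrix<double>):
  //
  //   TrilinosWrappers::SparseMatrix A;
  //   A.reinit(a, /*drop_tolerance=*/1e-12);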


  void
  SparseMatrix::reinit(const Epetra_CrsMatrix &input_matrix,
                       const bool              copy_values)
  {
    Assert(input_matrix.Filled() == true,
           ExcMessage("Input CrsMatrix has not called FillComplete()!"));

    column_space_map = std::make_unique<Epetra_Map>(input_matrix.DomainMap());

    const Epetra_CrsGraph *graph = &input_matrix.Graph();

    nonlocal_matrix.reset();
    nonlocal_matrix_exporter.reset();
    matrix.reset();
    matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);

    matrix->FillComplete(*column_space_map, input_matrix.RangeMap(), true);

    if (copy_values == true)
      {
        // point to the first data entry in the two
        // matrices and copy the content
        const TrilinosScalar *in_values   = input_matrix[0];
        TrilinosScalar *      values      = (*matrix)[0];
        const size_type       my_nonzeros = input_matrix.NumMyNonzeros();
        std::memcpy(values, in_values, my_nonzeros * sizeof(TrilinosScalar));
      }

    last_action = Zero;
    compress(VectorOperation::insert);
  }


  void
  SparseMatrix::compress(VectorOperation::values operation)
  {
    Epetra_CombineMode mode = last_action;
    if (last_action == Zero)
      {
        if ((operation == VectorOperation::add) ||
            (operation == VectorOperation::unknown))
          mode = Add;
        else if (operation == VectorOperation::insert)
          mode = Insert;
        else
          Assert(
            false,
            ExcMessage(
              "compress() can only be called with VectorOperation add, insert, or unknown"));
      }
    else
      {
        Assert(((last_action == Add) &&
                (operation != VectorOperation::insert)) ||
                 ((last_action == Insert) &&
                  (operation != VectorOperation::add)),
               ExcMessage("Operation and argument to compress() do not match"));
      }

    // flush buffers
    int ierr;
    if (nonlocal_matrix.get() != nullptr && mode == Add)
      {
        // do only export in case of an add() operation, otherwise the owning
        // processor must have set the correct entry
        nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
        if (nonlocal_matrix_exporter.get() == nullptr)
          nonlocal_matrix_exporter =
            std::make_unique<Epetra_Export>(nonlocal_matrix->RowMap(),
                                            matrix->RowMap());
        ierr =
          matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
        ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
        nonlocal_matrix->PutScalar(0);
      }
    else
      ierr =
        matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true, mode);

    AssertThrow(ierr == 0, ExcTrilinosError(ierr));

    ierr = matrix->OptimizeStorage();
    AssertThrow(ierr == 0, ExcTrilinosError(ierr));

    last_action = Zero;

    compressed = true;
  }
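
  // Illustration only: the usual assemble-then-compress cycle. Entries may
  // be written with add() or set(), but all processors must finish with a
  // matching, collective compress() call before the matrix can be used:
  //
  //   A.add(i, j, local_contribution); // during assembly, on each process
  //   A.compress(VectorOperation::add); // flushes off-processor buffers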


  void
  SparseMatrix::clear()
  {
    // When we clear the matrix, reset
    // the pointer and generate an
    // empty matrix.
    column_space_map =
      std::make_unique<Epetra_Map>(0, 0, Utilities::Trilinos::comm_self());
    matrix = std::make_unique<Epetra_FECrsMatrix>(View, *column_space_map, 0);
    nonlocal_matrix.reset();
    nonlocal_matrix_exporter.reset();

    matrix->FillComplete();

    compressed = true;
  }



  void
  SparseMatrix::clear_row(const size_type      row,
                          const TrilinosScalar new_diag_value)
  {
    Assert(matrix->Filled() == true, ExcMatrixNotCompressed());

    // Only do this on the rows owned locally on this processor.
    int local_row =
      matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
    if (local_row >= 0)
      {
        TrilinosScalar *values;
        int *           col_indices;
        int             num_entries;
        const int       ierr =
          matrix->ExtractMyRowView(local_row, num_entries, values, col_indices);
        (void)ierr;

        Assert(ierr == 0, ExcTrilinosError(ierr));

        const std::ptrdiff_t diag_index =
          std::find(col_indices, col_indices + num_entries, local_row) -
          col_indices;

        for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
          if (diag_index != j || new_diag_value == 0)
            values[j] = 0.;

        if (diag_index != num_entries)
          values[diag_index] = new_diag_value;
      }
  }



  void
  SparseMatrix::clear_rows(const std::vector<size_type> &rows,
                           const TrilinosScalar          new_diag_value)
  {
    for (const auto row : rows)
      clear_row(row, new_diag_value);
  }
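
  // Illustration only: clear_rows() is typically used to eliminate
  // Dirichlet-constrained rows after assembly, keeping a nonzero diagonal
  // so that the matrix stays invertible:
  //
  //   // hypothetical row indices of constrained degrees of freedom
  //   const std::vector<SparseMatrix::size_type> boundary_rows = {0, 5, 42};
  //   A.clear_rows(boundary_rows, /*new_diag_value=*/1.0);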


  TrilinosScalar
  SparseMatrix::operator()(const size_type i, const size_type j) const
  {
    // Extract local indices in the matrix.
    int trilinos_i =
          matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
        trilinos_j =
          matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
    TrilinosScalar value = 0.;

    // If the data is not on the present processor, we throw an exception.
    // This is one of the two tiny differences to the el(i,j) call, which
    // does not throw any assertions.
    if (trilinos_i == -1)
      {
        Assert(false,
               ExcAccessToNonLocalElement(
                 i, j, local_range().first, local_range().second - 1));
      }
    else
      {
        // Check whether the matrix has already been transformed to local
        // indices.
        Assert(matrix->Filled(), ExcMatrixNotCompressed());

        // Prepare pointers for extraction of a view of the row.
        int             nnz_present = matrix->NumMyEntries(trilinos_i);
        int             nnz_extracted;
        int *           col_indices;
        TrilinosScalar *values;

        // Generate the view and make sure that we have not generated an
        // error.
        // TODO Check that col_indices are int and not long long
        int ierr = matrix->ExtractMyRowView(trilinos_i,
                                            nnz_extracted,
                                            values,
                                            col_indices);
        (void)ierr;
        Assert(ierr == 0, ExcTrilinosError(ierr));

        Assert(nnz_present == nnz_extracted,
               ExcDimensionMismatch(nnz_present, nnz_extracted));

        // Search the index where we look for the value, and then finally
        // get it.
        const std::ptrdiff_t local_col_index =
          std::find(col_indices, col_indices + nnz_present, trilinos_j) -
          col_indices;

        // This is actually the only difference to the el(i,j) function,
        // which means that we throw an exception in this case instead of
        // just returning zero for an element that is not present in the
        // sparsity pattern.
        if (local_col_index == nnz_present)
          {
            Assert(false, ExcInvalidIndex(i, j));
          }
        else
          value = values[local_col_index];
      }

    return value;
  }


  TrilinosScalar
  SparseMatrix::el(const size_type i, const size_type j) const
  {
    // Extract local indices in the matrix.
    int trilinos_i =
          matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
        trilinos_j =
          matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
    TrilinosScalar value = 0.;

    // If the data is not on the present processor, we can't continue. Just
    // print out zero as discussed in the documentation of this function.
    // If you want error checking, use operator().
    if ((trilinos_i == -1) || (trilinos_j == -1))
      return 0.;
    else
      {
        // Check whether the matrix has already been transformed to local
        // indices.
        Assert(matrix->Filled(), ExcMatrixNotCompressed());

        // Prepare pointers for extraction of a view of the row.
        int             nnz_present = matrix->NumMyEntries(trilinos_i);
        int             nnz_extracted;
        int *           col_indices;
        TrilinosScalar *values;

        // Generate the view and make sure that we have not generated an
        // error.
        int ierr = matrix->ExtractMyRowView(trilinos_i,
                                            nnz_extracted,
                                            values,
                                            col_indices);
        (void)ierr;
        Assert(ierr == 0, ExcTrilinosError(ierr));

        Assert(nnz_present == nnz_extracted,
               ExcDimensionMismatch(nnz_present, nnz_extracted));

        // Search the index where we look for the value, and then finally
        // get it.
        const std::ptrdiff_t local_col_index =
          std::find(col_indices, col_indices + nnz_present, trilinos_j) -
          col_indices;

        // This is actually the only difference to the () function querying
        // (i,j), where we throw an exception instead of just returning zero
        // for an element that is not present in the sparsity pattern.
        if (local_col_index == nnz_present)
          value = 0;
        else
          value = values[local_col_index];
      }

    return value;
  }
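
  // Illustration only, summarizing the difference between the two element
  // accessors:
  //
  //   const TrilinosScalar a = A(i, j);    // asserts that (i,j) is stored
  //                                        // locally and is in the pattern
  //   const TrilinosScalar b = A.el(i, j); // returns 0. in those cases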


  TrilinosScalar
  SparseMatrix::diag_element(const size_type i) const
  {
    Assert(m() == n(), ExcNotQuadratic());

#  ifdef DEBUG
    // use operator() in debug mode because
    // it checks if this is a valid element
    // (in parallel)
    return operator()(i, i);
#  else
    // Trilinos doesn't seem to have a
    // more efficient way to access the
    // diagonal than by just using the
    // standard el(i,j) function.
    return el(i, i);
#  endif
  }



  unsigned int
  SparseMatrix::row_length(const size_type row) const
  {
    Assert(row < m(), ExcInternalError());

    // get a representation of the present row
    int ncols = -1;
    int local_row =
      matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
    Assert((local_row >= 0), ExcAccessToNonlocalRow(row));

    // on the processor that owns this row, we'll have a non-negative value.
    if (local_row >= 0)
      {
        int ierr = matrix->NumMyRowEntries(local_row, ncols);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

    return static_cast<unsigned int>(ncols);
  }


  void
  SparseMatrix::set(const std::vector<size_type> &    row_indices,
                    const std::vector<size_type> &    col_indices,
                    const FullMatrix<TrilinosScalar> &values,
                    const bool                        elide_zero_values)
  {
    Assert(row_indices.size() == values.m(),
           ExcDimensionMismatch(row_indices.size(), values.m()));
    Assert(col_indices.size() == values.n(),
           ExcDimensionMismatch(col_indices.size(), values.n()));

    for (size_type i = 0; i < row_indices.size(); ++i)
      set(row_indices[i],
          col_indices.size(),
          col_indices.data(),
          &values(i, 0),
          elide_zero_values);
  }



  void
  SparseMatrix::set(const size_type                    row,
                    const std::vector<size_type> &     col_indices,
                    const std::vector<TrilinosScalar> &values,
                    const bool                         elide_zero_values)
  {
    Assert(col_indices.size() == values.size(),
           ExcDimensionMismatch(col_indices.size(), values.size()));

    set(row,
        col_indices.size(),
        col_indices.data(),
        values.data(),
        elide_zero_values);
  }


  template <>
  void
  SparseMatrix::set<TrilinosScalar>(const size_type       row,
                                    const size_type       n_cols,
                                    const size_type *     col_indices,
                                    const TrilinosScalar *values,
                                    const bool            elide_zero_values)
  {
    AssertIndexRange(row, this->m());

    int ierr;
    if (last_action == Add)
      {
        ierr =
          matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true);

        Assert(ierr == 0, ExcTrilinosError(ierr));
      }

    last_action = Insert;

    const TrilinosWrappers::types::int_type *col_index_ptr;
    const TrilinosScalar *                   col_value_ptr;
    const TrilinosWrappers::types::int_type  trilinos_row = row;
    TrilinosWrappers::types::int_type        n_columns;

    boost::container::small_vector<TrilinosScalar, 200> local_value_array(
      n_cols);
    boost::container::small_vector<TrilinosWrappers::types::int_type, 200>
      local_index_array(n_cols);

    // If we don't elide zeros, the pointers are already available... need to
    // cast to non-const pointers as that is the format taken by Trilinos (but
    // we will not modify const data)
    if (elide_zero_values == false)
      {
        col_index_ptr =
          reinterpret_cast<const TrilinosWrappers::types::int_type *>(
            col_indices);
        col_value_ptr = values;
        n_columns     = n_cols;
      }
    else
      {
        // Otherwise, extract nonzero values in each row and get the
        // respective indices.
        col_index_ptr = local_index_array.data();
        col_value_ptr = local_value_array.data();

        n_columns = 0;
        for (size_type j = 0; j < n_cols; ++j)
          {
            const double value = values[j];
            AssertIsFinite(value);
            if (value != 0)
              {
                local_index_array[n_columns] = col_indices[j];
                local_value_array[n_columns] = value;
                n_columns++;
              }
          }

        AssertIndexRange(n_columns, n_cols + 1);
      }


    // If the calling matrix owns the row to which we want to insert values,
    // we can directly call the Epetra_CrsMatrix input function, which is much
    // faster than the Epetra_FECrsMatrix function. We distinguish between two
    // cases: the first one is when the matrix is not filled (i.e., it is
    // possible to add new elements to the sparsity pattern), and the second
    // one is when the pattern is already fixed. In the former case, we add
    // the possibility to insert new values, and in the second we just replace
    // data.
    if (matrix->RowMap().MyGID(
          static_cast<TrilinosWrappers::types::int_type>(row)) == true)
      {
        if (matrix->Filled() == false)
          {
            ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
              row, static_cast<int>(n_columns), col_value_ptr, col_index_ptr);

            // When inserting elements, we do not want to create exceptions in
            // the case when inserting non-local data (since that's what we
            // want to do right now).
            if (ierr > 0)
              ierr = 0;
          }
        else
          ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row,
                                                               n_columns,
                                                               col_value_ptr,
                                                               col_index_ptr);
      }
    else
      {
        // When we're at off-processor data, we have to stick with the
        // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
        // we call it is the fastest one (any other will lead to repeated
        // allocation and deallocation of memory in order to call the function
        // we already use, which is very inefficient if writing one element at
        // a time).
        compressed = false;

        if (matrix->Filled() == false)
          {
            ierr = matrix->InsertGlobalValues(1,
                                              &trilinos_row,
                                              n_columns,
                                              col_index_ptr,
                                              &col_value_ptr,
                                              Epetra_FECrsMatrix::ROW_MAJOR);
            if (ierr > 0)
              ierr = 0;
          }
        else
          ierr = matrix->ReplaceGlobalValues(1,
                                             &trilinos_row,
                                             n_columns,
                                             col_index_ptr,
                                             &col_value_ptr,
                                             Epetra_FECrsMatrix::ROW_MAJOR);
        // use the FECrsMatrix facilities for set even in the case when we
        // have explicitly set the off-processor rows because that only works
        // properly when adding elements, not when setting them (since we want
        // to only touch elements that have been set explicitly, and there is
        // no way on the receiving processor to identify them otherwise)
      }

    Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
    AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
  }
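
  // Illustration only: writing one full locally owned row at a time is much
  // faster than setting entries individually; `cols` and `vals` are assumed
  // to hold the column indices and values of that row:
  //
  //   A.set(row, cols.size(), cols.data(), vals.data(),
  //         /*elide_zero_values=*/false);
  //   A.compress(VectorOperation::insert); // once all rows are written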


  void
  SparseMatrix::add(const std::vector<size_type> &    indices,
                    const FullMatrix<TrilinosScalar> &values,
                    const bool                        elide_zero_values)
  {
    Assert(indices.size() == values.m(),
           ExcDimensionMismatch(indices.size(), values.m()));
    Assert(values.m() == values.n(), ExcNotQuadratic());

    for (size_type i = 0; i < indices.size(); ++i)
      add(indices[i],
          indices.size(),
          indices.data(),
          &values(i, 0),
          elide_zero_values);
  }



  void
  SparseMatrix::add(const std::vector<size_type> &    row_indices,
                    const std::vector<size_type> &    col_indices,
                    const FullMatrix<TrilinosScalar> &values,
                    const bool                        elide_zero_values)
  {
    Assert(row_indices.size() == values.m(),
           ExcDimensionMismatch(row_indices.size(), values.m()));
    Assert(col_indices.size() == values.n(),
           ExcDimensionMismatch(col_indices.size(), values.n()));

    for (size_type i = 0; i < row_indices.size(); ++i)
      add(row_indices[i],
          col_indices.size(),
          col_indices.data(),
          &values(i, 0),
          elide_zero_values);
  }



  void
  SparseMatrix::add(const size_type                    row,
                    const std::vector<size_type> &     col_indices,
                    const std::vector<TrilinosScalar> &values,
                    const bool                         elide_zero_values)
  {
    Assert(col_indices.size() == values.size(),
           ExcDimensionMismatch(col_indices.size(), values.size()));

    add(row,
        col_indices.size(),
        col_indices.data(),
        values.data(),
        elide_zero_values);
  }


  void
  SparseMatrix::add(const size_type       row,
                    const size_type       n_cols,
                    const size_type *     col_indices,
                    const TrilinosScalar *values,
                    const bool            elide_zero_values,
                    const bool /*col_indices_are_sorted*/)
  {
    AssertIndexRange(row, this->m());
    int ierr;
    if (last_action == Insert)
      {
        // TODO: this could lead to a dead lock when only one processor
        // calls GlobalAssemble.
        ierr =
          matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), false);

        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

    last_action = Add;

    const TrilinosWrappers::types::int_type *col_index_ptr;
    const TrilinosScalar *                   col_value_ptr;
    const TrilinosWrappers::types::int_type  trilinos_row = row;
    TrilinosWrappers::types::int_type        n_columns;

    boost::container::small_vector<TrilinosScalar, 100> local_value_array(
      n_cols);
    boost::container::small_vector<TrilinosWrappers::types::int_type, 100>
      local_index_array(n_cols);

    // If we don't elide zeros, the pointers are already available... need to
    // cast to non-const pointers as that is the format taken by Trilinos (but
    // we will not modify const data)
    if (elide_zero_values == false)
      {
        col_index_ptr =
          reinterpret_cast<const TrilinosWrappers::types::int_type *>(
            col_indices);
        col_value_ptr = values;
        n_columns     = n_cols;
#  ifdef DEBUG
        for (size_type j = 0; j < n_cols; ++j)
          AssertIsFinite(values[j]);
#  endif
      }
    else
      {
        // Otherwise, extract nonzero values in each row and the corresponding
        // index.
        col_index_ptr = local_index_array.data();
        col_value_ptr = local_value_array.data();

        n_columns = 0;
        for (size_type j = 0; j < n_cols; ++j)
          {
            const double value = values[j];

            AssertIsFinite(value);
            if (value != 0)
              {
                local_index_array[n_columns] = col_indices[j];
                local_value_array[n_columns] = value;
                ++n_columns;
              }
          }

        AssertIndexRange(n_columns, n_cols + 1);
      }
    // Exit early if there is nothing to do
    if (n_columns == 0)
      {
        return;
      }

    // If the calling processor owns the row to which we want to add values,
    // we can directly call the Epetra_CrsMatrix input function, which is much
    // faster than the Epetra_FECrsMatrix function.
    if (matrix->RowMap().MyGID(
          static_cast<TrilinosWrappers::types::int_type>(row)) == true)
      {
        ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row,
                                                             n_columns,
                                                             col_value_ptr,
                                                             col_index_ptr);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }
    else if (nonlocal_matrix.get() != nullptr)
      {
        compressed = false;
        // this is the case when we have explicitly set the off-processor rows
        // and want to create a separate matrix object for them (to retain
        // thread-safety)
        Assert(nonlocal_matrix->RowMap().LID(
                 static_cast<TrilinosWrappers::types::int_type>(row)) != -1,
               ExcMessage("Attempted to write into off-processor matrix row "
                          "that has not been specified as being writable upon "
                          "initialization"));
        ierr = nonlocal_matrix->SumIntoGlobalValues(row,
                                                    n_columns,
                                                    col_value_ptr,
                                                    col_index_ptr);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }
    else
      {
        // When we're at off-processor data, we have to stick with the
        // standard SumIntoGlobalValues function. Nevertheless, the way we
        // call it is the fastest one (any other will lead to repeated
        // allocation and deallocation of memory in order to call the function
        // we already use, which is very inefficient if writing one element at
        // a time).
        compressed = false;

        ierr = matrix->SumIntoGlobalValues(1,
                                           &trilinos_row,
                                           n_columns,
                                           col_index_ptr,
                                           &col_value_ptr,
                                           Epetra_FECrsMatrix::ROW_MAJOR);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

#  ifdef DEBUG
    if (ierr > 0)
      {
        std::cout << "------------------------------------------" << std::endl;
        std::cout << "Got error " << ierr << " in row " << row << " of proc "
                  << matrix->RowMap().Comm().MyPID()
                  << " when trying to add the columns:" << std::endl;
        for (TrilinosWrappers::types::int_type i = 0; i < n_columns; ++i)
          std::cout << col_index_ptr[i] << " ";
        std::cout << std::endl << std::endl;
        std::cout << "Matrix row "
                  << (matrix->RowMap().MyGID(
                        static_cast<TrilinosWrappers::types::int_type>(row)) ==
                          false ?
                        "(nonlocal part)" :
                        "")
                  << " has the following indices:" << std::endl;
        std::vector<TrilinosWrappers::types::int_type> indices;
        const Epetra_CrsGraph *graph =
          (nonlocal_matrix.get() != nullptr &&
           matrix->RowMap().MyGID(
             static_cast<TrilinosWrappers::types::int_type>(row)) == false) ?
            &nonlocal_matrix->Graph() :
            &matrix->Graph();

        indices.resize(graph->NumGlobalIndices(row));
        int n_indices = 0;
        graph->ExtractGlobalRowCopy(row,
                                    indices.size(),
                                    n_indices,
                                    indices.data());
        AssertDimension(n_indices, indices.size());

        for (TrilinosWrappers::types::int_type i = 0; i < n_indices; ++i)
          std::cout << indices[i] << " ";
        std::cout << std::endl << std::endl;
        Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
      }
#  endif
    Assert(ierr >= 0, ExcTrilinosError(ierr));
  }
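
  // Illustration only: add() accumulates into existing entries while set()
  // overwrites them, and the two must not be mixed between compress() calls:
  //
  //   A.set(i, j, 1.0);
  //   A.compress(VectorOperation::insert);
  //   A.add(i, j, 0.5); // entry holds 1.5 after the next compress()
  //   A.compress(VectorOperation::add);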


  SparseMatrix &
  SparseMatrix::operator=(const double d)
  {
    Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
    compress(VectorOperation::unknown); // TODO: why do we do this? Should we
                                        // not check for is_compressed?

    const int ierr = matrix->PutScalar(d);
    AssertThrow(ierr == 0, ExcTrilinosError(ierr));
    if (nonlocal_matrix.get() != nullptr)
      nonlocal_matrix->PutScalar(d);

    return *this;
  }


  void
  SparseMatrix::add(const TrilinosScalar factor, const SparseMatrix &rhs)
  {
    AssertDimension(rhs.m(), m());
    AssertDimension(rhs.n(), n());
    AssertDimension(rhs.local_range().first, local_range().first);
    AssertDimension(rhs.local_range().second, local_range().second);
    Assert(matrix->RowMap().SameAs(rhs.matrix->RowMap()),
           ExcMessage("Can only add matrices with same distribution of rows"));
    Assert(matrix->Filled() && rhs.matrix->Filled(),
           ExcMessage("Addition of matrices is only allowed if the matrices "
                      "are filled, i.e., compress() has been called"));

    const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());

    for (const auto row : locally_owned_range_indices())
      {
        const int row_local = matrix->RowMap().LID(
          static_cast<TrilinosWrappers::types::int_type>(row));
        Assert((row_local >= 0), ExcAccessToNonlocalRow(row));

        // First get a view of the matrix columns of both matrices. Note that
        // the data is in local index spaces, so column indices can only be
        // compared directly if both matrices use the same column map.
        int             n_entries, rhs_n_entries;
        TrilinosScalar *value_ptr, *rhs_value_ptr;
        int *           index_ptr, *rhs_index_ptr;
        int             ierr = rhs.matrix->ExtractMyRowView(row_local,
                                                rhs_n_entries,
                                                rhs_value_ptr,
                                                rhs_index_ptr);
        (void)ierr;
        Assert(ierr == 0, ExcTrilinosError(ierr));

        ierr =
          matrix->ExtractMyRowView(row_local, n_entries, value_ptr, index_ptr);
        Assert(ierr == 0, ExcTrilinosError(ierr));
        bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
        if (!expensive_checks)
          {
            // check if the column indices are the same. If yes, we can simply
            // copy over the data.
            expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
                                           static_cast<void *>(rhs_index_ptr),
                                           sizeof(int) * n_entries) != 0;
            if (!expensive_checks)
              for (int i = 0; i < n_entries; ++i)
                value_ptr[i] += rhs_value_ptr[i] * factor;
          }
        // Now to the expensive case where we need to check all column indices
        // against each other (transformed into global index space) and where
        // we need to make sure that all entries we are about to add into the
        // lhs matrix actually exist
        if (expensive_checks)
          {
            for (int i = 0; i < rhs_n_entries; ++i)
              {
                if (rhs_value_ptr[i] == 0.)
                  continue;
                const TrilinosWrappers::types::int_type rhs_global_col =
                  global_column_index(*rhs.matrix, rhs_index_ptr[i]);
                int  local_col   = matrix->ColMap().LID(rhs_global_col);
                int *local_index = Utilities::lower_bound(index_ptr,
                                                          index_ptr + n_entries,
                                                          local_col);
                Assert(local_index != index_ptr + n_entries &&
                         *local_index == local_col,
                       ExcMessage(
                         "Adding the entries from the other matrix "
                         "failed, because the sparsity pattern "
                         "of that matrix includes more elements than the "
                         "calling matrix, which is not allowed."));
                value_ptr[local_index - index_ptr] += factor * rhs_value_ptr[i];
              }
          }
      }
  }
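
  // Illustration only: B.add(0.5, A) computes B += 0.5 * A for two
  // compressed matrices with the same row distribution, provided every
  // entry of A's sparsity pattern also exists in B's:
  //
  //   B.add(0.5, A);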
1857 
1858 
1859 
1860  void
1862  {
1863  // This only flips a flag that tells
1864  // Trilinos that any vmult operation
1865  // should be done with the
1866  // transpose. However, the matrix
1867  // structure is not reset.
1868  int ierr;
1869 
1870  if (!matrix->UseTranspose())
1871  {
1872  ierr = matrix->SetUseTranspose(true);
1873  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1874  }
1875  else
1876  {
1877  ierr = matrix->SetUseTranspose(false);
1878  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1879  }
1880  }
1881 
1882 
1883 
1884  SparseMatrix &
1886  {
1887  const int ierr = matrix->Scale(a);
1888  Assert(ierr == 0, ExcTrilinosError(ierr));
1889  (void)ierr; // removes -Wunused-variable in optimized mode
1890 
1891  return *this;
1892  }
1893 
1894 
1895 
1896  SparseMatrix &
1897  SparseMatrix::operator/=(const TrilinosScalar a)
1898  {
1899  Assert(a != 0, ExcDivideByZero());
1900 
1901  const TrilinosScalar factor = 1. / a;
1902 
1903  const int ierr = matrix->Scale(factor);
1904  Assert(ierr == 0, ExcTrilinosError(ierr));
1905  (void)ierr; // removes -Wunused-variable in optimized mode
1906 
1907  return *this;
1908  }
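 // Editorial usage sketch: in-place scaling of all stored entries.
 //
 //   A *= 2.0; // multiplies every stored entry by 2
 //   A /= 4.0; // divides every stored entry by 4; the divisor must be nonzero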
1909 
1910 
1911 
1912  TrilinosScalar
1913  SparseMatrix::l1_norm() const
1914  {
1915  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1916  return matrix->NormOne();
1917  }
1918 
1919 
1920 
1921  TrilinosScalar
1922  SparseMatrix::linfty_norm() const
1923  {
1924  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1925  return matrix->NormInf();
1926  }
1927 
1928 
1929 
1930  TrilinosScalar
1931  SparseMatrix::frobenius_norm() const
1932  {
1933  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1934  return matrix->NormFrobenius();
1935  }
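 // Editorial usage sketch: all three norms require a compressed matrix.
 //
 //   const TrilinosScalar n1   = A.l1_norm();        // max column sum
 //   const TrilinosScalar ninf = A.linfty_norm();    // max row sum
 //   const TrilinosScalar nfro = A.frobenius_norm(); // sqrt of sum of squares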
1936 
1937 
1938 
1939  namespace internal
1940  {
1941  namespace SparseMatrixImplementation
1942  {
1943  template <typename VectorType>
1944  inline void
1945  check_vector_map_equality(const Epetra_CrsMatrix &,
1946  const VectorType &,
1947  const VectorType &)
1948  {}
1949 
1950  inline void
1951  check_vector_map_equality(const Epetra_CrsMatrix & m,
1952  const TrilinosWrappers::MPI::Vector & in,
1953  const TrilinosWrappers::MPI::Vector &out)
1954  {
1955  Assert(in.trilinos_partitioner().SameAs(m.DomainMap()) == true,
1956  ExcMessage("The column partitioning of a matrix does not match "
1957  "the partitioning of a vector you are trying to "
1958  "multiply it with. Are you multiplying the "
1959  "matrix with a vector that has ghost elements?"));
1960  Assert(out.trilinos_partitioner().SameAs(m.RangeMap()) == true,
1961  ExcMessage("The row partitioning of a matrix does not match "
1962  "the partitioning of a vector you are trying to "
1963  "put the result of a matrix-vector product in. "
1964  "Are you trying to put the product of the "
1965  "matrix with a vector into a vector that has "
1966  "ghost elements?"));
1967  (void)m;
1968  (void)in;
1969  (void)out;
1970  }
1971  } // namespace SparseMatrixImplementation
1972  } // namespace internal
1973 
1974 
1975  template <typename VectorType>
1976  std::enable_if_t<
1977  std::is_same<typename VectorType::value_type, TrilinosScalar>::value>
1978  SparseMatrix::vmult(VectorType &dst, const VectorType &src) const
1979  {
1980  Assert(&src != &dst, ExcSourceEqualsDestination());
1981  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1982  (void)src;
1983  (void)dst;
1984 
1985  internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
1986  src,
1987  dst);
1988  const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
1989  AssertDimension(dst_local_size, matrix->RangeMap().NumMyPoints());
1990  const size_type src_local_size = internal::end(src) - internal::begin(src);
1991  AssertDimension(src_local_size, matrix->DomainMap().NumMyPoints());
1992 
1993  Epetra_MultiVector tril_dst(
1994  View, matrix->RangeMap(), internal::begin(dst), dst_local_size, 1);
1995  Epetra_MultiVector tril_src(View,
1996  matrix->DomainMap(),
1997  const_cast<TrilinosScalar *>(
1998  internal::begin(src)),
1999  src_local_size,
2000  1);
2001 
2002  const int ierr = matrix->Multiply(false, tril_src, tril_dst);
2003  Assert(ierr == 0, ExcTrilinosError(ierr));
2004  (void)ierr; // removes -Wunused-variable in optimized mode
2005  }
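 // Editorial usage sketch ('comm' is an illustrative MPI communicator):
 // src and dst must be distinct, non-ghosted vectors partitioned like
 // the matrix' domain and range, respectively.
 //
 //   TrilinosWrappers::MPI::Vector x(A.locally_owned_domain_indices(), comm);
 //   TrilinosWrappers::MPI::Vector y(A.locally_owned_range_indices(), comm);
 //   A.vmult(y, x); // y = A x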
2006 
2007 
2008 
2009  template <typename VectorType>
2010  std::enable_if_t<
2011  !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>
2012  SparseMatrix::vmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2013  {
2014  AssertThrow(false, ExcNotImplemented());
2015  }
2016 
2017 
2018 
2019  template <typename VectorType>
2020  std::enable_if_t<
2021  std::is_same<typename VectorType::value_type, TrilinosScalar>::value>
2022  SparseMatrix::Tvmult(VectorType &dst, const VectorType &src) const
2023  {
2024  Assert(&src != &dst, ExcSourceEqualsDestination());
2025  Assert(matrix->Filled(), ExcMatrixNotCompressed());
2026 
2027  internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
2028  dst,
2029  src);
2030  const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
2031  AssertDimension(dst_local_size, matrix->DomainMap().NumMyPoints());
2032  const size_type src_local_size = internal::end(src) - internal::begin(src);
2033  AssertDimension(src_local_size, matrix->RangeMap().NumMyPoints());
2034 
2035  Epetra_MultiVector tril_dst(
2036  View, matrix->DomainMap(), internal::begin(dst), dst_local_size, 1);
2037  Epetra_MultiVector tril_src(View,
2038  matrix->RangeMap(),
2039  const_cast<double *>(internal::begin(src)),
2040  src_local_size,
2041  1);
2042 
2043  const int ierr = matrix->Multiply(true, tril_src, tril_dst);
2044  Assert(ierr == 0, ExcTrilinosError(ierr));
2045  (void)ierr; // removes -Wunused-variable in optimized mode
2046  }
2047 
2048 
2049 
2050  template <typename VectorType>
2051  std::enable_if_t<
2052  !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>
2053  SparseMatrix::Tvmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2054  {
2055  AssertThrow(false, ExcNotImplemented());
2056  }
2057 
2058 
2059 
2060  template <typename VectorType>
2061  void
2062  SparseMatrix::vmult_add(VectorType &dst, const VectorType &src) const
2063  {
2064  Assert(&src != &dst, ExcSourceEqualsDestination());
2065 
2066  // Reinit a temporary vector with the omit_zeroing_entries flag set,
2067  // so its previous content is not zeroed out (to save time).
2068  VectorType tmp_vector;
2069  tmp_vector.reinit(dst, true);
2070  vmult(tmp_vector, src);
2071  dst += tmp_vector;
2072  }
2073 
2074 
2075 
2076  template <typename VectorType>
2077  void
2078  SparseMatrix::Tvmult_add(VectorType &dst, const VectorType &src) const
2079  {
2080  Assert(&src != &dst, ExcSourceEqualsDestination());
2081 
2082  // Reinit a temporary vector with the omit_zeroing_entries flag set,
2083  // so its previous content is not zeroed out (to save time).
2084  VectorType tmp_vector;
2085  tmp_vector.reinit(dst, true);
2086  Tvmult(tmp_vector, src);
2087  dst += tmp_vector;
2088  }
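 // Editorial usage sketch: the *_add variants accumulate into dst
 // through a temporary vector, as implemented above.
 //
 //   A.vmult_add(y, x);  // y += A x
 //   A.Tvmult_add(x, y); // x += A^T y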
2089 
2090 
2091 
2092  TrilinosScalar
2093  SparseMatrix::matrix_norm_square(const MPI::Vector &v) const
2094  {
2095  Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2096 
2097  MPI::Vector temp_vector;
2098  temp_vector.reinit(v, true);
2099 
2100  vmult(temp_vector, v);
2101  return temp_vector * v;
2102  }
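 // Editorial usage sketch: for a square matrix and a compatible,
 // non-ghosted vector v, this returns the scalar product v^T A v.
 //
 //   const TrilinosScalar s = A.matrix_norm_square(v);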
2103 
2104 
2105 
2106  TrilinosScalar
2107  SparseMatrix::matrix_scalar_product(const MPI::Vector &u,
2108  const MPI::Vector &v) const
2109  {
2110  Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2111 
2112  MPI::Vector temp_vector;
2113  temp_vector.reinit(v, true);
2114 
2115  vmult(temp_vector, v);
2116  return u * temp_vector;
2117  }
2118 
2119 
2120 
2121  TrilinosScalar
2122  SparseMatrix::residual(MPI::Vector &      dst,
2123  const MPI::Vector &x,
2124  const MPI::Vector &b) const
2125  {
2126  vmult(dst, x);
2127  dst -= b;
2128  dst *= -1.;
2129 
2130  return dst.l2_norm();
2131  }
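 // Editorial usage sketch: computes dst = b - A x and returns its
 // l2 norm, which is a convenient convergence check after a solve.
 //
 //   const TrilinosScalar r = A.residual(dst, x, b);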
2132 
2133 
2134 
2135  namespace internals
2136  {
2137  using size_type = SparseMatrix::size_type;
2138 
2139  void
2140  perform_mmult(const SparseMatrix &inputleft,
2141  const SparseMatrix &inputright,
2142  SparseMatrix & result,
2143  const MPI::Vector & V,
2144  const bool transpose_left)
2145  {
2146  const bool use_vector = (V.size() == inputright.m());
2147  if (transpose_left == false)
2148  {
2149  Assert(inputleft.n() == inputright.m(),
2150  ExcDimensionMismatch(inputleft.n(), inputright.m()));
2151  Assert(inputleft.trilinos_matrix().DomainMap().SameAs(
2152  inputright.trilinos_matrix().RangeMap()),
2153  ExcMessage("Parallel partitioning of A and B does not fit."));
2154  }
2155  else
2156  {
2157  Assert(inputleft.m() == inputright.m(),
2158  ExcDimensionMismatch(inputleft.m(), inputright.m()));
2159  Assert(inputleft.trilinos_matrix().RangeMap().SameAs(
2160  inputright.trilinos_matrix().RangeMap()),
2161  ExcMessage("Parallel partitioning of A and B does not fit."));
2162  }
2163 
2164  result.clear();
2165 
2166  // create a suitable operator B: in case
2167  // we do not use a vector, all we need to
2168  // do is to set the pointer. Otherwise,
2169  // we insert the data from B, but
2170  // multiply each row with the respective
2171  // vector element.
2172  Teuchos::RCP<Epetra_CrsMatrix> mod_B;
2173  if (use_vector == false)
2174  {
2175  mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>(
2176  &inputright.trilinos_matrix()),
2177  false);
2178  }
2179  else
2180  {
2181  mod_B = Teuchos::rcp(
2182  new Epetra_CrsMatrix(Copy, inputright.trilinos_sparsity_pattern()),
2183  true);
2184  mod_B->FillComplete(inputright.trilinos_matrix().DomainMap(),
2185  inputright.trilinos_matrix().RangeMap());
2186  Assert(inputright.local_range() == V.local_range(),
2187  ExcMessage("Parallel distribution of matrix B and vector V "
2188  "does not match."));
2189 
2190  const int local_N = inputright.local_size();
2191  for (int i = 0; i < local_N; ++i)
2192  {
2193  int N_entries = -1;
2194  double *new_data, *B_data;
2195  mod_B->ExtractMyRowView(i, N_entries, new_data);
2196  inputright.trilinos_matrix().ExtractMyRowView(i,
2197  N_entries,
2198  B_data);
2199  double value = V.trilinos_vector()[0][i];
2200  for (TrilinosWrappers::types::int_type j = 0; j < N_entries; ++j)
2201  new_data[j] = value * B_data[j];
2202  }
2203  }
2204 
2205 
2206  SparseMatrix tmp_result(transpose_left ?
2207  inputleft.locally_owned_domain_indices() :
2208  inputleft.locally_owned_range_indices(),
2209  inputright.locally_owned_domain_indices(),
2210  inputleft.get_mpi_communicator());
2211 
2212 # ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
2213  EpetraExt::MatrixMatrix::Multiply(inputleft.trilinos_matrix(),
2214  transpose_left,
2215  *mod_B,
2216  false,
2217  const_cast<Epetra_CrsMatrix &>(
2218  tmp_result.trilinos_matrix()));
2219 # else
2220  Assert(false,
2221  ExcMessage("This function requires that the Trilinos "
2222  "installation found while running the deal.II "
2223  "CMake scripts contains the optional Trilinos "
2224  "package 'EpetraExt'. However, this optional "
2225  "part of Trilinos was not found."));
2226 # endif
2227  result.reinit(tmp_result.trilinos_matrix());
2228  }
2229  } // namespace internals
2230 
2231 
2232  void
2233  SparseMatrix::mmult(SparseMatrix &     C,
2234  const SparseMatrix &B,
2235  const MPI::Vector & V) const
2236  {
2237  internals::perform_mmult(*this, B, C, V, false);
2238  }
2239 
2240 
2241 
2242  void
2243  SparseMatrix::Tmmult(SparseMatrix &     C,
2244  const SparseMatrix &B,
2245  const MPI::Vector & V) const
2246  {
2247  internals::perform_mmult(*this, B, C, V, true);
2248  }
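 // Editorial usage sketch: C is rebuilt inside perform_mmult(), so a
 // default-constructed matrix suffices; the optional vector V inserts
 // a diagonal scaling between the two factors.
 //
 //   TrilinosWrappers::SparseMatrix C;
 //   A.mmult(C, B);    // C = A * B
 //   A.Tmmult(C, B);   // C = A^T * B
 //   A.mmult(C, B, V); // C = A * diag(V) * B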
2249 
2250 
2251 
2252  void
2253  SparseMatrix::write_ascii()
2254  {
2255  Assert(false, ExcNotImplemented());
2256  }
2257 
2258 
2259 
2260  // As of now, no particularly neat
2261  // output is generated in case of
2262  // multiple processors.
2263  void
2264  SparseMatrix::print(std::ostream &out,
2265  const bool print_detailed_trilinos_information) const
2266  {
2267  if (print_detailed_trilinos_information == true)
2268  out << *matrix;
2269  else
2270  {
2271  double *values;
2272  int * indices;
2273  int num_entries;
2274 
2275  for (int i = 0; i < matrix->NumMyRows(); ++i)
2276  {
2277  const int ierr =
2278  matrix->ExtractMyRowView(i, num_entries, values, indices);
2279  (void)ierr;
2280  Assert(ierr == 0, ExcTrilinosError(ierr));
2281 
2282  for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
2283  out << "(" << TrilinosWrappers::global_row_index(*matrix, i)
2284  << ","
2285  << TrilinosWrappers::global_column_index(*matrix, indices[j])
2286  << ") " << values[j] << std::endl;
2287  }
2288  }
2289 
2290  AssertThrow(out.fail() == false, ExcIO());
2291  }
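 // Editorial usage sketch:
 //
 //   A.print(std::cout);       // one "(row,col) value" line per entry
 //   A.print(std::cout, true); // defer to Trilinos' detailed printout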
2292 
2293 
2294 
2295  SparseMatrix::size_type
2296  SparseMatrix::memory_consumption() const
2297  {
2298  size_type static_memory =
2299  sizeof(*this) + sizeof(*matrix) + sizeof(*matrix->Graph().DataPtr());
2300  return (
2301  (sizeof(TrilinosScalar) + sizeof(TrilinosWrappers::types::int_type)) *
2302  matrix->NumMyNonzeros() +
2303  sizeof(int) * local_size() + static_memory);
2304  }
2305 
2306 
2307 
2308  MPI_Comm
2309  SparseMatrix::get_mpi_communicator() const
2310  {
2311  const Epetra_MpiComm *mpi_comm =
2312  dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
2313  Assert(mpi_comm != nullptr, ExcInternalError());
2314  return mpi_comm->Comm();
2315  }
2316 } // namespace TrilinosWrappers
2317 
2318 
2319 namespace TrilinosWrappers
2320 {
2321  namespace internal
2322  {
2323  namespace LinearOperatorImplementation
2324  {
2325  TrilinosPayload::TrilinosPayload()
2326  : use_transpose(false)
2327  , communicator(MPI_COMM_SELF)
2328  , domain_map(IndexSet().make_trilinos_map(communicator.Comm()))
2329  , range_map(IndexSet().make_trilinos_map(communicator.Comm()))
2330  {
2331  vmult = [](Range &, const Domain &) {
2332  Assert(false,
2333  ExcMessage("Uninitialized TrilinosPayload::vmult called "
2334  "(Default constructor)"));
2335  };
2336 
2337  Tvmult = [](Domain &, const Range &) {
2338  Assert(false,
2339  ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
2340  "(Default constructor)"));
2341  };
2342 
2343  inv_vmult = [](Domain &, const Range &) {
2344  Assert(false,
2345  ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2346  "(Default constructor)"));
2347  };
2348 
2349  inv_Tvmult = [](Range &, const Domain &) {
2350  Assert(false,
2351  ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2352  "(Default constructor)"));
2353  };
2354  }
2355 
2356 
2357 
2358  TrilinosPayload::TrilinosPayload(
2359  const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2360  const TrilinosWrappers::SparseMatrix &matrix)
2361  : TrilinosPayload(const_cast<Epetra_CrsMatrix &>(
2362  matrix.trilinos_matrix()),
2363  /*op_supports_inverse_operations = */ false,
2364  matrix_exemplar.trilinos_matrix().UseTranspose(),
2365  matrix_exemplar.get_mpi_communicator(),
2366  matrix_exemplar.locally_owned_domain_indices(),
2367  matrix_exemplar.locally_owned_range_indices())
2368  {}
2369 
2370 
2371 
2372  TrilinosPayload::TrilinosPayload(
2373  const TrilinosPayload &               payload_exemplar,
2374  const TrilinosWrappers::SparseMatrix &matrix)
2375 
2376  : TrilinosPayload(const_cast<Epetra_CrsMatrix &>(
2377  matrix.trilinos_matrix()),
2378  /*op_supports_inverse_operations = */ false,
2379  payload_exemplar.UseTranspose(),
2380  payload_exemplar.get_mpi_communicator(),
2381  payload_exemplar.locally_owned_domain_indices(),
2382  payload_exemplar.locally_owned_range_indices())
2383  {}
2384 
2385 
2386 
2387  TrilinosPayload::TrilinosPayload(
2388  const TrilinosWrappers::SparseMatrix & matrix_exemplar,
2389  const TrilinosWrappers::PreconditionBase &preconditioner)
2390  : TrilinosPayload(preconditioner.trilinos_operator(),
2391  /*op_supports_inverse_operations = */ true,
2392  matrix_exemplar.trilinos_matrix().UseTranspose(),
2393  matrix_exemplar.get_mpi_communicator(),
2394  matrix_exemplar.locally_owned_domain_indices(),
2395  matrix_exemplar.locally_owned_range_indices())
2396  {}
2397 
2398 
2399 
2400  TrilinosPayload::TrilinosPayload(
2401  const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
2402  const TrilinosWrappers::PreconditionBase &preconditioner)
2403  : TrilinosPayload(
2404  preconditioner.trilinos_operator(),
2405  /*op_supports_inverse_operations = */ true,
2406  preconditioner_exemplar.trilinos_operator().UseTranspose(),
2407  preconditioner_exemplar.get_mpi_communicator(),
2408  preconditioner_exemplar.locally_owned_domain_indices(),
2409  preconditioner_exemplar.locally_owned_range_indices())
2410  {}
2411 
2412 
2413 
2414  TrilinosPayload::TrilinosPayload(
2415  const TrilinosPayload &                   payload_exemplar,
2416  const TrilinosWrappers::PreconditionBase &preconditioner)
2417  : TrilinosPayload(preconditioner.trilinos_operator(),
2418  /*op_supports_inverse_operations = */ true,
2419  payload_exemplar.UseTranspose(),
2420  payload_exemplar.get_mpi_communicator(),
2421  payload_exemplar.locally_owned_domain_indices(),
2422  payload_exemplar.locally_owned_range_indices())
2423  {}
2424 
2425 
2426 
2427  TrilinosPayload::TrilinosPayload(const TrilinosPayload &payload)
2428  : vmult(payload.vmult)
2429  , Tvmult(payload.Tvmult)
2430  , inv_vmult(payload.inv_vmult)
2431  , inv_Tvmult(payload.inv_Tvmult)
2432  , use_transpose(payload.use_transpose)
2433  , communicator(payload.communicator)
2434  , domain_map(payload.domain_map)
2435  , range_map(payload.range_map)
2436  {}
2437 
2438 
2439 
2440  // Composite copy constructor
2441  // This is required for PackagedOperations
2442  TrilinosPayload::TrilinosPayload(const TrilinosPayload &first_op,
2443  const TrilinosPayload &second_op)
2444  : use_transpose(false)
2445  , // The combination of operators provides the exact
2446  // definition of the operation
2447  communicator(first_op.communicator)
2448  , domain_map(second_op.domain_map)
2449  , range_map(first_op.range_map)
2450  {}
2451 
2452 
2453 
2454  TrilinosPayload
2455  TrilinosPayload::identity_payload() const
2456  {
2457  TrilinosPayload return_op(*this);
2458 
2459  return_op.vmult = [](Range &tril_dst, const Range &tril_src) {
2460  tril_dst = tril_src;
2461  };
2462 
2463  return_op.Tvmult = [](Range &tril_dst, const Range &tril_src) {
2464  tril_dst = tril_src;
2465  };
2466 
2467  return_op.inv_vmult = [](Range &tril_dst, const Range &tril_src) {
2468  tril_dst = tril_src;
2469  };
2470 
2471  return_op.inv_Tvmult = [](Range &tril_dst, const Range &tril_src) {
2472  tril_dst = tril_src;
2473  };
2474 
2475  return return_op;
2476  }
2477 
2478 
2479 
2480  TrilinosPayload
2481  TrilinosPayload::null_payload() const
2482  {
2483  TrilinosPayload return_op(*this);
2484 
2485  return_op.vmult = [](Range &tril_dst, const Domain &) {
2486  const int ierr = tril_dst.PutScalar(0.0);
2487 
2488  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2489  };
2490 
2491  return_op.Tvmult = [](Domain &tril_dst, const Range &) {
2492  const int ierr = tril_dst.PutScalar(0.0);
2493 
2494  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2495  };
2496 
2497  return_op.inv_vmult = [](Domain &tril_dst, const Range &) {
2498  AssertThrow(false,
2499  ExcMessage("Cannot compute inverse of null operator"));
2500 
2501  const int ierr = tril_dst.PutScalar(0.0);
2502 
2503  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2504  };
2505 
2506  return_op.inv_Tvmult = [](Range &tril_dst, const Domain &) {
2507  AssertThrow(false,
2508  ExcMessage("Cannot compute inverse of null operator"));
2509 
2510  const int ierr = tril_dst.PutScalar(0.0);
2511 
2512  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2513  };
2514 
2515  return return_op;
2516  }
2517 
2518 
2519 
2520  TrilinosPayload
2521  TrilinosPayload::transpose_payload() const
2522  {
2523  TrilinosPayload return_op(*this);
2524  return_op.transpose();
2525  return return_op;
2526  }
2527 
2528 
2529 
2530  IndexSet
2531  TrilinosPayload::locally_owned_domain_indices() const
2532  {
2533  return IndexSet(domain_map);
2534  }
2535 
2536 
2537 
2538  IndexSet
2539  TrilinosPayload::locally_owned_range_indices() const
2540  {
2541  return IndexSet(range_map);
2542  }
2543 
2544 
2545 
2546  MPI_Comm
2547  TrilinosPayload::get_mpi_communicator() const
2548  {
2549  return communicator.Comm();
2550  }
2551 
2552 
2553 
2554  void
2555  TrilinosPayload::transpose()
2556  {
2557  SetUseTranspose(!use_transpose);
2558  }
2559 
2560 
2561 
2562  bool
2563  TrilinosPayload::UseTranspose() const
2564  {
2565  return use_transpose;
2566  }
2567 
2568 
2569 
2570  int
2571  TrilinosPayload::SetUseTranspose(bool UseTranspose)
2572  {
2573  if (use_transpose != UseTranspose)
2574  {
2575  use_transpose = UseTranspose;
2576  std::swap(domain_map, range_map);
2577  std::swap(vmult, Tvmult);
2578  std::swap(inv_vmult, inv_Tvmult);
2579  }
2580  return 0;
2581  }
2582 
2583 
2584 
2585  int
2586  TrilinosPayload::Apply(const VectorType &X, VectorType &Y) const
2587  {
2588  // The transposedness of the operation is already encoded in the
2589  // function object, which is swapped whenever the transpose flag flips.
2590  vmult(Y, X);
2591  return 0;
2592  }
2593 
2594 
2595 
2596  int
2597  TrilinosPayload::ApplyInverse(const VectorType &Y, VectorType &X) const
2598  {
2599  // The transposedness of the operation is already encoded in the
2600  // function object, which is swapped whenever the transpose flag flips.
2601  inv_vmult(X, Y);
2602  return 0;
2603  }
2604 
2605 
2606 
2607  const char *
2608  TrilinosPayload::Label() const
2609  {
2610  return "TrilinosPayload";
2611  }
2612 
2613 
2614 
2615  const Epetra_Comm &
2616  TrilinosPayload::Comm() const
2617  {
2618  return communicator;
2619  }
2620 
2621 
2622 
2623  const Epetra_Map &
2624  TrilinosPayload::OperatorDomainMap() const
2625  {
2626  return domain_map;
2627  }
2628 
2629 
2630 
2631  const Epetra_Map &
2632  TrilinosPayload::OperatorRangeMap() const
2633  {
2634  return range_map;
2635  }
2636 
2637 
2638 
2639  bool
2640  TrilinosPayload::HasNormInf() const
2641  {
2642  return false;
2643  }
2644 
2645 
2646 
2647  double
2648  TrilinosPayload::NormInf() const
2649  {
2650  AssertThrow(false, ExcNotImplemented());
2651  return 0.0;
2652  }
2653 
2654 
2655 
2656  TrilinosPayload
2657  operator+(const TrilinosPayload &first_op,
2658  const TrilinosPayload &second_op)
2659  {
2660  using Domain = typename TrilinosPayload::Domain;
2661  using Range = typename TrilinosPayload::Range;
2662  using Intermediate = typename TrilinosPayload::VectorType;
2663  using GVMVectorType = TrilinosWrappers::MPI::Vector;
2664 
2665  Assert(first_op.locally_owned_domain_indices() ==
2666  second_op.locally_owned_domain_indices(),
2667  ExcMessage(
2668  "Operators are set to work on incompatible IndexSets."));
2669  Assert(first_op.locally_owned_range_indices() ==
2670  second_op.locally_owned_range_indices(),
2671  ExcMessage(
2672  "Operators are set to work on incompatible IndexSets."));
2673 
2674  TrilinosPayload return_op(first_op, second_op);
2675 
2676  // Capture by copy so the payloads are always valid
2677  return_op.vmult = [first_op, second_op](Range & tril_dst,
2678  const Domain &tril_src) {
2679  // Duplicated from LinearOperator::operator*
2680  // TODO: Template the constructor on GrowingVectorMemory vector type?
2681  GrowingVectorMemory<GVMVectorType> vector_memory;
2682  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2683 
2684  // Initialize intermediate vector
2685  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2686  i->reinit(IndexSet(first_op_init_map),
2687  first_op.get_mpi_communicator(),
2688  /*bool omit_zeroing_entries =*/true);
2689 
2690  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2691  const size_type i_local_size = i->end() - i->begin();
2692  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2693  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2694  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2695  (void)second_op_init_map;
2696  Intermediate tril_int(View,
2697  first_op_init_map,
2698  const_cast<TrilinosScalar *>(i->begin()),
2699  i_local_size,
2700  1);
2701 
2702  // These operators may themselves be transposed or not, so we let them
2703  // decide what the intended outcome is
2704  second_op.Apply(tril_src, tril_int);
2705  first_op.Apply(tril_src, tril_dst);
2706  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2707  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2708  };
2709 
2710  return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
2711  const Range &tril_src) {
2712  // Duplicated from LinearOperator::operator*
2713  // TODO: Template the constructor on GrowingVectorMemory vector type?
2714  GrowingVectorMemory<GVMVectorType> vector_memory;
2715  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2716 
2717  // These operators may themselves be transposed or not, so we let them
2718  // decide what the intended outcome is
2719  // We must first transpose the operators to get the right IndexSets
2720  // for the input, intermediate and result vectors
2721  const_cast<TrilinosPayload &>(first_op).transpose();
2722  const_cast<TrilinosPayload &>(second_op).transpose();
2723 
2724  // Initialize intermediate vector
2725  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2726  i->reinit(IndexSet(first_op_init_map),
2727  first_op.get_mpi_communicator(),
2728  /*bool omit_zeroing_entries =*/true);
2729 
2730  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2731  const size_type i_local_size = i->end() - i->begin();
2732  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2733  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2734  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2735  (void)second_op_init_map;
2736  Intermediate tril_int(View,
2737  first_op_init_map,
2738  const_cast<TrilinosScalar *>(i->begin()),
2739  i_local_size,
2740  1);
2741 
2742  // These operators may themselves be transposed or not, so we let them
2743  // decide what the intended outcome is
2744  second_op.Apply(tril_src, tril_int);
2745  first_op.Apply(tril_src, tril_dst);
2746  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2747  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2748 
2749  // Reset transpose flag
2750  const_cast<TrilinosPayload &>(first_op).transpose();
2751  const_cast<TrilinosPayload &>(second_op).transpose();
2752  };
2753 
2754  return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
2755  const Range &tril_src) {
2756  // Duplicated from LinearOperator::operator*
2757  // TODO: Template the constructor on GrowingVectorMemory vector type?
2758  GrowingVectorMemory<GVMVectorType> vector_memory;
2759  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2760 
2761  // Initialize intermediate vector
2762  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2763  i->reinit(IndexSet(first_op_init_map),
2764  first_op.get_mpi_communicator(),
2765  /*bool omit_zeroing_entries =*/true);
2766 
2767  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2768  const size_type i_local_size = i->end() - i->begin();
2769  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2770  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2771  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2772  (void)second_op_init_map;
2773  Intermediate tril_int(View,
2774  first_op_init_map,
2775  const_cast<TrilinosScalar *>(i->begin()),
2776  i_local_size,
2777  1);
2778 
2779  // These operators may themselves be transposed or not, so we let them
2780  // decide what the intended outcome is
2781  second_op.ApplyInverse(tril_src, tril_int);
2782  first_op.ApplyInverse(tril_src, tril_dst);
2783  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2784  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2785  };
2786 
2787  return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
2788  const Domain &tril_src) {
2789  // Duplicated from LinearOperator::operator*
2790  // TODO: Template the constructor on GrowingVectorMemory vector type?
2791  GrowingVectorMemory<GVMVectorType> vector_memory;
2792  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2793 
2794  // These operators may themselves be transposed or not, so we let them
2795  // decide what the intended outcome is
2796  // We must first transpose the operators to get the right IndexSets
2797  // for the input, intermediate and result vectors
2798  const_cast<TrilinosPayload &>(first_op).transpose();
2799  const_cast<TrilinosPayload &>(second_op).transpose();
2800 
2801  // Initialize intermediate vector
2802  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2803  i->reinit(IndexSet(first_op_init_map),
2804  first_op.get_mpi_communicator(),
2805  /*bool omit_zeroing_entries =*/true);
2806 
2807  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2808  const size_type i_local_size = i->end() - i->begin();
2809  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2810  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2811  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2812  (void)second_op_init_map;
2813  Intermediate tril_int(View,
2814  first_op_init_map,
2815  const_cast<TrilinosScalar *>(i->begin()),
2816  i_local_size,
2817  1);
2818 
2819  // These operators may themselves be transposed or not, so we let them
2820  // decide what the intended outcome is
2821  second_op.ApplyInverse(tril_src, tril_int);
2822  first_op.ApplyInverse(tril_src, tril_dst);
2823  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2824  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2825 
2826  // Reset transpose flag
2827  const_cast<TrilinosPayload &>(first_op).transpose();
2828  const_cast<TrilinosPayload &>(second_op).transpose();
2829  };
2830 
2831  return return_op;
2832  }
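      // Editorial usage sketch (hedged): this payload addition backs sums
      // of LinearOperator objects built on Trilinos matrices, e.g. via
      // TrilinosWrappers::linear_operator() from
      // <deal.II/lac/trilinos_linear_operator.h>:
      //
      //   using V = TrilinosWrappers::MPI::Vector;
      //   const auto op_a = TrilinosWrappers::linear_operator<V>(A);
      //   const auto op_b = TrilinosWrappers::linear_operator<V>(B);
      //   const auto op_sum = op_a + op_b; // applies A and B, then adds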
2833 
2834 
2835 
2836  TrilinosPayload
2837  operator*(const TrilinosPayload &first_op,
2838  const TrilinosPayload &second_op)
2839  {
2840  using Domain = typename TrilinosPayload::Domain;
2841  using Range = typename TrilinosPayload::Range;
2842  using Intermediate = typename TrilinosPayload::VectorType;
2843  using GVMVectorType = TrilinosWrappers::MPI::Vector;
2844 
2845  Assert(first_op.locally_owned_domain_indices() ==
2846  second_op.locally_owned_range_indices(),
2847  ExcMessage(
2848  "Operators are set to work on incompatible IndexSets."));
2849 
2850  TrilinosPayload return_op(first_op, second_op);
2851 
2852  // Capture by copy so the payloads are always valid
2853  return_op.vmult = [first_op, second_op](Range & tril_dst,
2854  const Domain &tril_src) {
2855  // Duplicated from LinearOperator::operator*
2856  // TODO: Template the constructor on GrowingVectorMemory vector type?
2857  GrowingVectorMemory<GVMVectorType> vector_memory;
2858  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2859 
2860  // Initialize intermediate vector
2861  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2862  i->reinit(IndexSet(first_op_init_map),
2863  first_op.get_mpi_communicator(),
2864  /*bool omit_zeroing_entries =*/true);
2865 
2866  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2867  const size_type i_local_size = i->end() - i->begin();
2868  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2869  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2870  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2871  (void)second_op_init_map;
2872  Intermediate tril_int(View,
2873  first_op_init_map,
2874  const_cast<TrilinosScalar *>(i->begin()),
2875  i_local_size,
2876  1);
2877 
2878  // These operators may themselves be transposed or not, so we let them
2879  // decide what the intended outcome is
2880  second_op.Apply(tril_src, tril_int);
2881  first_op.Apply(tril_int, tril_dst);
2882  };
2883 
2884  return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
2885  const Range &tril_src) {
2886  // Duplicated from LinearOperator::operator*
2887  // TODO: Template the constructor on GrowingVectorMemory vector type?
2888  GrowingVectorMemory<GVMVectorType> vector_memory;
2889  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2890 
2891  // These operators may themselves be transposed or not, so we let them
2892  // decide what the intended outcome is
2893  // We must first transpose the operators to get the right IndexSets
2894  // for the input, intermediate and result vectors
2895  const_cast<TrilinosPayload &>(first_op).transpose();
2896  const_cast<TrilinosPayload &>(second_op).transpose();
2897 
2898  // Initialize intermediate vector
2899  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2900  i->reinit(IndexSet(first_op_init_map),
2901  first_op.get_mpi_communicator(),
2902  /*bool omit_zeroing_entries =*/true);
2903 
2904  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2905  const size_type i_local_size = i->end() - i->begin();
2906  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2907  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2908  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2909  (void)second_op_init_map;
2910  Intermediate tril_int(View,
2911  first_op_init_map,
2912  const_cast<TrilinosScalar *>(i->begin()),
2913  i_local_size,
2914  1);
2915 
2916  // Apply the operators in the reverse order to vmult
2917  first_op.Apply(tril_src, tril_int);
2918  second_op.Apply(tril_int, tril_dst);
2919 
2920  // Reset transpose flag
2921  const_cast<TrilinosPayload &>(first_op).transpose();
2922  const_cast<TrilinosPayload &>(second_op).transpose();
2923  };
2924 
2925  return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
2926  const Range &tril_src) {
2927  // Duplicated from LinearOperator::operator*
2928  // TODO: Template the constructor on GrowingVectorMemory vector type?
2929  GrowingVectorMemory<GVMVectorType> vector_memory;
2930  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2931 
2932  // Initialize intermediate vector
2933  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2934  i->reinit(IndexSet(first_op_init_map),
2935  first_op.get_mpi_communicator(),
2936  /*bool omit_zeroing_entries =*/true);
2937 
2938  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2939  const size_type i_local_size = i->end() - i->begin();
2940  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2941  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2942  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2943  (void)second_op_init_map;
2944  Intermediate tril_int(View,
2945  first_op_init_map,
2946  const_cast<TrilinosScalar *>(i->begin()),
2947  i_local_size,
2948  1);
2949 
2950  // Apply the operators in the reverse order to vmult
2951  // and the same order as Tvmult
2952  first_op.ApplyInverse(tril_src, tril_int);
2953  second_op.ApplyInverse(tril_int, tril_dst);
2954  };
2955 
2956  return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
2957  const Domain &tril_src) {
2958  // Duplicated from LinearOperator::operator*
2959  // TODO: Template the constructor on GrowingVectorMemory vector type?
2960  GrowingVectorMemory<GVMVectorType> vector_memory;
2961  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2962 
2963  // These operators may themselves be transposed or not, so we let them
2964  // decide what the intended outcome is
2965  // We must first transpose the operators to get the right IndexSets
2966  // for the input, intermediate and result vectors
2967  const_cast<TrilinosPayload &>(first_op).transpose();
2968  const_cast<TrilinosPayload &>(second_op).transpose();
2969 
2970  // Initialize intermediate vector
2971  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2972  i->reinit(IndexSet(first_op_init_map),
2973  first_op.get_mpi_communicator(),
2974  /*bool omit_zeroing_entries =*/true);
2975 
2976  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2977  const size_type i_local_size = i->end() - i->begin();
2978  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2979  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2980  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2981  (void)second_op_init_map;
2982  Intermediate tril_int(View,
2983  first_op_init_map,
2984  const_cast<TrilinosScalar *>(i->begin()),
2985  i_local_size,
2986  1);
2987 
2988  // These operators may themselves be transposed or not, so we let them
2989  // decide what the intended outcome is
2990  // Apply the operators in the reverse order to Tvmult
2991  // and the same order as vmult
2992  second_op.ApplyInverse(tril_src, tril_int);
2993  first_op.ApplyInverse(tril_int, tril_dst);
2994 
2995  // Reset transpose flag
2996  const_cast<TrilinosPayload &>(first_op).transpose();
2997  const_cast<TrilinosPayload &>(second_op).transpose();
2998  };
2999 
3000  return return_op;
3001  }
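      // Editorial usage sketch (hedged): analogously, this payload
      // multiplication backs operator concatenation,
      //
      //   const auto op_prod = op_a * op_b; // vmult applies B, then A
      //
      // while Tvmult and the inverse applications run in the reverse
      // order, as implemented above.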
3002 
3003  } // namespace LinearOperatorImplementation
3004  } /* namespace internal */
3005 } /* namespace TrilinosWrappers */
3006 
3007 
3008 
3009 // explicit instantiations
3010 # include "trilinos_sparse_matrix.inst"
3011 
3012 # ifndef DOXYGEN
3013 // TODO: put these instantiations into generic file
3014 namespace TrilinosWrappers
3015 {
3016  template void
3017  SparseMatrix::reinit(const ::SparsityPattern &);
3018 
3019  template void
3020  SparseMatrix::reinit(const DynamicSparsityPattern &);
3021 
3022  template void
3023  SparseMatrix::reinit(const IndexSet &,
3024  const IndexSet &,
3025  const ::SparsityPattern &,
3026  const MPI_Comm &,
3027  const bool);
3028 
3029  template void
3030  SparseMatrix::reinit(const IndexSet &,
3031  const IndexSet &,
3032  const DynamicSparsityPattern &,
3033  const MPI_Comm &,
3034  const bool);
3035 
3036  template void
3037  SparseMatrix::vmult(MPI::Vector &, const MPI::Vector &) const;
3038 
3039  template void
3040  SparseMatrix::vmult(::Vector<double> &,
3041  const ::Vector<double> &) const;
3042 
3043  template void
3044  SparseMatrix::vmult(
3045  ::LinearAlgebra::distributed::Vector<double> &,
3046  const ::LinearAlgebra::distributed::Vector<double> &) const;
3047 
3048 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3049 # if defined(HAVE_TPETRA_INST_DOUBLE)
3050  template void
3051  SparseMatrix::vmult(
3052  ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3053  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3054 # endif
3055 
3056 # if defined(HAVE_TPETRA_INST_FLOAT)
3057  template void
3058  SparseMatrix::vmult(
3059  ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3060  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3061 # endif
3062 # endif
3063 
3064  template void
3065  SparseMatrix::vmult(
3066  ::LinearAlgebra::EpetraWrappers::Vector &,
3067  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3068 
3069  template void
3070  SparseMatrix::Tvmult(MPI::Vector &, const MPI::Vector &) const;
3071 
3072  template void
3073  SparseMatrix::Tvmult(::Vector<double> &,
3074  const ::Vector<double> &) const;
3075 
3076  template void
3077  SparseMatrix::Tvmult(
3078  ::LinearAlgebra::distributed::Vector<double> &,
3079  const ::LinearAlgebra::distributed::Vector<double> &) const;
3080 
3081 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3082 # if defined(HAVE_TPETRA_INST_DOUBLE)
3083  template void
3084  SparseMatrix::Tvmult(
3085  ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3086  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3087 # endif
3088 
3089 # if defined(HAVE_TPETRA_INST_FLOAT)
3090  template void
3091  SparseMatrix::Tvmult(
3092  ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3093  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3094 # endif
3095 # endif
3096 
3097  template void
3098  SparseMatrix::Tvmult(
3099  ::LinearAlgebra::EpetraWrappers::Vector &,
3100  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3101 
3102  template void
3103  SparseMatrix::vmult_add(MPI::Vector &, const MPI::Vector &) const;
3104 
3105  template void
3106  SparseMatrix::vmult_add(::Vector<double> &,
3107  const ::Vector<double> &) const;
3108 
3109  template void
3110  SparseMatrix::vmult_add(
3111  ::LinearAlgebra::distributed::Vector<double> &,
3112  const ::LinearAlgebra::distributed::Vector<double> &) const;
3113 
3114 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3115 # if defined(HAVE_TPETRA_INST_DOUBLE)
3116  template void
3117  SparseMatrix::vmult_add(
3118  ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3119  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3120 # endif
3121 
3122 # if defined(HAVE_TPETRA_INST_FLOAT)
3123  template void
3124  SparseMatrix::vmult_add(
3125  ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3126  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3127 # endif
3128 # endif
3129 
3130  template void
3131  SparseMatrix::vmult_add(
3132  ::LinearAlgebra::EpetraWrappers::Vector &,
3133  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3134 
3135  template void
3136  SparseMatrix::Tvmult_add(MPI::Vector &, const MPI::Vector &) const;
3137 
3138  template void
3139  SparseMatrix::Tvmult_add(::Vector<double> &,
3140  const ::Vector<double> &) const;
3141 
3142  template void
3143  SparseMatrix::Tvmult_add(
3144  ::LinearAlgebra::distributed::Vector<double> &,
3145  const ::LinearAlgebra::distributed::Vector<double> &) const;
3146 
3147 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3148 # if defined(HAVE_TPETRA_INST_DOUBLE)
3149  template void
3150  SparseMatrix::Tvmult_add(
3151  ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3152  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3153 # endif
3154 
3155 # if defined(HAVE_TPETRA_INST_FLOAT)
3156  template void
3157  SparseMatrix::Tvmult_add(
3158  ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3159  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3160 # endif
3161 # endif
3162 
3163  template void
3164  SparseMatrix::Tvmult_add(
3165  ::LinearAlgebra::EpetraWrappers::Vector &,
3166  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3167 } // namespace TrilinosWrappers
3168 # endif // DOXYGEN
3169 
3171 DEAL_II_NAMESPACE_CLOSE
3172 #endif // DEAL_II_WITH_TRILINOS