trilinos_sparse_matrix.cc (Reference documentation for deal.II version 9.0.0)
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2008 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/trilinos_index_access.h>
17 #include <deal.II/lac/trilinos_sparse_matrix.h>
18 
19 #ifdef DEAL_II_WITH_TRILINOS
20 
21 # include <deal.II/base/utilities.h>
22 # include <deal.II/lac/sparse_matrix.h>
23 # include <deal.II/lac/trilinos_sparsity_pattern.h>
24 # include <deal.II/lac/sparsity_pattern.h>
25 # include <deal.II/lac/dynamic_sparsity_pattern.h>
26 # include <deal.II/lac/sparsity_tools.h>
27 # include <deal.II/lac/la_parallel_vector.h>
28 # include <deal.II/lac/trilinos_index_access.h>
29 # include <deal.II/lac/trilinos_precondition.h>
30 
31 # include <Epetra_Export.h>
32 # include <ml_epetra_utils.h>
33 # include <ml_struct.h>
34 # include <Teuchos_RCP.hpp>
35 
36 #include <boost/container/small_vector.hpp>
37 #include <memory>
38 
39 DEAL_II_NAMESPACE_OPEN
40 
41 namespace TrilinosWrappers
42 {
43  namespace internal
44  {
45  template <typename VectorType>
46  double *begin(VectorType &V)
47  {
48  return V.begin();
49  }
50 
51  template <typename VectorType>
52  const double *begin(const VectorType &V)
53  {
54  return V.begin();
55  }
56 
57  template <typename VectorType>
58  double *end(VectorType &V)
59  {
60  return V.end();
61  }
62 
63  template <typename VectorType>
64  const double *end(const VectorType &V)
65  {
66  return V.end();
67  }
68 
69 #ifdef DEAL_II_WITH_MPI
70  template <>
71  double *begin(LinearAlgebra::EpetraWrappers::Vector &V)
72  {
73  return V.trilinos_vector()[0];
74  }
75 
76  template <>
77  const double *begin(const LinearAlgebra::EpetraWrappers::Vector &V)
78  {
79  return V.trilinos_vector()[0];
80  }
81 
82  template <>
83  double *end(LinearAlgebra::EpetraWrappers::Vector &V)
84  {
85  return V.trilinos_vector()[0]+V.trilinos_vector().MyLength();
86  }
87 
88  template <>
89  const double *end(const LinearAlgebra::EpetraWrappers::Vector &V)
90  {
91  return V.trilinos_vector()[0]+V.trilinos_vector().MyLength();
92  }
93 #endif
94  }
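// Editorial note: the begin()/end() helpers above give the generic kernels
// in this file a uniform way to obtain raw data pointers for the supported
// vector types. A minimal sketch of the intended use (the function name
// scale_locally is hypothetical, not part of deal.II):
//
//   template <typename VectorType>
//   void scale_locally (VectorType &v, const double s)
//   {
//     for (double *p = internal::begin(v); p != internal::end(v); ++p)
//       *p *= s;
//   }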
95 
96 
97  namespace SparseMatrixIterators
98  {
99  void
100  AccessorBase::visit_present_row ()
101  {
102  // if we are asked to visit the past-the-end line, then simply
103  // release all our caches and go on with life.
104  //
105  // do the same if the row we're supposed to visit is not locally
106  // owned. this is simply going to make non-locally owned rows
107  // look like they're empty
108  if ((this->a_row == matrix->m())
109  ||
110  (matrix->in_local_range (this->a_row) == false))
111  {
112  colnum_cache.reset ();
113  value_cache.reset ();
114 
115  return;
116  }
117 
118  // get a representation of the present row
119  int ncols;
120  TrilinosWrappers::types::int_type colnums = matrix->n();
121  if (value_cache.get() == nullptr)
122  {
123  value_cache = std::make_shared<std::vector<TrilinosScalar>>(matrix->n());
124  colnum_cache = std::make_shared<std::vector<size_type>>(matrix->n());
125  }
126  else
127  {
128  value_cache->resize (matrix->n());
129  colnum_cache->resize (matrix->n());
130  }
131 
132  int ierr = matrix->trilinos_matrix().
133  ExtractGlobalRowCopy((TrilinosWrappers::types::int_type)this->a_row,
134  colnums,
135  ncols, &((*value_cache)[0]),
136  reinterpret_cast<TrilinosWrappers::types::int_type *>(&((*colnum_cache)[0])));
137  value_cache->resize (ncols);
138  colnum_cache->resize (ncols);
139  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
140 
141  // copy it into our caches if the
142  // line isn't empty. if it is, then
143  // we've done something wrong, since
144  // we shouldn't have initialized an
145  // iterator for an empty line (what
146  // would it point to?)
147  }
148  }
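// Editorial sketch: visit_present_row() is what backs row-wise iteration
// over the matrix. Assuming a filled TrilinosWrappers::SparseMatrix A,
// walking the locally stored entries looks like this (non-local rows
// simply appear empty, as noted above):
//
//   for (SparseMatrix::const_iterator it = A.begin(); it != A.end(); ++it)
//     std::cout << "A(" << it->row() << ',' << it->column() << ") = "
//               << it->value() << '\n';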
149 
150 
151  // The constructor is actually the
152  // only point where we have to check
153  // whether we build a serial or a
154  // parallel Trilinos matrix.
155  // Actually, it does not even matter
156  // how many threads there are, but
157  // only if we use an MPI compiler or
158  // a standard compiler. So, even one
159  // thread on a configuration with
160  // MPI will still get a parallel
161  // interface.
162  SparseMatrix::SparseMatrix ()
163  :
164  column_space_map (new Epetra_Map (0, 0,
165  Utilities::Trilinos::comm_self())),
166  matrix (new Epetra_FECrsMatrix(View, *column_space_map,
167  *column_space_map, 0)),
168  last_action (Zero),
169  compressed (true)
170  {
171  matrix->FillComplete();
172  }
173 
174 
175 
176  SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
177  const size_type n_max_entries_per_row)
178  :
179  column_space_map (new Epetra_Map (input_map)),
180  matrix (new Epetra_FECrsMatrix(Copy, *column_space_map,
181  TrilinosWrappers::types::int_type(n_max_entries_per_row), false)),
182  last_action (Zero),
183  compressed (false)
184  {}
185 
186 
187 
188  SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
189  const std::vector<unsigned int> &n_entries_per_row)
190  :
191  column_space_map (new Epetra_Map (input_map)),
192  matrix (new Epetra_FECrsMatrix
193  (Copy, *column_space_map,
194  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
195  false)),
196  last_action (Zero),
197  compressed (false)
198  {}
199 
200 
201 
202  SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
203  const Epetra_Map &input_col_map,
204  const size_type n_max_entries_per_row)
205  :
206  column_space_map (new Epetra_Map (input_col_map)),
207  matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
208  TrilinosWrappers::types::int_type(n_max_entries_per_row), false)),
209  last_action (Zero),
210  compressed (false)
211  {}
212 
213 
214 
215  SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
216  const Epetra_Map &input_col_map,
217  const std::vector<unsigned int> &n_entries_per_row)
218  :
219  column_space_map (new Epetra_Map (input_col_map)),
220  matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
221  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
222  false)),
223  last_action (Zero),
224  compressed (false)
225  {}
226 
227 
228 
229  SparseMatrix::SparseMatrix (const size_type m,
230  const size_type n,
231  const unsigned int n_max_entries_per_row)
232  :
233  column_space_map (new Epetra_Map (static_cast<TrilinosWrappers::types::int_type>(n), 0,
234  Utilities::Trilinos::comm_self())),
235 
236  // on one processor only, we know how the
237  // columns of the matrix will be
238  // distributed (everything on one
239  // processor), so we can hand in this
240  // information to the constructor. we
241  // can't do so in parallel, where the
242  // information from columns is only
243  // available when entries have been added
244  matrix (new Epetra_FECrsMatrix(Copy,
245  Epetra_Map (static_cast<TrilinosWrappers::types::int_type>(m), 0,
246  Utilities::Trilinos::comm_self()),
247  *column_space_map,
248  n_max_entries_per_row,
249  false)),
250  last_action (Zero),
251  compressed (false)
252  {}
253 
254 
255 
256  SparseMatrix::SparseMatrix (const size_type m,
257  const size_type n,
258  const std::vector<unsigned int> &n_entries_per_row)
259  :
260  column_space_map (new Epetra_Map (static_cast<TrilinosWrappers::types::int_type>(n), 0,
261  Utilities::Trilinos::comm_self())),
262  matrix (new Epetra_FECrsMatrix(Copy,
263  Epetra_Map (static_cast<TrilinosWrappers::types::int_type>(m), 0,
264  Utilities::Trilinos::comm_self()),
265  *column_space_map,
266  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
267  false)),
268  last_action (Zero),
269  compressed (false)
270  {}
271 
272 
273 
274  SparseMatrix::SparseMatrix (const IndexSet &parallel_partitioning,
275  const MPI_Comm &communicator,
276  const unsigned int n_max_entries_per_row)
277  :
278  column_space_map (new Epetra_Map(parallel_partitioning.
279  make_trilinos_map(communicator, false))),
280  matrix (new Epetra_FECrsMatrix(Copy,
281  *column_space_map,
282  n_max_entries_per_row,
283  false)),
284  last_action (Zero),
285  compressed (false)
286  {}
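// Editorial sketch: typical use of the IndexSet-based constructors; the
// sizes and the serial communicator are assumptions of this example:
//
//   const unsigned int n_rows = 100;
//   IndexSet owned_rows (n_rows);
//   owned_rows.add_range (0, n_rows); // this process owns all rows
//   TrilinosWrappers::SparseMatrix A (owned_rows, MPI_COMM_SELF, 5);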
287 
288 
289 
290  SparseMatrix::SparseMatrix (const IndexSet &parallel_partitioning,
291  const MPI_Comm &communicator,
292  const std::vector<unsigned int> &n_entries_per_row)
293  :
294  column_space_map (new Epetra_Map(parallel_partitioning.
295  make_trilinos_map(communicator, false))),
296  matrix (new Epetra_FECrsMatrix(Copy,
297  *column_space_map,
298  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
299  false)),
300  last_action (Zero),
301  compressed (false)
302  {}
303 
304 
305 
306  SparseMatrix::SparseMatrix (const IndexSet &row_parallel_partitioning,
307  const IndexSet &col_parallel_partitioning,
308  const MPI_Comm &communicator,
309  const size_type n_max_entries_per_row)
310  :
311  column_space_map (new Epetra_Map(col_parallel_partitioning.
312  make_trilinos_map(communicator, false))),
313  matrix (new Epetra_FECrsMatrix(Copy,
314  row_parallel_partitioning.
315  make_trilinos_map(communicator, false),
316  n_max_entries_per_row,
317  false)),
318  last_action (Zero),
319  compressed (false)
320  {}
321 
322 
323 
324  SparseMatrix::SparseMatrix (const IndexSet &row_parallel_partitioning,
325  const IndexSet &col_parallel_partitioning,
326  const MPI_Comm &communicator,
327  const std::vector<unsigned int> &n_entries_per_row)
328  :
329  column_space_map (new Epetra_Map(col_parallel_partitioning.
330  make_trilinos_map(communicator, false))),
331  matrix (new Epetra_FECrsMatrix(Copy,
332  row_parallel_partitioning.
333  make_trilinos_map(communicator, false),
334  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
335  false)),
336  last_action (Zero),
337  compressed (false)
338  {}
339 
340 
341 
342  SparseMatrix::SparseMatrix (const SparsityPattern &sparsity_pattern)
343  :
344  column_space_map (new Epetra_Map (sparsity_pattern.domain_partitioner())),
345  matrix (new Epetra_FECrsMatrix(Copy,
346  sparsity_pattern.trilinos_sparsity_pattern(),
347  false)),
348  last_action (Zero),
349  compressed (true)
350  {
351  Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true,
352  ExcMessage("The Trilinos sparsity pattern has not been compressed."));
353  compress (::VectorOperation::insert);
354  }
355 
356 
357 
358  SparseMatrix::SparseMatrix (SparseMatrix &&other) noexcept
359  :
360  column_space_map(std::move(other.column_space_map)),
361  matrix(std::move(other.matrix)),
362  nonlocal_matrix(std::move(other.nonlocal_matrix)),
363  nonlocal_matrix_exporter(std::move(other.nonlocal_matrix_exporter)),
364  last_action(other.last_action),
365  compressed(other.compressed)
366  {
367  other.last_action = Zero;
368  other.compressed = false;
369  }
370 
371 
372 
373  void
374  SparseMatrix::copy_from (const SparseMatrix &rhs)
375  {
376  if (this == &rhs)
377  return;
378 
379  nonlocal_matrix.reset();
380  nonlocal_matrix_exporter.reset();
381 
382  // check whether we need to update the whole matrix layout (we have
383  // different maps or if we detect a row where the columns of the two
384  // matrices do not match)
385  bool needs_deep_copy =
386  !matrix->RowMap().SameAs(rhs.matrix->RowMap()) ||
387  !matrix->ColMap().SameAs(rhs.matrix->ColMap()) ||
388  !matrix->DomainMap().SameAs(rhs.matrix->DomainMap()) ||
389  n_nonzero_elements() != rhs.n_nonzero_elements();
390  if (!needs_deep_copy)
391  {
392  const std::pair<size_type, size_type>
393  local_range = rhs.local_range();
394 
395  // Try to copy all the rows of the matrix one by one. In case of error
396  // (i.e., the column indices are different), we need to abort and blow
397  // away the matrix.
398  for (size_type row=local_range.first; row < local_range.second; ++row)
399  {
400  const int row_local =
401  matrix->RowMap().LID(static_cast<TrilinosWrappers::types::int_type>(row));
402 
403  int n_entries, rhs_n_entries;
404  TrilinosScalar *value_ptr, *rhs_value_ptr;
405  int *index_ptr, *rhs_index_ptr;
406  int ierr = rhs.matrix->ExtractMyRowView (row_local, rhs_n_entries,
407  rhs_value_ptr, rhs_index_ptr);
408  (void)ierr;
409  Assert (ierr == 0, ExcTrilinosError(ierr));
410 
411  ierr = matrix->ExtractMyRowView (row_local, n_entries, value_ptr,
412  index_ptr);
413  Assert (ierr == 0, ExcTrilinosError(ierr));
414 
415  if (n_entries != rhs_n_entries ||
416  std::memcmp(static_cast<void *>(index_ptr),
417  static_cast<void *>(rhs_index_ptr),
418  sizeof(int)*n_entries) != 0)
419  {
420  needs_deep_copy = true;
421  break;
422  }
423 
424  for (int i=0; i<n_entries; ++i)
425  value_ptr[i] = rhs_value_ptr[i];
426  }
427  }
428 
429  if (needs_deep_copy)
430  {
431  column_space_map = std_cxx14::make_unique<Epetra_Map>(rhs.domain_partitioner());
432 
433  // release memory before reallocation
434  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);
435 
436  matrix->FillComplete(*column_space_map, matrix->RowMap());
437  }
438 
439  if (rhs.nonlocal_matrix.get() != nullptr)
440  nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>(Copy, rhs.nonlocal_matrix->Graph());
441  }
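// Editorial note: copy_from() therefore performs a cheap row-by-row value
// copy whenever both matrices share their maps and column structure, and
// silently falls back to a full deep copy otherwise, e.g.:
//
//   TrilinosWrappers::SparseMatrix B;
//   B.copy_from (A); // deep copy here, since B starts out empty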
442 
443 
444 
445  namespace
446  {
447  typedef SparseMatrix::size_type size_type;
448 
449  template <typename SparsityPatternType>
450  void
451  reinit_matrix (const Epetra_Map &input_row_map,
452  const Epetra_Map &input_col_map,
453  const SparsityPatternType &sparsity_pattern,
454  const bool exchange_data,
455  std::unique_ptr<Epetra_Map> &column_space_map,
456  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
457  std::unique_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
458  std::unique_ptr<Epetra_Export> &nonlocal_matrix_exporter)
459  {
460  // release memory before reallocation
461  matrix.reset();
462  nonlocal_matrix.reset();
463  nonlocal_matrix_exporter.reset();
464 
465  if (input_row_map.Comm().MyPID() == 0)
466  {
467  AssertDimension (sparsity_pattern.n_rows(),
468  static_cast<size_type>(TrilinosWrappers::n_global_elements(input_row_map)));
469  AssertDimension (sparsity_pattern.n_cols(),
470  static_cast<size_type>(TrilinosWrappers::n_global_elements(input_col_map)));
471  }
472 
473  column_space_map = std_cxx14::make_unique<Epetra_Map>(input_col_map);
474 
475  // if we want to exchange data, build a usual Trilinos sparsity pattern
476  // and let that handle the exchange. otherwise, manually create a
477  // CrsGraph, which consumes considerably less memory because it can set
478  // correct number of indices right from the start
479  if (exchange_data)
480  {
481  SparsityPattern trilinos_sparsity;
482  trilinos_sparsity.reinit (input_row_map, input_col_map,
483  sparsity_pattern, exchange_data);
484  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>
485  (Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);
486 
487  return;
488  }
489 
490  const size_type first_row = TrilinosWrappers::min_my_gid(input_row_map),
491  last_row = TrilinosWrappers::max_my_gid(input_row_map)+1;
492  std::vector<int> n_entries_per_row(last_row-first_row);
493 
494  for (size_type row=first_row; row<last_row; ++row)
495  n_entries_per_row[row-first_row] = sparsity_pattern.row_length(row);
496 
497  // The deal.II notion of a sparsity pattern corresponds to the Epetra
498  // concept of a Graph. Hence, we generate a graph by copying the
499  // sparsity pattern into it, and then build up the matrix from the
500  // graph. This is considerably faster than directly filling elements
501  // into the matrix. Moreover, it consumes less memory, since the
502  // internal reordering is done on ints only, and we can leave the
503  // doubles aside.
504 
505  // for more than one processor, need to specify only row map first and
506  // let the matrix entries decide about the column map (which says which
507  // columns are present in the matrix, not to be confused with the
508  // col_map that tells how the domain dofs of the matrix will be
509  // distributed). for only one processor, we can directly assign the
510  // columns as well. Compare this with bug # 4123 in the Sandia Bugzilla.
511  std::unique_ptr<Epetra_CrsGraph> graph;
512  if (input_row_map.Comm().NumProc() > 1)
513  graph = std_cxx14::make_unique<Epetra_CrsGraph>(Copy, input_row_map,
514  n_entries_per_row.data(), true);
515  else
516  graph = std_cxx14::make_unique<Epetra_CrsGraph>(Copy, input_row_map, input_col_map,
517  n_entries_per_row.data(), true);
518 
519  // This function assumes that the sparsity pattern sits on all
520  // processors (completely). The parallel version uses an Epetra graph
521  // that is already distributed.
522 
523  // now insert the indices
524  std::vector<TrilinosWrappers::types::int_type> row_indices;
525 
526  for (size_type row=first_row; row<last_row; ++row)
527  {
528  const int row_length = sparsity_pattern.row_length(row);
529  if (row_length == 0)
530  continue;
531 
532  row_indices.resize (row_length, -1);
533  {
534  typename SparsityPatternType::iterator p = sparsity_pattern.begin(row);
535  for (size_type col=0; p != sparsity_pattern.end(row); ++p, ++col)
536  row_indices[col] = p->column();
537  }
538  graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
539  row_indices.data());
540  }
541 
542  // Eventually, optimize the graph structure (sort indices, make memory
543  // contiguous, etc). note that the documentation of the function indeed
544  // states that we first need to provide the column (domain) map and then
545  // the row (range) map
546  graph->FillComplete(input_col_map, input_row_map);
547  graph->OptimizeStorage();
548 
549  // check whether we got the number of columns right.
550  const size_type n_global_cols = static_cast<size_type>(TrilinosWrappers::n_global_cols(*graph));
551  AssertDimension (sparsity_pattern.n_cols(), n_global_cols);
552  (void)n_global_cols;
553 
554  // And now finally generate the matrix.
555  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
556  }
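// Editorial sketch of the graph-then-matrix idiom used above, spelled out
// in plain Epetra calls for a serial 3x3 tridiagonal layout (all names are
// local to this example):
//
//   Epetra_SerialComm comm;
//   Epetra_Map map (3, 0, comm);
//   int entries_per_row[] = {2, 3, 2};
//   Epetra_CrsGraph graph (Copy, map, entries_per_row, true);
//   int cols0[] = {0, 1};    graph.InsertGlobalIndices (0, 2, cols0);
//   int cols1[] = {0, 1, 2}; graph.InsertGlobalIndices (1, 3, cols1);
//   int cols2[] = {1, 2};    graph.InsertGlobalIndices (2, 2, cols2);
//   graph.FillComplete ();
//   graph.OptimizeStorage ();
//   Epetra_FECrsMatrix mat (Copy, graph, false);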
557 
558 
559 
560  // for the non-local graph, we need to circumvent the problem that some
561  // processors will not add into the non-local graph at all: We do not want
562  // to insert dummy elements on >5000 processors because that gets very
563  // slow. Thus, we need to set the IndicesAreGlobal flag directly in
564  // Epetra_CrsGraph. Since that setter is protected, we expose it by
565  // deriving a class from Epetra_CrsGraph for the purpose of creating the
566  // data structure
567  class Epetra_CrsGraphMod : public Epetra_CrsGraph
568  {
569  public:
570  Epetra_CrsGraphMod (const Epetra_Map &row_map,
571  const int *n_entries_per_row)
572  :
573  Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
574  {};
575 
576  void SetIndicesAreGlobal()
577  {
578  this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
579  }
580  };
581 
582 
583  // specialization for DynamicSparsityPattern which can provide us with
584  // more information about the non-locally owned rows
585  template <>
586  void
587  reinit_matrix (const Epetra_Map &input_row_map,
588  const Epetra_Map &input_col_map,
589  const DynamicSparsityPattern &sparsity_pattern,
590  const bool exchange_data,
591  std::unique_ptr<Epetra_Map> &column_space_map,
592  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
593  std::unique_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
594  std::unique_ptr<Epetra_Export> &nonlocal_matrix_exporter)
595  {
596  matrix.reset();
597  nonlocal_matrix.reset();
598  nonlocal_matrix_exporter.reset();
599 
600  AssertDimension (sparsity_pattern.n_rows(),
601  static_cast<size_type>(TrilinosWrappers::n_global_elements(input_row_map)));
602  AssertDimension (sparsity_pattern.n_cols(),
603  static_cast<size_type>(TrilinosWrappers::n_global_elements(input_col_map)));
604 
605  column_space_map = std_cxx14::make_unique<Epetra_Map>(input_col_map);
606 
607  IndexSet relevant_rows (sparsity_pattern.row_index_set());
608  // serial case
609  if (relevant_rows.size() == 0)
610  {
611  relevant_rows.set_size(TrilinosWrappers::n_global_elements(input_row_map));
612  relevant_rows.add_range(0, TrilinosWrappers::n_global_elements(input_row_map));
613  }
614  relevant_rows.compress();
615  Assert(relevant_rows.n_elements() >= static_cast<unsigned int>(input_row_map.NumMyElements()),
616  ExcMessage("Locally relevant rows of sparsity pattern must contain "
617  "all locally owned rows"));
618 
619  // check whether the relevant rows correspond to exactly the same map as
620  // the owned rows. In that case, do not create the nonlocal graph and
621  // fill the columns by demand
622  bool have_ghost_rows = false;
623  {
624  std::vector<::types::global_dof_index> indices;
625  relevant_rows.fill_index_vector(indices);
626  Epetra_Map relevant_map (TrilinosWrappers::types::int_type(-1),
627  TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
628  (indices.empty() ? nullptr :
629  reinterpret_cast<TrilinosWrappers::types::int_type *>(indices.data())),
630  0, input_row_map.Comm());
631  if (relevant_map.SameAs(input_row_map))
632  have_ghost_rows = false;
633  else
634  have_ghost_rows = true;
635  }
636 
637  const unsigned int n_rows = relevant_rows.n_elements();
638  std::vector<TrilinosWrappers::types::int_type> ghost_rows;
639  std::vector<int> n_entries_per_row(input_row_map.NumMyElements());
640  std::vector<int> n_entries_per_ghost_row;
641  for (unsigned int i=0, own=0; i<n_rows; ++i)
642  {
643  const TrilinosWrappers::types::int_type global_row =
644  relevant_rows.nth_index_in_set(i);
645  if (input_row_map.MyGID(global_row))
646  n_entries_per_row[own++] = sparsity_pattern.row_length(global_row);
647  else if (sparsity_pattern.row_length(global_row) > 0)
648  {
649  ghost_rows.push_back(global_row);
650  n_entries_per_ghost_row.push_back(sparsity_pattern.row_length(global_row));
651  }
652  }
653 
654  Epetra_Map off_processor_map(-1, ghost_rows.size(),
655  (ghost_rows.size()>0)?(ghost_rows.data()):nullptr,
656  0, input_row_map.Comm());
657 
658  std::unique_ptr<Epetra_CrsGraph> graph;
659  std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
660  if (input_row_map.Comm().NumProc() > 1)
661  {
662  graph = std_cxx14::make_unique<Epetra_CrsGraph>(Copy, input_row_map,
663  (n_entries_per_row.size()>0)?(n_entries_per_row.data()):nullptr,
664  exchange_data ? false : true);
665  if (have_ghost_rows == true)
666  nonlocal_graph = std_cxx14::make_unique<Epetra_CrsGraphMod>(off_processor_map,
667  n_entries_per_ghost_row.data());
668  }
669  else
670  graph = std_cxx14::make_unique<Epetra_CrsGraph>(Copy, input_row_map, input_col_map,
671  (n_entries_per_row.size()>0)?(n_entries_per_row.data()):nullptr,
672  true);
673 
674  // now insert the indices, select between the right matrix
675  std::vector<TrilinosWrappers::types::int_type> row_indices;
676 
677  for (unsigned int i=0; i<n_rows; ++i)
678  {
679  const TrilinosWrappers::types::int_type global_row =
680  relevant_rows.nth_index_in_set(i);
681  const int row_length = sparsity_pattern.row_length(global_row);
682  if (row_length == 0)
683  continue;
684 
685  row_indices.resize (row_length, -1);
686  for (int col=0; col < row_length; ++col)
687  row_indices[col] = sparsity_pattern.column_number(global_row, col);
688 
689  if (input_row_map.MyGID(global_row))
690  graph->InsertGlobalIndices (global_row, row_length, row_indices.data());
691  else
692  {
693  Assert(nonlocal_graph.get() != nullptr, ExcInternalError());
694  nonlocal_graph->InsertGlobalIndices (global_row, row_length,
695  row_indices.data());
696  }
697  }
698 
699  // finalize nonlocal graph and create nonlocal matrix
700  if (nonlocal_graph.get() != nullptr)
701  {
702  // must make sure the IndicesAreGlobal flag is set on all processors
703  // because some processors might not call InsertGlobalIndices (and
704  // we do not want to insert dummy indices on all processors for
705  // large-scale simulations due to the bad impact on performance)
706  nonlocal_graph->SetIndicesAreGlobal();
707  Assert(nonlocal_graph->IndicesAreGlobal() == true,
708  ExcInternalError());
709  nonlocal_graph->FillComplete(input_col_map, input_row_map);
710  nonlocal_graph->OptimizeStorage();
711 
712  // insert data from nonlocal graph into the final sparsity pattern
713  if (exchange_data)
714  {
715  Epetra_Export exporter(nonlocal_graph->RowMap(), input_row_map);
716  int ierr = graph->Export(*nonlocal_graph, exporter, Add);
717  (void)ierr;
718  Assert (ierr==0, ExcTrilinosError(ierr));
719  }
720 
721  nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);
722  }
723 
724  graph->FillComplete(input_col_map, input_row_map);
725  graph->OptimizeStorage();
726 
727  AssertDimension (sparsity_pattern.n_cols(),static_cast<size_type>(
728  TrilinosWrappers::n_global_cols(*graph)));
729 
730  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
731  }
732  }
733 
734 
735 
736  template <typename SparsityPatternType>
737  void
738  SparseMatrix::reinit (const SparsityPatternType &sparsity_pattern)
739  {
740  const Epetra_Map rows (static_cast<TrilinosWrappers::types::int_type>(sparsity_pattern.n_rows()),
741  0,
742  Utilities::Trilinos::comm_self());
743  const Epetra_Map columns (static_cast<TrilinosWrappers::types::int_type>(sparsity_pattern.n_cols()),
744  0,
745  Utilities::Trilinos::comm_self());
746 
747  reinit_matrix (rows, columns, sparsity_pattern, false,
748  column_space_map, matrix, nonlocal_matrix,
749  nonlocal_matrix_exporter);
750  }
751 
752 
753 
754  template <typename SparsityPatternType>
755  void
756  SparseMatrix::reinit (const Epetra_Map &input_map,
757  const SparsityPatternType &sparsity_pattern,
758  const bool exchange_data)
759  {
760  reinit_matrix (input_map, input_map, sparsity_pattern, exchange_data,
761  column_space_map, matrix, nonlocal_matrix,
762  nonlocal_matrix_exporter);
763  }
764 
765 
766 
767 
768 
769 
770  template <typename SparsityPatternType>
771  inline
772  void SparseMatrix::reinit (const IndexSet &row_parallel_partitioning,
773  const IndexSet &col_parallel_partitioning,
774  const SparsityPatternType &sparsity_pattern,
775  const MPI_Comm &communicator,
776  const bool exchange_data)
777  {
778  Epetra_Map row_map =
779  row_parallel_partitioning.make_trilinos_map (communicator, false);
780  Epetra_Map col_map =
781  col_parallel_partitioning.make_trilinos_map (communicator, false);
782  reinit_matrix (row_map, col_map, sparsity_pattern, exchange_data,
783  column_space_map, matrix, nonlocal_matrix,
784  nonlocal_matrix_exporter);
785 
786  // In the end, the matrix needs to be compressed in order to be really
787  // ready.
788  last_action = Zero;
789  compress (::VectorOperation::insert);
790  }
791 
792 
793 
794  template <typename SparsityPatternType>
795  inline
796  void SparseMatrix::reinit (const Epetra_Map &row_map,
797  const Epetra_Map &col_map,
798  const SparsityPatternType &sparsity_pattern,
799  const bool exchange_data)
800  {
801  reinit_matrix (row_map, col_map, sparsity_pattern, exchange_data,
802  column_space_map, matrix, nonlocal_matrix,
803  nonlocal_matrix_exporter);
804 
805  // In the end, the matrix needs to be compressed in order to be really
806  // ready.
807  last_action = Zero;
808  compress (::VectorOperation::insert);
809  }
810 
811 
812 
813 
814  void
815  SparseMatrix::reinit (const SparsityPattern &sparsity_pattern)
816  {
817  matrix.reset ();
818  nonlocal_matrix_exporter.reset();
819 
820  // reinit with a (parallel) Trilinos sparsity pattern.
821  column_space_map = std_cxx14::make_unique<Epetra_Map>
822  (sparsity_pattern.domain_partitioner());
823  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>
824  (Copy, sparsity_pattern.trilinos_sparsity_pattern(), false);
825 
826  if (sparsity_pattern.nonlocal_graph.get() != nullptr)
827  nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>(Copy, *sparsity_pattern.nonlocal_graph);
828  else
829  nonlocal_matrix.reset ();
830 
831  last_action = Zero;
832  compress (::VectorOperation::insert);
833  }
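// Editorial sketch: the usual setup path through these reinit() overloads
// during assembly; dof_handler, owned_dofs and mpi_comm are assumptions of
// this example:
//
//   DynamicSparsityPattern dsp (owned_dofs.size());
//   DoFTools::make_sparsity_pattern (dof_handler, dsp);
//   TrilinosWrappers::SparseMatrix A;
//   A.reinit (owned_dofs, owned_dofs, dsp, mpi_comm);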
834 
835 
836 
837  void
838  SparseMatrix::reinit (const SparseMatrix &sparse_matrix)
839  {
840  if (this == &sparse_matrix)
841  return;
842 
843  column_space_map = std_cxx14::make_unique<Epetra_Map>(sparse_matrix.domain_partitioner());
844  matrix.reset ();
845  nonlocal_matrix_exporter.reset();
846  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>
847  (Copy, sparse_matrix.trilinos_sparsity_pattern(), false);
848 
849  if (sparse_matrix.nonlocal_matrix != nullptr)
850  nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>
851  (Copy, sparse_matrix.nonlocal_matrix->Graph());
852  else
853  nonlocal_matrix.reset();
854 
855  last_action = Zero;
856  compress (::VectorOperation::insert);
857  }
858 
859 
860 
861  template <typename number>
862  inline
863  void SparseMatrix::reinit (const IndexSet &row_parallel_partitioning,
864  const IndexSet &col_parallel_partitioning,
865  const ::SparseMatrix<number> &dealii_sparse_matrix,
866  const MPI_Comm &communicator,
867  const double drop_tolerance,
868  const bool copy_values,
869  const ::SparsityPattern *use_this_sparsity)
870  {
871  if (copy_values == false)
872  {
873  // in case we do not copy values, just
874  // call the other function.
875  if (use_this_sparsity == nullptr)
876  reinit (row_parallel_partitioning, col_parallel_partitioning,
877  dealii_sparse_matrix.get_sparsity_pattern(),
878  communicator, false);
879  else
880  reinit (row_parallel_partitioning, col_parallel_partitioning,
881  *use_this_sparsity, communicator, false);
882  return;
883  }
884 
885  const size_type n_rows = dealii_sparse_matrix.m();
886 
887  AssertDimension (row_parallel_partitioning.size(), n_rows);
888  AssertDimension (col_parallel_partitioning.size(), dealii_sparse_matrix.n());
889 
890  const ::SparsityPattern &sparsity_pattern =
891  (use_this_sparsity!=nullptr)? *use_this_sparsity :
892  dealii_sparse_matrix.get_sparsity_pattern();
893 
894  if (matrix.get() == nullptr ||
895  m() != n_rows ||
896  n_nonzero_elements() != sparsity_pattern.n_nonzero_elements())
897  {
898  reinit (row_parallel_partitioning, col_parallel_partitioning,
899  sparsity_pattern, communicator, false);
900  }
901 
902  // fill the values. the same as above: go through all rows of the
903  // matrix, and then all columns. since the sparsity patterns of the
904  // input matrix and the specified sparsity pattern might be different,
905  // need to go through the row for both these sparsity structures
906  // simultaneously in order to really set the correct values.
907  size_type maximum_row_length = matrix->MaxNumEntries();
908  std::vector<size_type> row_indices (maximum_row_length);
909  std::vector<TrilinosScalar> values (maximum_row_length);
910 
911  for (size_type row=0; row<n_rows; ++row)
912  // see if the row is locally stored on this processor
913  if (row_parallel_partitioning.is_element(row) == true)
914  {
915  ::SparsityPattern::iterator select_index =
916  sparsity_pattern.begin(row);
917  typename ::SparseMatrix<number>::const_iterator it =
918  dealii_sparse_matrix.begin(row);
919  size_type col = 0;
920  if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
921  {
922  // optimized diagonal
923  AssertDimension(it->column(), row);
924  if (std::fabs(it->value()) > drop_tolerance)
925  {
926  values[col] = it->value();
927  row_indices[col++] = it->column();
928  }
929  ++select_index;
930  ++it;
931  }
932 
933  while (it != dealii_sparse_matrix.end(row) &&
934  select_index != sparsity_pattern.end(row))
935  {
936  while (select_index->column() < it->column() &&
937  select_index != sparsity_pattern.end(row))
938  ++select_index;
939  while (it->column() < select_index->column() &&
940  it != dealii_sparse_matrix.end(row))
941  ++it;
942 
943  if (it == dealii_sparse_matrix.end(row))
944  break;
945  if (std::fabs(it->value()) > drop_tolerance)
946  {
947  values[col] = it->value();
948  row_indices[col++] = it->column();
949  }
950  ++select_index;
951  ++it;
952  }
953  set (row, col, reinterpret_cast<size_type *>(row_indices.data()),
954  values.data(), false);
955  }
956  compress (::VectorOperation::insert);
957  }
958 
959 
960 
961  template <typename number>
962  void
963  SparseMatrix::reinit (const ::SparseMatrix<number> &dealii_sparse_matrix,
964  const double drop_tolerance,
965  const bool copy_values,
966  const ::SparsityPattern *use_this_sparsity)
967  {
968  reinit (complete_index_set(dealii_sparse_matrix.m()),
969  complete_index_set(dealii_sparse_matrix.n()),
970  dealii_sparse_matrix, MPI_COMM_SELF, drop_tolerance,
971  copy_values, use_this_sparsity);
972  }
973 
974 
975 
976  template <typename number>
977  void
978  SparseMatrix::reinit (const Epetra_Map &input_map,
979  const ::SparseMatrix<number> &dealii_sparse_matrix,
980  const double drop_tolerance,
981  const bool copy_values,
982  const ::SparsityPattern *use_this_sparsity)
983  {
984  reinit (IndexSet(input_map), IndexSet(input_map), dealii_sparse_matrix,
985  MPI_COMM_SELF, drop_tolerance, copy_values, use_this_sparsity);
986  }
987 
988 
989 
990  template <typename number>
991  void
992  SparseMatrix::reinit (const Epetra_Map &input_row_map,
993  const Epetra_Map &input_col_map,
994  const ::SparseMatrix<number> &dealii_sparse_matrix,
995  const double drop_tolerance,
996  const bool copy_values,
997  const ::SparsityPattern *use_this_sparsity)
998  {
999  reinit (IndexSet(input_row_map), IndexSet(input_col_map),
1000  dealii_sparse_matrix, MPI_COMM_SELF,
1001  drop_tolerance, copy_values, use_this_sparsity);
1002  }
1003 
1004 
1005 
1006  void
1007  SparseMatrix::reinit (const Epetra_CrsMatrix &input_matrix,
1008  const bool copy_values)
1009  {
1010  Assert (input_matrix.Filled()==true,
1011  ExcMessage("Input CrsMatrix has not called FillComplete()!"));
1012 
1013  column_space_map = std_cxx14::make_unique<Epetra_Map>(input_matrix.DomainMap());
1014 
1015  const Epetra_CrsGraph *graph = &input_matrix.Graph();
1016 
1017  nonlocal_matrix.reset();
1018  nonlocal_matrix_exporter.reset();
1019  matrix.reset ();
1020  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
1021 
1022  matrix->FillComplete (*column_space_map, input_matrix.RangeMap(), true);
1023 
1024  if (copy_values == true)
1025  {
1026  // point to the first data entry in the two
1027  // matrices and copy the content
1028  const TrilinosScalar *in_values = input_matrix[0];
1029  TrilinosScalar *values = (*matrix)[0];
1030  const size_type my_nonzeros = input_matrix.NumMyNonzeros();
1031  std::memcpy (values, in_values,
1032  my_nonzeros*sizeof (TrilinosScalar));
1033  }
1034 
1035  last_action = Zero;
1036  compress (::VectorOperation::insert);
1037  }
1038 
1039 
1040 
1041  void
1042  SparseMatrix::compress (::VectorOperation::values operation)
1043  {
1044 
1045  Epetra_CombineMode mode = last_action;
1046  if (last_action == Zero)
1047  {
1048  if ((operation==::VectorOperation::add) ||
1049  (operation==::VectorOperation::unknown))
1050  mode = Add;
1051  else if (operation==::VectorOperation::insert)
1052  mode = Insert;
1053  }
1054  else
1055  {
1056  Assert(
1057  ((last_action == Add) && (operation!=::VectorOperation::insert))
1058  ||
1059  ((last_action == Insert) && (operation!=::VectorOperation::add)),
1060  ExcMessage("Operation and argument to compress() do not match"));
1061  }
1062 
1063  // flush buffers
1064  int ierr;
1065  if (nonlocal_matrix.get() != nullptr && mode == Add)
1066  {
1067  // do only export in case of an add() operation, otherwise the owning
1068  // processor must have set the correct entry
1069  nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
1070  if (nonlocal_matrix_exporter.get() == nullptr)
1071  nonlocal_matrix_exporter = std_cxx14::make_unique<Epetra_Export>
1072  (nonlocal_matrix->RowMap(), matrix->RowMap());
1073  ierr = matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
1074  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1075  ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
1076  nonlocal_matrix->PutScalar(0);
1077  }
1078  else
1079  ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(),
1080  true, mode);
1081 
1082  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1083 
1084  ierr = matrix->OptimizeStorage ();
1085  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1086 
1087  last_action = Zero;
1088 
1089  compressed = true;
1090  }
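// Editorial sketch: the write/compress cycle that the last_action logic
// above enforces, assuming a reinit()ed matrix A and a locally owned row
// index i:
//
//   A.set (i, i, 1.0);                     // starts an Insert epoch
//   A.compress (VectorOperation::insert);
//   A.add (i, i, 2.0);                     // starts an Add epoch
//   A.compress (VectorOperation::add);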
1091 
1092 
1093 
1094  void
1095  SparseMatrix::clear ()
1096  {
1097  // When we clear the matrix, reset
1098  // the pointer and generate an
1099  // empty matrix.
1100  column_space_map = std_cxx14::make_unique<Epetra_Map>
1101  (0, 0, Utilities::Trilinos::comm_self());
1102  matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(View, *column_space_map, 0);
1103  nonlocal_matrix.reset();
1104  nonlocal_matrix_exporter.reset();
1105 
1106  matrix->FillComplete();
1107 
1108  compressed = true;
1109  }
1110 
1111 
1112 
1113  void
1114  SparseMatrix::clear_row (const size_type row,
1115  const TrilinosScalar new_diag_value)
1116  {
1117  Assert (matrix->Filled()==true, ExcMatrixNotCompressed());
1118 
1119  // Only do this on the rows owned
1120  // locally on this processor.
1121  int local_row =
1122  matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1123  if (local_row >= 0)
1124  {
1125  TrilinosScalar *values;
1126  int *col_indices;
1127  int num_entries;
1128  const int ierr = matrix->ExtractMyRowView(local_row, num_entries,
1129  values, col_indices);
1130  (void)ierr;
1131 
1132  Assert (ierr == 0,
1133  ExcTrilinosError(ierr));
1134 
1135  int *diag_find = std::find(col_indices,col_indices+num_entries,local_row);
1136  int diag_index = (int)(diag_find - col_indices);
1137 
1138  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
1139  if (diag_index != j || new_diag_value == 0)
1140  values[j] = 0.;
1141 
1142  if (diag_find && std::fabs(values[diag_index]) == 0.0 &&
1143  new_diag_value != 0.0)
1144  values[diag_index] = new_diag_value;
1145  }
1146  }
1147 
1148 
1149 
1150  void
1151  SparseMatrix::clear_rows (const std::vector<size_type> &rows,
1152  const TrilinosScalar new_diag_value)
1153  {
1154  for (size_type row=0; row<rows.size(); ++row)
1155  clear_row(rows[row], new_diag_value);
1156  }
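// Editorial sketch: clear_rows() is typically used to eliminate rows of
// Dirichlet-constrained unknowns; the row list here is made up for
// illustration:
//
//   std::vector<TrilinosWrappers::SparseMatrix::size_type>
//     constrained_rows = {0, 7, 42};
//   A.clear_rows (constrained_rows, 1.0); // zero the rows, 1.0 on diagonal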
1157 
1158 
1159 
1160  TrilinosScalar
1161  SparseMatrix::operator () (const size_type i,
1162  const size_type j) const
1163  {
1164  // Extract local indices in
1165  // the matrix.
1166  int trilinos_i = matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1167  trilinos_j = matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1168  TrilinosScalar value = 0.;
1169 
1170  // If the data is not on the
1171  // present processor, we throw
1172  // an exception. This is one of
1173  // the two tiny differences to
1174  // the el(i,j) call, which does
1175  // not trigger any assertions.
1176  if (trilinos_i == -1)
1177  {
1178  Assert (false, ExcAccessToNonLocalElement(i, j, local_range().first,
1179  local_range().second));
1180  }
1181  else
1182  {
1183  // Check whether the matrix has
1184  // already been transformed to local
1185  // indices.
1186  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1187 
1188  // Prepare pointers for extraction
1189  // of a view of the row.
1190  int nnz_present = matrix->NumMyEntries(trilinos_i);
1191  int nnz_extracted;
1192  int *col_indices;
1193  TrilinosScalar *values;
1194 
1195  // Generate the view and make
1196  // sure that we have not generated
1197  // an error.
1198  // TODO Check that col_indices are int and not long long
1199  int ierr = matrix->ExtractMyRowView(trilinos_i, nnz_extracted,
1200  values, col_indices);
1201  (void)ierr;
1202  Assert (ierr==0, ExcTrilinosError(ierr));
1203 
1204  Assert (nnz_present == nnz_extracted,
1205  ExcDimensionMismatch(nnz_present, nnz_extracted));
1206 
1207  // Search the index where we
1208  // look for the value, and then
1209  // finally get it.
1210 
1211  int *el_find = std::find(col_indices, col_indices + nnz_present, trilinos_j);
1212 
1213  int local_col_index = (int)(el_find - col_indices);
1214 
1215  // This is actually the only
1216  // difference to the el(i,j)
1217  // function, which means that
1218  // we throw an exception in
1219  // this case instead of just
1220  // returning zero for an
1221  // element that is not present
1222  // in the sparsity pattern.
1223  if (local_col_index == nnz_present)
1224  {
1225  Assert (false, ExcInvalidIndex (i,j));
1226  }
1227  else
1228  value = values[local_col_index];
1229  }
1230 
1231  return value;
1232  }
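// Editorial note: operator() and el() (below) differ only in how they
// treat entries that are not stored locally:
//
//   const TrilinosScalar a = A (i, j);    // asserts if (i,j) is not
//                                         // locally stored
//   const TrilinosScalar b = A.el (i, j); // returns 0. instead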
1233 
1234 
1235 
1236  TrilinosScalar
1237  SparseMatrix::el (const size_type i,
1238  const size_type j) const
1239  {
1240  // Extract local indices in
1241  // the matrix.
1242  int trilinos_i = matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1243  trilinos_j = matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1244  TrilinosScalar value = 0.;
1245 
1246  // If the data is not on the
1247  // present processor, we can't
1248  // continue. Just print out zero
1249  // as discussed in the
1250  // documentation of this
1251  // function. if you want error
1252  // checking, use operator().
1253  if ((trilinos_i == -1 ) || (trilinos_j == -1))
1254  return 0.;
1255  else
1256  {
1257  // Check whether the matrix
1258  // already is transformed to
1259  // local indices.
1260  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1261 
1262  // Prepare pointers for extraction
1263  // of a view of the row.
1264  int nnz_present = matrix->NumMyEntries(trilinos_i);
1265  int nnz_extracted;
1266  int *col_indices;
1267  TrilinosScalar *values;
1268 
1269  // Generate the view and make
1270  // sure that we have not generated
1271  // an error.
1272  int ierr = matrix->ExtractMyRowView(trilinos_i, nnz_extracted,
1273  values, col_indices);
1274  (void)ierr;
1275  Assert (ierr==0, ExcTrilinosError(ierr));
1276 
1277  Assert (nnz_present == nnz_extracted,
1278  ExcDimensionMismatch(nnz_present, nnz_extracted));
1279 
1280  // Search the index where we
1281  // look for the value, and then
1282  // finally get it.
1283  int *el_find = std::find(col_indices, col_indices + nnz_present, trilinos_j);
1284 
1285  int local_col_index = (int)(el_find - col_indices);
1286 
1287 
1288  // This is actually the only
1289  // difference to the () function
1290  // querying (i,j), where we throw an
1291  // exception instead of just
1292  // returning zero for an element
1293  // that is not present in the
1294  // sparsity pattern.
1295  if (local_col_index == nnz_present)
1296  value = 0;
1297  else
1298  value = values[local_col_index];
1299  }
1300 
1301  return value;
1302  }
1303 
1304 
1305 
1306  TrilinosScalar
1307  SparseMatrix::diag_element (const size_type i) const
1308  {
1309  Assert (m() == n(), ExcNotQuadratic());
1310 
1311 #ifdef DEBUG
1312  // use operator() in debug mode because
1313  // it checks if this is a valid element
1314  // (in parallel)
1315  return operator()(i,i);
1316 #else
1317  // Trilinos doesn't seem to have a
1318  // more efficient way to access the
1319  // diagonal than by just using the
1320  // standard el(i,j) function.
1321  return el(i,i);
1322 #endif
1323  }
1324 
1325 
1326 
1327  unsigned int
1328  SparseMatrix::row_length (const size_type row) const
1329  {
1330  Assert (row < m(), ExcInternalError());
1331 
1332  // get a representation of the
1333  // present row
1334  int ncols = -1;
1335  int local_row = matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1336 
1337  // on the processor who owns this
1338  // row, we'll have a non-negative
1339  // value.
1340  if (local_row >= 0)
1341  {
1342  int ierr = matrix->NumMyRowEntries (local_row, ncols);
1343  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1344  }
1345 
1346  return ncols;
1347  }
1348 
1349 
1350 
1351  void
1352  SparseMatrix::set (const std::vector<size_type> &row_indices,
1353  const std::vector<size_type> &col_indices,
1354  const FullMatrix<TrilinosScalar> &values,
1355  const bool elide_zero_values)
1356  {
1357  Assert (row_indices.size() == values.m(),
1358  ExcDimensionMismatch(row_indices.size(), values.m()));
1359  Assert (col_indices.size() == values.n(),
1360  ExcDimensionMismatch(col_indices.size(), values.n()));
1361 
1362  for (size_type i=0; i<row_indices.size(); ++i)
1363  set (row_indices[i], col_indices.size(), col_indices.data(), &values(i,0),
1364  elide_zero_values);
1365  }
1366 
1367 
1368 
1369  void
1370  SparseMatrix::set (const size_type row,
1371  const std::vector<size_type> &col_indices,
1372  const std::vector<TrilinosScalar> &values,
1373  const bool elide_zero_values)
1374  {
1375  Assert (col_indices.size() == values.size(),
1376  ExcDimensionMismatch(col_indices.size(), values.size()));
1377 
1378  set (row, col_indices.size(), col_indices.data(), values.data(),
1379  elide_zero_values);
1380  }
1381 
1382 
1383 
1384  void
1385  SparseMatrix::set (const size_type row,
1386  const size_type n_cols,
1387  const size_type *col_indices,
1388  const TrilinosScalar *values,
1389  const bool elide_zero_values)
1390  {
1391  AssertIndexRange(row, this->m());
1392 
1393  int ierr;
1394  if (last_action == Add)
1395  {
1396  ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(),
1397  true);
1398 
1399  Assert (ierr == 0, ExcTrilinosError(ierr));
1400  }
1401 
1402  last_action = Insert;
1403 
1404  TrilinosWrappers::types::int_type *col_index_ptr;
1405  TrilinosScalar *col_value_ptr;
1406  TrilinosWrappers::types::int_type n_columns;
1407 
1408  boost::container::small_vector<TrilinosScalar, 200> local_value_array(n_cols);
1409  boost::container::small_vector<TrilinosWrappers::types::int_type, 200> local_index_array(n_cols);
1410 
1411  // If we don't elide zeros, the pointers are already available... need to
1412  // cast to non-const pointers as that is the format taken by Trilinos (but
1413  // we will not modify const data)
1414  if (elide_zero_values == false)
1415  {
1416  col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
1417  col_value_ptr = const_cast<TrilinosScalar *>(values);
1418  n_columns = n_cols;
1419  }
1420  else
1421  {
1422  // Otherwise, extract nonzero values in each row and get the
1423  // respective indices.
1424  col_index_ptr = local_index_array.data();
1425  col_value_ptr = local_value_array.data();
1426 
1427  n_columns = 0;
1428  for (size_type j=0; j<n_cols; ++j)
1429  {
1430  const double value = values[j];
1431  AssertIsFinite(value);
1432  if (value != 0)
1433  {
1434  col_index_ptr[n_columns] = col_indices[j];
1435  col_value_ptr[n_columns] = value;
1436  n_columns++;
1437  }
1438  }
1439 
1440  Assert(n_columns <= (TrilinosWrappers::types::int_type)n_cols, ExcInternalError());
1441  }
1442 
1443 
1444  // If the calling matrix owns the row to which we want to insert values,
1445  // we can directly call the Epetra_CrsMatrix input function, which is much
1446  // faster than the Epetra_FECrsMatrix function. We distinguish between two
1447  // cases: the first one is when the matrix is not filled (i.e., it is
1448  // possible to add new elements to the sparsity pattern), and the second
1449  // one is when the pattern is already fixed. In the former case, we add
1450  // the possibility to insert new values, and in the second we just replace
1451  // data.
1452  if (matrix->RowMap().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1453  {
1454  if (matrix->Filled() == false)
1455  {
1456  ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
1457  static_cast<TrilinosWrappers::types::int_type>(row),
1458  static_cast<int>(n_columns),const_cast<double *>(col_value_ptr),
1459  col_index_ptr);
1460 
1461  // When inserting elements, we do not want to create exceptions in
1462  // the case when inserting non-local data (since that's what we
1463  // want to do right now).
1464  if (ierr > 0)
1465  ierr = 0;
1466  }
1467  else
1468  ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row, n_columns,
1469  col_value_ptr,
1470  col_index_ptr);
1471  }
1472  else
1473  {
1474  // When we're at off-processor data, we have to stick with the
1475  // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
1476  // we call it is the fastest one (any other will lead to repeated
1477  // allocation and deallocation of memory in order to call the function
1478  // we already use, which is very inefficient if writing one element at
1479  // a time).
1480  compressed = false;
1481 
1482  if (matrix->Filled() == false)
1483  {
1484  ierr = matrix->InsertGlobalValues (1,
1485  (TrilinosWrappers::types::int_type *)&row,
1486  n_columns, col_index_ptr,
1487  &col_value_ptr,
1488  Epetra_FECrsMatrix::ROW_MAJOR);
1489  if (ierr > 0)
1490  ierr = 0;
1491  }
1492  else
1493  ierr = matrix->ReplaceGlobalValues (1,
1494  (TrilinosWrappers::types::int_type *)&row,
1495  n_columns, col_index_ptr,
1496  &col_value_ptr,
1497  Epetra_FECrsMatrix::ROW_MAJOR);
1498  // use the FECrsMatrix facilities for set even in the case when we
1499  // have explicitly set the off-processor rows because that only works
1500  // properly when adding elements, not when setting them (since we want
1501  // to only touch elements that have been set explicitly, and there is
1502  // no way on the receiving processor to identify them otherwise)
1503  }
1504 
1505  Assert (ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1506  AssertThrow (ierr >= 0, ExcTrilinosError(ierr));
1507  }
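// Editorial sketch: with elide_zero_values == true, zeros are filtered out
// before the Trilinos call, so only two of the three values below actually
// reach Epetra:
//
//   const TrilinosWrappers::SparseMatrix::size_type cols[3] = {0, 1, 2};
//   const TrilinosScalar vals[3] = {1.0, 0.0, 2.0};
//   A.set (0, 3, cols, vals, /*elide_zero_values=*/true);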
1508 
1509 
1510 
1511  void
1512  SparseMatrix::add (const std::vector<size_type> &indices,
1513  const FullMatrix<TrilinosScalar> &values,
1514  const bool elide_zero_values)
1515  {
1516  Assert (indices.size() == values.m(),
1517  ExcDimensionMismatch(indices.size(), values.m()));
1518  Assert (values.m() == values.n(), ExcNotQuadratic());
1519 
1520  for (size_type i=0; i<indices.size(); ++i)
1521  add (indices[i], indices.size(), indices.data(), &values(i,0),
1522  elide_zero_values);
1523  }
1524 
1525 
1526 
1527  void
1528  SparseMatrix::add (const std::vector<size_type> &row_indices,
1529  const std::vector<size_type> &col_indices,
1530  const FullMatrix<TrilinosScalar> &values,
1531  const bool elide_zero_values)
1532  {
1533  Assert (row_indices.size() == values.m(),
1534  ExcDimensionMismatch(row_indices.size(), values.m()));
1535  Assert (col_indices.size() == values.n(),
1536  ExcDimensionMismatch(col_indices.size(), values.n()));
1537 
1538  for (size_type i=0; i<row_indices.size(); ++i)
1539  add (row_indices[i], col_indices.size(), col_indices.data(),
1540  &values(i,0), elide_zero_values);
1541  }
1542 
1543 
1544 
1545  void
1546  SparseMatrix::add (const size_type row,
1547  const std::vector<size_type> &col_indices,
1548  const std::vector<TrilinosScalar> &values,
1549  const bool elide_zero_values)
1550  {
1551  Assert (col_indices.size() == values.size(),
1552  ExcDimensionMismatch(col_indices.size(), values.size()));
1553 
1554  add (row, col_indices.size(), col_indices.data(), values.data(),
1555  elide_zero_values);
1556  }
1557 
1558 
1559 
1560  void
1561  SparseMatrix::add (const size_type row,
1562  const size_type n_cols,
1563  const size_type *col_indices,
1564  const TrilinosScalar *values,
1565  const bool elide_zero_values,
1566  const bool /*col_indices_are_sorted*/)
1567  {
1568  AssertIndexRange(row, this->m());
1569  int ierr;
1570  if (last_action == Insert)
1571  {
1572  // TODO: this could lead to a deadlock when only one processor
1573  // calls GlobalAssemble.
1574  ierr = matrix->GlobalAssemble(*column_space_map,
1575  matrix->RowMap(), false);
1576 
1577  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1578  }
1579 
1580  last_action = Add;
1581 
1582  TrilinosWrappers::types::int_type *col_index_ptr;
1583  TrilinosScalar *col_value_ptr;
1584  TrilinosWrappers::types::int_type n_columns;
1585 
1586  boost::container::small_vector<TrilinosScalar, 100> local_value_array(n_cols);
1587  boost::container::small_vector<TrilinosWrappers::types::int_type, 100> local_index_array(n_cols);
1588 
1589  // If we don't elide zeros, the pointers are already available... need to
1590  // cast to non-const pointers as that is the format taken by Trilinos (but
1591  // we will not modify const data)
1592  if (elide_zero_values == false)
1593  {
1594  col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
1595  col_value_ptr = const_cast<TrilinosScalar *>(values);
1596  n_columns = n_cols;
1597 #ifdef DEBUG
1598  for (size_type j=0; j<n_cols; ++j)
1599  AssertIsFinite(values[j]);
1600 #endif
1601  }
1602  else
1603  {
1604  // Otherwise, extract nonzero values in each row and the corresponding
1605  // index.
1606  col_index_ptr = local_index_array.data();
1607  col_value_ptr = local_value_array.data();
1608 
1609  n_columns = 0;
1610  for (size_type j=0; j<n_cols; ++j)
1611  {
1612  const double value = values[j];
1613 
1614  AssertIsFinite(value);
1615  if (value != 0)
1616  {
1617  col_index_ptr[n_columns] = col_indices[j];
1618  col_value_ptr[n_columns] = value;
1619  n_columns++;
1620  }
1621  }
1622 
1623  Assert(n_columns <= (TrilinosWrappers::types::int_type)n_cols, ExcInternalError());
1624 
1625  }
1626  // Exit early if there is nothing to do
1627  if (n_columns == 0)
1628  {
1629  return;
1630  }
1631 
1632  // If the calling processor owns the row to which we want to add values, we
1633  // can directly call the Epetra_CrsMatrix input function, which is much
1634  // faster than the Epetra_FECrsMatrix function.
1635  if (matrix->RowMap().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1636  {
1637  ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row, n_columns,
1638  col_value_ptr,
1639  col_index_ptr);
1640  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1641  }
1642  else if (nonlocal_matrix.get() != nullptr)
1643  {
1644  compressed = false;
1645  // this is the case when we have explicitly set the off-processor rows
1646  // and want to create a separate matrix object for them (to retain
1647  // thread-safety)
1648  Assert (nonlocal_matrix->RowMap().LID(static_cast<TrilinosWrappers::types::int_type>(row)) != -1,
1649  ExcMessage("Attempted to write into off-processor matrix row "
1650  "that has not been specified as being writable upon "
1651  "initialization"));
1652  ierr = nonlocal_matrix->SumIntoGlobalValues(row, n_columns,
1653  col_value_ptr,
1654  col_index_ptr);
1655  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1656  }
1657  else
1658  {
1659  // When we're at off-processor data, we have to stick with the
1660  // standard SumIntoGlobalValues function. Nevertheless, the way we
1661  // call it is the fastest one (any other will lead to repeated
1662  // allocation and deallocation of memory in order to call the function
1663  // we already use, which is very inefficient if writing one element at
1664  // a time).
1665  compressed = false;
1666 
1667  ierr = matrix->SumIntoGlobalValues (1,
1668  (TrilinosWrappers::types::int_type *)&row, n_columns,
1669  col_index_ptr,
1670  &col_value_ptr,
1671  Epetra_FECrsMatrix::ROW_MAJOR);
1672  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1673  }
1674 
1675 #ifdef DEBUG
1676  if (ierr > 0)
1677  {
1678  std::cout << "------------------------------------------"
1679  << std::endl;
1680  std::cout << "Got error " << ierr << " in row " << row
1681  << " of proc " << matrix->RowMap().Comm().MyPID()
1682  << " when trying to add the columns:" << std::endl;
1683  for (TrilinosWrappers::types::int_type i=0; i<n_columns; ++i)
1684  std::cout << col_index_ptr[i] << " ";
1685  std::cout << std::endl << std::endl;
1686  std::cout << "Matrix row "
1687  << (matrix->RowMap().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == false ? "(nonlocal part)" : "")
1688  << " has the following indices:" << std::endl;
1689  std::vector<TrilinosWrappers::types::int_type> indices;
1690  const Epetra_CrsGraph *graph =
1691  (nonlocal_matrix.get() != nullptr &&
1692  matrix->RowMap().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == false) ?
1693  &nonlocal_matrix->Graph() : &matrix->Graph();
1694 
1695  indices.resize(graph->NumGlobalIndices(static_cast<TrilinosWrappers::types::int_type>(row)));
1696  int n_indices = 0;
1697  graph->ExtractGlobalRowCopy(static_cast<TrilinosWrappers::types::int_type>(row),
1698  indices.size(), n_indices, indices.data());
1699  AssertDimension(static_cast<unsigned int>(n_indices), indices.size());
1700 
1701  for (TrilinosWrappers::types::int_type i=0; i<n_indices; ++i)
1702  std::cout << indices[i] << " ";
1703  std::cout << std::endl << std::endl;
1704  Assert (ierr <= 0,
1705  ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1706  }
1707 #endif
1708  Assert (ierr >= 0, ExcTrilinosError(ierr));
1709  }
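// Editorial sketch: adding into rows owned by another process through the
// nonlocal_matrix branch above requires a sparsity pattern that was set up
// with writable off-processor rows; owned, writable_rows, mpi_comm and
// nonlocal_row are assumptions of this example:
//
//   TrilinosWrappers::SparsityPattern sp (owned, owned, writable_rows,
//                                         mpi_comm);
//   // ... fill sp, call sp.compress(), then A.reinit (sp) ...
//   A.add (nonlocal_row, nonlocal_row, 1.0);
//   A.compress (VectorOperation::add);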
1710 
1711 
1712 
1713  SparseMatrix &
1714  SparseMatrix::operator = (const double d)
1715  {
1716  Assert (d==0, ExcScalarAssignmentOnlyForZeroValue());
1717  compress (::VectorOperation::unknown); // TODO: why do we do this? Should we not check for is_compressed?
1718 
1719  const int ierr = matrix->PutScalar(d);
1720  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1721  if (nonlocal_matrix.get() != nullptr)
1722  nonlocal_matrix->PutScalar(d);
1723 
1724  return *this;
1725  }
1726 
1727 
1728 
1729  void
1730  SparseMatrix::add (const TrilinosScalar factor,
1731  const SparseMatrix &rhs)
1732  {
1733  AssertDimension (rhs.m(), m());
1734  AssertDimension (rhs.n(), n());
1735  AssertDimension (rhs.local_range().first, local_range().first);
1736  AssertDimension (rhs.local_range().second, local_range().second);
1737  Assert(matrix->RowMap().SameAs(rhs.matrix->RowMap()),
1738  ExcMessage("Can only add matrices with same distribution of rows"));
1739  Assert(matrix->Filled() && rhs.matrix->Filled(),
1740  ExcMessage("Addition of matrices only allowed if matrices are "
1741  "filled, i.e., compress() has been called"));
1742 
1743  const std::pair<size_type, size_type>
1744  local_range = rhs.local_range();
1745  const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());
1746 
1747  for (size_type row=local_range.first; row < local_range.second; ++row)
1748  {
1749  const int row_local =
1750  matrix->RowMap().LID(static_cast<TrilinosWrappers::types::int_type>(row));
1751 
1752  // First get a view to the matrix columns of both matrices. Note that
1753  // the data is in local index spaces so we need to be careful not only
1754  // to compare column indices in case they are derived from the same
1755  // map.
1756  int n_entries, rhs_n_entries;
1757  TrilinosScalar *value_ptr, *rhs_value_ptr;
1758  int *index_ptr, *rhs_index_ptr;
1759  int ierr = rhs.matrix->ExtractMyRowView (row_local, rhs_n_entries,
1760  rhs_value_ptr, rhs_index_ptr);
1761  (void)ierr;
1762  Assert (ierr == 0, ExcTrilinosError(ierr));
1763 
1764  ierr = matrix->ExtractMyRowView (row_local, n_entries, value_ptr,
1765  index_ptr);
1766  Assert (ierr == 0, ExcTrilinosError(ierr));
1767  bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
1768  if (!expensive_checks)
1769  {
1770  // check if the column indices are the same. If yes, can simply
1771  // copy over the data.
1772  expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
1773  static_cast<void *>(rhs_index_ptr),
1774  sizeof(int)*n_entries) != 0;
1775  if (!expensive_checks)
1776  for (int i=0; i<n_entries; ++i)
1777  value_ptr[i] += rhs_value_ptr[i] * factor;
1778  }
1779  // Now to the expensive case where we need to check all column indices
1780  // against each other (transformed into global index space) and where
1781  // we need to make sure that all entries we are about to add into the
1782  // lhs matrix actually exist
1783  if (expensive_checks)
1784  {
1785  for (int i=0; i<rhs_n_entries; ++i)
1786  {
1787  if (rhs_value_ptr[i] == 0.)
1788  continue;
1789  const TrilinosWrappers::types::int_type rhs_global_col =
1790  global_column_index(*rhs.matrix, rhs_index_ptr[i]);
1791  int local_col = matrix->ColMap().LID(rhs_global_col);
1792  int *local_index = Utilities::lower_bound(index_ptr,
1793  index_ptr+n_entries,
1794  local_col);
1795  Assert(local_index != index_ptr + n_entries &&
1796  *local_index == local_col,
1797  ExcMessage("Adding the entries from the other matrix "
1798  "failed, because the sparsity pattern "
1799  "of that matrix includes more elements than the "
1800  "calling matrix, which is not allowed."));
1801  value_ptr[local_index-index_ptr] += factor * rhs_value_ptr[i];
1802  }
1803  }
1804  }
1805  }
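  // Usage sketch (not from the library source): add() computes
  // this += factor * rhs entry by entry. Both matrices must be
  // compressed, share the same row distribution, and the sparsity
  // pattern of rhs must be contained in that of the calling matrix.
  //
  //   A.add (2.0, B); // A = A + 2 B, assuming compatible patterns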
1806 
1807 
1808 
 1809  void
 1810  SparseMatrix::transpose ()
 1811  {
1812  // This only flips a flag that tells
1813  // Trilinos that any vmult operation
1814  // should be done with the
1815  // transpose. However, the matrix
1816  // structure is not reset.
1817  int ierr;
1818 
1819  if (!matrix->UseTranspose())
1820  {
1821  ierr = matrix->SetUseTranspose (true);
1822  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1823  }
1824  else
1825  {
1826  ierr = matrix->SetUseTranspose (false);
1827  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1828  }
1829  }
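  // Note that transpose() is O(1): it only toggles Epetra's UseTranspose
  // flag, so operations that honor the flag subsequently act on the
  // transposed matrix while the stored entries stay untouched. Calling
  // it twice restores the original state:
  //
  //   A.transpose(); // flag set: Trilinos treats the matrix as A^T
  //   A.transpose(); // flag cleared: back to A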
1830 
1831 
1832 
1833  SparseMatrix &
1834  SparseMatrix::operator *= (const TrilinosScalar a)
1835  {
1836  const int ierr = matrix->Scale (a);
1837  Assert (ierr == 0, ExcTrilinosError(ierr));
1838  (void)ierr; // removes -Wunused-variable in optimized mode
1839 
1840  return *this;
1841  }
1842 
1843 
1844 
1845  SparseMatrix &
1846  SparseMatrix::operator /= (const TrilinosScalar a)
1847  {
1848  Assert (a !=0, ExcDivideByZero());
1849 
1850  const TrilinosScalar factor = 1./a;
1851 
1852  const int ierr = matrix->Scale (factor);
1853  Assert (ierr == 0, ExcTrilinosError(ierr));
1854  (void)ierr; // removes -Wunused-variable in optimized mode
1855 
1856  return *this;
1857  }
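  // Both scaling operators forward to Epetra_CrsMatrix::Scale();
  // division by a is implemented as multiplication by 1/a. Sketch:
  //
  //   A *= 2.0; // every stored entry doubled
  //   A /= 2.0; // multiplies by 0.5, undoing the scaling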
1858 
1859 
1860 
 1861  TrilinosScalar
 1862  SparseMatrix::l1_norm () const
 1863  {
1864  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1865  return matrix->NormOne();
1866  }
1867 
1868 
1869 
 1870  TrilinosScalar
 1871  SparseMatrix::linfty_norm () const
 1872  {
1873  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1874  return matrix->NormInf();
1875  }
1876 
1877 
1878 
 1879  TrilinosScalar
 1880  SparseMatrix::frobenius_norm () const
 1881  {
1882  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1883  return matrix->NormFrobenius();
1884  }
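  // For reference, the three norms computed above follow the standard
  // definitions for a matrix A = (a_ij):
  //
  //   l1_norm()        = max_j sum_i |a_ij|      (maximum column sum)
  //   linfty_norm()    = max_i sum_j |a_ij|      (maximum row sum)
  //   frobenius_norm() = sqrt(sum_{i,j} a_ij^2)
  //
  // All three require the matrix to be compressed (Filled()).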
1885 
1886 
1887 
1888  namespace internal
1889  {
1890  namespace SparseMatrixImplementation
1891  {
1892  template <typename VectorType>
1893  inline
1894  void check_vector_map_equality(const Epetra_CrsMatrix &,
1895  const VectorType &,
1896  const VectorType &)
1897  {
1898  }
1899 
1900  inline
 1901  void check_vector_map_equality(const Epetra_CrsMatrix &m,
 1902  const TrilinosWrappers::MPI::Vector &in,
 1903  const TrilinosWrappers::MPI::Vector &out)
1904  {
1905  Assert (in.vector_partitioner().SameAs(m.DomainMap()) == true,
1906  ExcMessage ("Column map of matrix does not fit with vector map!"));
1907  Assert (out.vector_partitioner().SameAs(m.RangeMap()) == true,
1908  ExcMessage ("Row map of matrix does not fit with vector map!"));
1909  (void)m;
1910  (void)in;
1911  (void)out;
1912  }
1913  }
1914  }
1915 
1916 
1917  template <typename VectorType>
1918  void
1919  SparseMatrix::vmult (VectorType &dst,
1920  const VectorType &src) const
1921  {
1922  Assert (&src != &dst, ExcSourceEqualsDestination());
1923  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1924  (void)src;
1925  (void)dst;
1926 
1927  internal::SparseMatrixImplementation::check_vector_map_equality(*matrix, src, dst);
1928  const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
1929  AssertDimension (dst_local_size, static_cast<size_type>(matrix->RangeMap().NumMyPoints()));
1930  const size_type src_local_size = internal::end(src) - internal::begin(src);
1931  AssertDimension (src_local_size, static_cast<size_type>(matrix->DomainMap().NumMyPoints()));
1932 
1933  Epetra_MultiVector tril_dst (View, matrix->RangeMap(), internal::begin(dst),
1934  dst_local_size, 1);
1935  Epetra_MultiVector tril_src (View, matrix->DomainMap(),
1936  const_cast<TrilinosScalar *>(internal::begin(src)),
1937  src_local_size, 1);
1938 
1939  const int ierr = matrix->Multiply (false, tril_src, tril_dst);
1940  Assert (ierr == 0, ExcTrilinosError(ierr));
1941  (void)ierr; // removes -Wunused-variable in optimized mode
1942  }
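  // A minimal vmult sketch (hypothetical names; `A` is an initialized,
  // compressed matrix and `comm` its MPI communicator):
  //
  //   TrilinosWrappers::MPI::Vector src (A.locally_owned_domain_indices(), comm);
  //   TrilinosWrappers::MPI::Vector dst (A.locally_owned_range_indices(), comm);
  //   src = 1.;
  //   A.vmult (dst, src); // dst = A src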
1943 
1944 
1945 
1946  template <typename VectorType>
1947  void
1948  SparseMatrix::Tvmult (VectorType &dst,
1949  const VectorType &src) const
1950  {
1951  Assert (&src != &dst, ExcSourceEqualsDestination());
1952  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1953 
1954  internal::SparseMatrixImplementation::check_vector_map_equality(*matrix, dst, src);
1955  const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
1956  AssertDimension (dst_local_size, static_cast<size_type>(matrix->DomainMap().NumMyPoints()));
1957  const size_type src_local_size = internal::end(src) - internal::begin(src);
1958  AssertDimension (src_local_size, static_cast<size_type>(matrix->RangeMap().NumMyPoints()));
1959 
1960  Epetra_MultiVector tril_dst (View, matrix->DomainMap(), internal::begin(dst),
1961  dst_local_size, 1);
1962  Epetra_MultiVector tril_src (View, matrix->RangeMap(),
1963  const_cast<double *>(internal::begin(src)),
1964  src_local_size, 1);
1965 
1966  const int ierr = matrix->Multiply (true, tril_src, tril_dst);
1967  Assert (ierr == 0, ExcTrilinosError(ierr));
1968  (void)ierr; // removes -Wunused-variable in optimized mode
1969  }
1970 
1971 
1972 
1973  template <typename VectorType>
1974  void
1975  SparseMatrix::vmult_add (VectorType &dst,
1976  const VectorType &src) const
1977  {
1978  Assert (&src != &dst, ExcSourceEqualsDestination());
1979 
1980  // Reinit a temporary vector with fast argument set, which does not
1981  // overwrite the content (to save time).
1982  VectorType tmp_vector;
1983  tmp_vector.reinit(dst, true);
1984  vmult (tmp_vector, src);
1985  dst += tmp_vector;
1986  }
1987 
1988 
1989 
1990  template <typename VectorType>
1991  void
1992  SparseMatrix::Tvmult_add (VectorType &dst,
1993  const VectorType &src) const
1994  {
1995  Assert (&src != &dst, ExcSourceEqualsDestination());
1996 
1997  // Reinit a temporary vector with fast argument set, which does not
1998  // overwrite the content (to save time).
1999  VectorType tmp_vector;
2000  tmp_vector.reinit(dst, true);
2001  Tvmult (tmp_vector, src);
2002  dst += tmp_vector;
2003  }
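  // Both *_add variants use the same pattern: the product is computed
  // into a scratch vector (reinit-ed with omit_zeroing_entries == true,
  // since it is overwritten anyway) and then accumulated into dst:
  //
  //   A.vmult_add (dst, src);  // dst += A src
  //   A.Tvmult_add (dst, src); // dst += A^T src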
2004 
2005 
2006 
 2007  TrilinosScalar
 2008  SparseMatrix::matrix_norm_square (const MPI::Vector &v) const
 2009  {
2010  Assert (matrix->RowMap().SameAs(matrix->DomainMap()),
2011  ExcNotQuadratic());
2012 
2013  MPI::Vector temp_vector;
2014  temp_vector.reinit(v, true);
2015 
2016  vmult (temp_vector, v);
2017  return temp_vector*v;
2018  }
2019 
2020 
2021 
 2022  TrilinosScalar
 2023  SparseMatrix::matrix_scalar_product (const MPI::Vector &u,
 2024  const MPI::Vector &v) const
2025  {
2026  Assert (matrix->RowMap().SameAs(matrix->DomainMap()),
2027  ExcNotQuadratic());
2028 
2029  MPI::Vector temp_vector;
2030  temp_vector.reinit(v, true);
2031 
2032  vmult (temp_vector, v);
2033  return u*temp_vector;
2034  }
2035 
2036 
2037 
 2038  TrilinosScalar
 2039  SparseMatrix::residual (MPI::Vector &dst,
 2040  const MPI::Vector &x,
2041  const MPI::Vector &b) const
2042  {
2043  vmult (dst, x);
2044  dst -= b;
2045  dst *= -1.;
2046 
2047  return dst.l2_norm();
2048  }
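  // In formulas: residual() computes dst = b - A x and returns its l2
  // norm; the two in-place operations above realize dst = -((A x) - b)
  // without needing a second temporary vector.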
2049 
2050 
2051 
2052  namespace internals
2053  {
2054  typedef ::types::global_dof_index size_type;
2055 
2056  void perform_mmult (const SparseMatrix &inputleft,
2057  const SparseMatrix &inputright,
2058  SparseMatrix &result,
2059  const MPI::Vector &V,
2060  const bool transpose_left)
2061  {
2062 #ifdef DEAL_II_WITH_64BIT_INDICES
2063  Assert(false,ExcNotImplemented())
2064 #endif
 2065  const bool use_vector = (V.size() == inputright.m());
2066  if (transpose_left == false)
2067  {
2068  Assert (inputleft.n() == inputright.m(),
2069  ExcDimensionMismatch(inputleft.n(), inputright.m()));
2070  Assert (inputleft.domain_partitioner().SameAs(inputright.range_partitioner()),
2071  ExcMessage ("Parallel partitioning of A and B does not fit."));
2072  }
2073  else
2074  {
2075  Assert (inputleft.m() == inputright.m(),
2076  ExcDimensionMismatch(inputleft.m(), inputright.m()));
2077  Assert (inputleft.range_partitioner().SameAs(inputright.range_partitioner()),
2078  ExcMessage ("Parallel partitioning of A and B does not fit."));
2079  }
2080 
2081  result.clear();
2082 
2083  // create a suitable operator B: in case
2084  // we do not use a vector, all we need to
2085  // do is to set the pointer. Otherwise,
2086  // we insert the data from B, but
2087  // multiply each row with the respective
2088  // vector element.
2089  Teuchos::RCP<Epetra_CrsMatrix> mod_B;
2090  if (use_vector == false)
2091  {
2092  mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>
2093  (&inputright.trilinos_matrix()),
2094  false);
2095  }
2096  else
2097  {
2098  mod_B = Teuchos::rcp(new Epetra_CrsMatrix
2099  (Copy, inputright.trilinos_sparsity_pattern()),
2100  true);
2101  mod_B->FillComplete(inputright.domain_partitioner(),
2102  inputright.range_partitioner());
2103  Assert (inputright.local_range() == V.local_range(),
2104  ExcMessage ("Parallel distribution of matrix B and vector V "
2105  "does not match."));
2106 
2107  const int local_N = inputright.local_size();
2108  for (int i=0; i<local_N; ++i)
2109  {
2110  int N_entries = -1;
2111  double *new_data, *B_data;
2112  mod_B->ExtractMyRowView (i, N_entries, new_data);
2113  inputright.trilinos_matrix().ExtractMyRowView (i, N_entries, B_data);
2114  double value = V.trilinos_vector()[0][i];
2115  for (TrilinosWrappers::types::int_type j=0; j<N_entries; ++j)
2116  new_data[j] = value * B_data[j];
2117  }
2118  }
2119 
2120  // use ML built-in method for performing
2121  // the matrix-matrix product.
2122  // create ML operators on top of the
2123  // Epetra matrices. if we use a
2124  // transposed matrix, let ML know it
2125  ML_Comm *comm;
2126  ML_Comm_Create(&comm);
2127 #ifdef ML_MPI
2128  const Epetra_MpiComm *epcomm = dynamic_cast<const Epetra_MpiComm *>(&(inputleft.trilinos_matrix().Comm()));
 2129  // Get the MPI communicator, as it may not be MPI_COMM_WORLD, and update the ML comm object
2130  if (epcomm) ML_Comm_Set_UsrComm(comm,epcomm->Comm());
2131 #endif
2132  ML_Operator *A_ = ML_Operator_Create(comm);
2133  ML_Operator *B_ = ML_Operator_Create(comm);
2134  ML_Operator *C_ = ML_Operator_Create(comm);
2135  SparseMatrix transposed_mat;
2136 
2137  if (transpose_left == false)
2138  ML_Operator_WrapEpetraCrsMatrix
2139  (const_cast<Epetra_CrsMatrix *>(&inputleft.trilinos_matrix()),A_,
2140  false);
2141  else
2142  {
2143  // create transposed matrix
2144  SparsityPattern sparsity_transposed (inputleft.domain_partitioner(),
2145  inputleft.range_partitioner());
2146  Assert (inputleft.domain_partitioner().LinearMap() == true,
2147  ExcMessage("Matrix must be partitioned contiguously between procs."));
2148  for (::types::global_dof_index i=0; i<inputleft.local_size(); ++i)
2149  {
2150  int num_entries, * indices;
2151  inputleft.trilinos_sparsity_pattern().ExtractMyRowView(i, num_entries,
2152  indices);
2153  Assert (num_entries >= 0, ExcInternalError());
2154 
2155  const auto &trilinos_matrix = inputleft.trilinos_matrix();
2156  const size_type GID = TrilinosWrappers::global_index(trilinos_matrix.RowMap(), i);
2157  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
2158  sparsity_transposed.add
2159  (TrilinosWrappers::global_index(trilinos_matrix.ColMap(),
2160  indices[j]),
2161  GID);
2162  }
2163 
2164  sparsity_transposed.compress();
2165  transposed_mat.reinit (sparsity_transposed);
2166  for (::types::global_dof_index i=0; i<inputleft.local_size(); ++i)
2167  {
2168  int num_entries, * indices;
2169  double *values;
2170  inputleft.trilinos_matrix().ExtractMyRowView(i, num_entries,
2171  values, indices);
2172  Assert (num_entries >= 0, ExcInternalError());
2173 
2174  const auto &trilinos_matrix = inputleft.trilinos_matrix();
2175  const size_type GID = TrilinosWrappers::global_index(trilinos_matrix.RowMap(), i);
2176  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
2177  transposed_mat.set (TrilinosWrappers::global_index(trilinos_matrix.ColMap(),
2178  indices[j]),
2179  GID, values[j]);
2180  }
2181  transposed_mat.compress(VectorOperation::insert);
2182  ML_Operator_WrapEpetraCrsMatrix
2183  (const_cast<Epetra_CrsMatrix *>(&transposed_mat.trilinos_matrix()),
2184  A_,false);
2185  }
2186  ML_Operator_WrapEpetraCrsMatrix(mod_B.get(),B_,false);
2187 
2188  // We implement the multiplication by
2189  // hand in a similar way as is done in
2190  // ml/src/Operator/ml_rap.c for a triple
2191  // matrix product. This means that the
2192  // code is very similar to the one found
2193  // in ml/src/Operator/ml_rap.c
2194 
2195  // import data if necessary
2196  ML_Operator *Btmp, *Ctmp, *Ctmp2, *tptr;
2197  ML_CommInfoOP *getrow_comm;
2198  int max_per_proc;
2199  TrilinosWrappers::types::int_type N_input_vector = B_->invec_leng;
2200  getrow_comm = B_->getrow->pre_comm;
2201  if ( getrow_comm != nullptr)
2202  for (TrilinosWrappers::types::int_type i = 0; i < getrow_comm->N_neighbors; i++)
2203  for (TrilinosWrappers::types::int_type j = 0; j < getrow_comm->neighbors[i].N_send; j++)
2204  AssertThrow (getrow_comm->neighbors[i].send_list[j] < N_input_vector,
2205  ExcInternalError());
2206 
2207  ML_create_unique_col_id(N_input_vector, &(B_->getrow->loc_glob_map),
2208  getrow_comm, &max_per_proc, B_->comm);
2209  B_->getrow->use_loc_glob_map = ML_YES;
2210  if (A_->getrow->pre_comm != nullptr)
2211  ML_exchange_rows( B_, &Btmp, A_->getrow->pre_comm);
2212  else Btmp = B_;
2213 
2214  // perform matrix-matrix product
2215  ML_matmat_mult(A_, Btmp, &Ctmp);
2216 
2217  // release temporary structures we needed
2218  // for multiplication
2219  ML_free(B_->getrow->loc_glob_map);
2220  B_->getrow->loc_glob_map = nullptr;
2221  B_->getrow->use_loc_glob_map = ML_NO;
2222  if (A_->getrow->pre_comm != nullptr)
2223  {
2224  tptr = Btmp;
2225  while ( (tptr!= nullptr) && (tptr->sub_matrix != B_))
2226  tptr = tptr->sub_matrix;
2227  if (tptr != nullptr) tptr->sub_matrix = nullptr;
2228  ML_RECUR_CSR_MSRdata_Destroy(Btmp);
2229  ML_Operator_Destroy(&Btmp);
2230  }
2231 
2232  // make correct data structures
2233  if (A_->getrow->post_comm != nullptr)
2234  ML_exchange_rows(Ctmp, &Ctmp2, A_->getrow->post_comm);
2235  else
2236  Ctmp2 = Ctmp;
2237 
2238  ML_back_to_csrlocal(Ctmp2, C_, max_per_proc);
2239 
2240  ML_RECUR_CSR_MSRdata_Destroy (Ctmp);
2241  ML_Operator_Destroy (&Ctmp);
2242 
2243  if (A_->getrow->post_comm != nullptr)
2244  {
2245  ML_RECUR_CSR_MSRdata_Destroy(Ctmp2);
2246  ML_Operator_Destroy (&Ctmp2);
2247  }
2248 
2249  // create an Epetra matrix from the ML
2250  // matrix that we got as a result.
2251  Epetra_CrsMatrix *C_mat;
2252  ML_Operator2EpetraCrsMatrix(C_, C_mat);
2253  C_mat->FillComplete(mod_B->DomainMap(),
2254  transpose_left ?
2255  inputleft.trilinos_matrix().DomainMap() :
2256  inputleft.trilinos_matrix().RangeMap());
2257  C_mat->OptimizeStorage();
2258  result.reinit (*C_mat);
2259 
2260  // destroy allocated memory
2261  delete C_mat;
2262  ML_Operator_Destroy (&A_);
2263  ML_Operator_Destroy (&B_);
2264  ML_Operator_Destroy (&C_);
2265  ML_Comm_Destroy (&comm);
2266  }
2267  }
2268 
2269 
 2270  void
 2271  SparseMatrix::mmult (SparseMatrix &C,
 2272  const SparseMatrix &B,
2273  const MPI::Vector &V) const
2274  {
2275 #ifdef DEAL_II_WITH_64BIT_INDICES
2276  Assert(false,ExcNotImplemented())
2277 #endif
2278  internals::perform_mmult (*this, B, C, V, false);
2279  }
2280 
2281 
2282 
 2283  void
 2284  SparseMatrix::Tmmult (SparseMatrix &C,
 2285  const SparseMatrix &B,
2286  const MPI::Vector &V) const
2287  {
2288 #ifdef DEAL_II_WITH_64BIT_INDICES
2289  Assert(false,ExcNotImplemented())
2290 #endif
2291  internals::perform_mmult (*this, B, C, V, true);
2292  }
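  // Usage sketch for the two products (hypothetical names; when the
  // vector argument is omitted, no diagonal scaling is applied):
  //
  //   A.mmult (C, B);     // C = A * B
  //   A.mmult (C, B, V);  // C = A * diag(V) * B
  //   A.Tmmult (C, B);    // C = A^T * B
  //
  // The result matrix C is rebuilt inside perform_mmult(), so it needs
  // no sparsity pattern up front.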
2293 
2294 
2295 
 2296  void
 2297  SparseMatrix::write_ascii ()
 2298  {
2299  Assert (false, ExcNotImplemented());
2300  }
2301 
2302 
2303 
2304  // As of now, no particularly neat
2305  // output is generated in case of
2306  // multiple processors.
2307  void
2308  SparseMatrix::print (std::ostream &out,
2309  const bool print_detailed_trilinos_information) const
2310  {
2311  if (print_detailed_trilinos_information == true)
2312  out << *matrix;
2313  else
2314  {
2315  double *values;
2316  int *indices;
2317  int num_entries;
2318 
2319  for (int i=0; i<matrix->NumMyRows(); ++i)
2320  {
2321  matrix->ExtractMyRowView (i, num_entries, values, indices);
2322  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
2323  out << "(" << TrilinosWrappers::global_row_index(*matrix,i) << ","
2324  << TrilinosWrappers::global_column_index(*matrix,indices[j]) << ") "
2325  << values[j] << std::endl;
2326  }
2327  }
2328 
2329  AssertThrow (out, ExcIO());
2330  }
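  // Usage sketch: with the default flag the matrix is written as
  // "(row,column) value" triplets in global indices, one entry per line;
  // passing true instead streams Epetra's own, much more verbose output.
  //
  //   A.print (std::cout);       // compact triplet listing
  //   A.print (std::cout, true); // detailed Trilinos information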
2331 
2332 
2333 
 2334  SparseMatrix::size_type
 2335  SparseMatrix::memory_consumption () const
 2336  {
2337  size_type static_memory = sizeof(*this) + sizeof (*matrix)
2338  + sizeof(*matrix->Graph().DataPtr());
2339  return ((sizeof(TrilinosScalar)+sizeof(TrilinosWrappers::types::int_type))*
2340  matrix->NumMyNonzeros() + sizeof(int)*local_size() + static_memory);
2341  }
2342 
2343 
2344 
2345  const Epetra_Map &
 2346  SparseMatrix::domain_partitioner () const
 2347  {
2348  return matrix->DomainMap();
2349  }
2350 
2351 
2352 
2353  const Epetra_Map &
 2354  SparseMatrix::range_partitioner () const
 2355  {
2356  return matrix->RangeMap();
2357  }
2358 
2359 
2360 
2361  const Epetra_Map &
 2362  SparseMatrix::row_partitioner () const
 2363  {
2364  return matrix->RowMap();
2365  }
2366 
2367 
2368 
2369  const Epetra_Map &
 2370  SparseMatrix::col_partitioner () const
 2371  {
2372  return matrix->ColMap();
2373  }
2374 
2375 
2376 
 2377  MPI_Comm SparseMatrix::get_mpi_communicator () const
 2378  {
2379 
2380 #ifdef DEAL_II_WITH_MPI
2381 
2382  const Epetra_MpiComm *mpi_comm
2383  = dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
2384  Assert(mpi_comm != nullptr, ExcInternalError());
2385  return mpi_comm->Comm();
2386 #else
2387 
2388  return MPI_COMM_SELF;
2389 
2390 #endif
2391 
2392  }
2393 }
2394 
2395 
2396 namespace TrilinosWrappers
2397 {
2398  namespace internal
2399  {
2400  namespace
2401  {
2402 #ifndef DEAL_II_WITH_MPI
2403  Epetra_Map
2404  make_serial_Epetra_map (const IndexSet &serial_partitioning)
2405  {
2406  // See IndexSet::make_trilinos_map
2407  return Epetra_Map(TrilinosWrappers::types::int_type(serial_partitioning.size()),
2408  TrilinosWrappers::types::int_type(serial_partitioning.n_elements()),
2409  0, Epetra_SerialComm());
2410  }
2411 #endif
2412  }
2413 
2414  namespace LinearOperatorImplementation
2415  {
2416 
 2417  TrilinosPayload::TrilinosPayload ()
 2418  : use_transpose (false),
2419 #ifdef DEAL_II_WITH_MPI
2420  communicator (MPI_COMM_SELF),
2421  domain_map (IndexSet().make_trilinos_map(communicator.Comm())),
2422  range_map (IndexSet().make_trilinos_map(communicator.Comm()))
2423 #else
2424  domain_map (internal::make_serial_Epetra_map(IndexSet())),
2425  range_map (internal::make_serial_Epetra_map(IndexSet()))
2426 #endif
2427  {
2428  vmult = [](Range &, const Domain &)
2429  {
2430  Assert(false,
2431  ExcMessage("Uninitialized TrilinosPayload::vmult called"
2432  "(Default constructor)"));
2433  };
2434 
2435  Tvmult = [](Domain &, const Range &)
2436  {
2437  Assert(false,
2438  ExcMessage("Uninitialized TrilinosPayload::Tvmult called"
2439  "(Default constructor)"));
2440  };
2441 
2442  inv_vmult = [](Domain &, const Range &)
2443  {
2444  Assert(false,
2445  ExcMessage("Uninitialized TrilinosPayload::inv_vmult called"
2446  "(Default constructor)"));
2447  };
2448 
2449  inv_Tvmult = [](Range &, const Domain &)
2450  {
2451  Assert(false,
2452  ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called"
2453  "(Default constructor)"));
2454  };
2455  }
2456 
2457 
2458 
 2459  TrilinosPayload::TrilinosPayload (const TrilinosWrappers::SparseMatrix &matrix_exemplar,
 2460  const TrilinosWrappers::SparseMatrix &matrix)
2461  : use_transpose (matrix_exemplar.trilinos_matrix().UseTranspose()),
2462 #ifdef DEAL_II_WITH_MPI
2463  communicator (matrix_exemplar.get_mpi_communicator()),
2464  domain_map (matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(communicator.Comm())),
2465  range_map (matrix_exemplar.locally_owned_range_indices().make_trilinos_map(communicator.Comm()))
2466 #else
2467  domain_map (internal::make_serial_Epetra_map(matrix_exemplar.locally_owned_domain_indices())),
2468  range_map (internal::make_serial_Epetra_map(matrix_exemplar.locally_owned_range_indices()))
2469 #endif
2470  {
2471  vmult = [&matrix_exemplar,&matrix](Range &tril_dst, const Domain &tril_src)
2472  {
2473  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
 2474  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2475  Assert (matrix.trilinos_matrix().Filled(), TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2476  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2477  tril_src, tril_dst,
2478  matrix_exemplar.trilinos_matrix().UseTranspose());
2479  internal::check_vector_map_equality(matrix.trilinos_matrix(),
2480  tril_src, tril_dst,
2481  matrix.trilinos_matrix().UseTranspose());
2482 
2483  const int ierr = matrix.trilinos_matrix().Apply (tril_src, tril_dst);
2484  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2485  };
2486 
2487  Tvmult = [&matrix_exemplar,&matrix](Domain &tril_dst, const Range &tril_src)
2488  {
2489  // Duplicated from TrilinosWrappers::SparseMatrix::Tvmult
 2490  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2491  Assert (matrix.trilinos_matrix().Filled(), TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2492  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2493  tril_src, tril_dst,
2494  !matrix_exemplar.trilinos_matrix().UseTranspose());
2495  internal::check_vector_map_equality(matrix.trilinos_matrix(),
2496  tril_src, tril_dst,
2497  !matrix.trilinos_matrix().UseTranspose());
2498 
2499  Epetra_CrsMatrix &tril_mtrx_non_const = const_cast<Epetra_CrsMatrix &>(matrix.trilinos_matrix());
2500  tril_mtrx_non_const.SetUseTranspose(!matrix.trilinos_matrix().UseTranspose());
2501  const int ierr = matrix.trilinos_matrix().Apply (tril_src, tril_dst);
2502  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2503  tril_mtrx_non_const.SetUseTranspose(!matrix.trilinos_matrix().UseTranspose());
2504  };
2505 
2506  inv_vmult = [](Domain &, const Range &)
2507  {
2508  Assert(false,
2509  ExcMessage("Uninitialized TrilinosPayload::inv_vmult called"
2510  "(Matrix constructor with matrix exemplar)"));
2511  };
2512 
2513  inv_Tvmult = [](Range &, const Domain &)
2514  {
2515  Assert(false,
2516  ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called"
2517  "(Matrix constructor with matrix exemplar)"));
2518  };
2519  }
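  // The payload stores its four actions as std::function objects that a
  // LinearOperator forwards to. A hedged construction sketch (the
  // linear_operator() factory is defined elsewhere in deal.II; `A`,
  // `dst`, and `src` are assumed to be initialized):
  //
  //   using Payload =
  //     TrilinosWrappers::internal::LinearOperatorImplementation::TrilinosPayload;
  //   const auto op = linear_operator<TrilinosWrappers::MPI::Vector,
  //                                   TrilinosWrappers::MPI::Vector,
  //                                   Payload> (A);
  //   op.vmult (dst, src); // applies A through the payload's vmult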
2520 
2521 
2522 
2524  const TrilinosWrappers::PreconditionBase &preconditioner)
2525  : use_transpose (matrix_exemplar.trilinos_matrix().UseTranspose()),
2526 #ifdef DEAL_II_WITH_MPI
2527  communicator (matrix_exemplar.get_mpi_communicator()),
2528  domain_map (matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(communicator.Comm())),
2529  range_map (matrix_exemplar.locally_owned_range_indices().make_trilinos_map(communicator.Comm()))
2530 #else
2531  domain_map (internal::make_serial_Epetra_map(matrix_exemplar.locally_owned_domain_indices())),
2532  range_map (internal::make_serial_Epetra_map(matrix_exemplar.locally_owned_range_indices()))
2533 #endif
2534  {
2535  vmult = [&matrix_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
2536  {
2537  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2538  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
 2539  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2540  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2541  tril_src, tril_dst,
2542  matrix_exemplar.trilinos_matrix().UseTranspose());
2543  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2544  tril_src, tril_dst,
2545  preconditioner.trilinos_operator().UseTranspose());
2546 
2547  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2548  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2549  };
2550 
2551  Tvmult = [&matrix_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
2552  {
2553  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2554  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
 2555  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2556  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2557  tril_src, tril_dst,
2558  !matrix_exemplar.trilinos_matrix().UseTranspose());
2559  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2560  tril_src, tril_dst,
2561  !preconditioner.trilinos_operator().UseTranspose());
2562 
2563  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2564  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2565  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2566  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2567  };
2568 
2569  inv_vmult = [&matrix_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
2570  {
2571  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2572  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
 2573  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2574  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2575  tril_src, tril_dst,
2576  !matrix_exemplar.trilinos_matrix().UseTranspose());
2577  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2578  tril_src, tril_dst,
2579  !preconditioner.trilinos_operator().UseTranspose());
2580 
2581  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2582  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2583  };
2584 
2585  inv_Tvmult = [&matrix_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
2586  {
2587  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2588  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
 2589  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2590  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2591  tril_src, tril_dst,
2592  matrix_exemplar.trilinos_matrix().UseTranspose());
2593  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2594  tril_src, tril_dst,
2595  preconditioner.trilinos_operator().UseTranspose());
2596 
2597  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2598  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2599  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2600  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2601  };
2602  }
2603 
2604 
2605 
2607  const TrilinosWrappers::PreconditionBase &preconditioner)
2608  : use_transpose (preconditioner_exemplar.trilinos_operator().UseTranspose()),
2609 #ifdef DEAL_II_WITH_MPI
2610  communicator (preconditioner_exemplar.get_mpi_communicator()),
2611  domain_map (preconditioner_exemplar.locally_owned_domain_indices().make_trilinos_map(communicator.Comm())),
2612  range_map (preconditioner_exemplar.locally_owned_range_indices().make_trilinos_map(communicator.Comm()))
2613 #else
2614  domain_map (internal::make_serial_Epetra_map(preconditioner_exemplar.locally_owned_domain_indices())),
2615  range_map (internal::make_serial_Epetra_map(preconditioner_exemplar.locally_owned_range_indices()))
2616 #endif
2617  {
2618  vmult = [&preconditioner_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
2619  {
2620  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2621  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
 2622  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2623  internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
2624  tril_src, tril_dst,
2625  preconditioner_exemplar.trilinos_operator().UseTranspose());
2626  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2627  tril_src, tril_dst,
2628  preconditioner.trilinos_operator().UseTranspose());
2629 
2630  const int ierr = preconditioner.trilinos_operator().Apply (tril_src,tril_dst);
2631  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2632  };
2633 
2634  Tvmult = [&preconditioner_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
2635  {
2636  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2637  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
 2638  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2639  internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
2640  tril_src, tril_dst,
2641  !preconditioner_exemplar.trilinos_operator().UseTranspose());
2642  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2643  tril_src, tril_dst,
2644  !preconditioner.trilinos_operator().UseTranspose());
2645 
2646  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2647  const int ierr = preconditioner.trilinos_operator().Apply (tril_src,tril_dst);
2648  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2649  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2650  };
2651 
2652  inv_vmult = [&preconditioner_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
2653  {
2654  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2655  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
 2656  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2657  internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
2658  tril_src, tril_dst,
2659  !preconditioner_exemplar.trilinos_operator().UseTranspose());
2660  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2661  tril_src, tril_dst,
2662  !preconditioner.trilinos_operator().UseTranspose());
2663 
2664  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2665  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2666  };
2667 
2668  inv_Tvmult = [&preconditioner_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
2669  {
2670  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2671  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
 2672  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
 2673  internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
2674  tril_src, tril_dst,
2675  preconditioner_exemplar.trilinos_operator().UseTranspose());
2676  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2677  tril_src, tril_dst,
2678  preconditioner.trilinos_operator().UseTranspose());
2679 
2680  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2681  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2682  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2683  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2684  };
2685  }
2686 
2687 
2688 
 2689  TrilinosPayload::TrilinosPayload (const TrilinosPayload &payload)
 2690  : vmult (payload.vmult),
2691  Tvmult (payload.Tvmult),
2692  inv_vmult (payload.inv_vmult),
2693  inv_Tvmult (payload.inv_Tvmult),
2694  use_transpose (payload.use_transpose),
2695  communicator (payload.communicator),
2696  domain_map (payload.domain_map),
2697  range_map (payload.range_map)
2698  { }
2699 
2700 
2701 
2702  // Composite copy constructor
2703  // This is required for PackagedOperations
 2704  TrilinosPayload::TrilinosPayload (const TrilinosPayload &first_op,
 2705  const TrilinosPayload &second_op)
2706  : use_transpose (false), // The combination of operators provides the exact definition of the operation
2707  communicator (first_op.communicator),
2708  domain_map (second_op.domain_map),
2709  range_map (first_op.range_map)
2710  { }
2711 
2712 
2713 
 2714  TrilinosPayload
 2715  TrilinosPayload::identity_payload () const
 2716  {
2717  TrilinosPayload return_op (*this);
2718 
2719  return_op.vmult = [](Range &tril_dst,
2720  const Range &tril_src)
2721  {
2722  tril_dst = tril_src;
2723  };
2724 
2725  return_op.Tvmult = [](Range &tril_dst,
2726  const Range &tril_src)
2727  {
2728  tril_dst = tril_src;
2729  };
2730 
2731  return_op.inv_vmult = [](Range &tril_dst,
2732  const Range &tril_src)
2733  {
2734  tril_dst = tril_src;
2735  };
2736 
2737  return_op.inv_Tvmult = [](Range &tril_dst,
2738  const Range &tril_src)
2739  {
2740  tril_dst = tril_src;
2741  };
2742 
2743  return return_op;
2744  }
2745 
2746 
2747 
 2748  TrilinosPayload
 2749  TrilinosPayload::null_payload () const
 2750  {
2751  TrilinosPayload return_op (*this);
2752 
2753  return_op.vmult = [](Range &tril_dst,
2754  const Domain &)
2755  {
2756  const int ierr = tril_dst.PutScalar(0.0);
2757 
2758  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2759  };
2760 
2761  return_op.Tvmult = [](Domain &tril_dst,
2762  const Range &)
2763  {
2764  const int ierr = tril_dst.PutScalar(0.0);
2765 
2766  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2767  };
2768 
2769  return_op.inv_vmult = [](Domain &tril_dst,
2770  const Range &)
2771  {
2772  AssertThrow(false, ExcMessage("Cannot compute inverse of null operator"));
2773 
2774  const int ierr = tril_dst.PutScalar(0.0);
2775 
2776  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2777  };
2778 
2779  return_op.inv_Tvmult = [](Range &tril_dst,
2780  const Domain &)
2781  {
2782  AssertThrow(false, ExcMessage("Cannot compute inverse of null operator"));
2783 
2784  const int ierr = tril_dst.PutScalar(0.0);
2785 
2786  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2787  };
2788 
2789  return return_op;
2790  }
2791 
2792 
2793 
 2794  TrilinosPayload
 2795  TrilinosPayload::transpose_payload () const
 2796  {
2797  TrilinosPayload return_op (*this);
2798  return_op.transpose();
2799  return return_op;
2800  }
2801 
2802 
2803 
2804  IndexSet
 2805  TrilinosPayload::locally_owned_domain_indices () const
 2806  {
2807  return IndexSet(domain_map);
2808  }
2809 
2810 
2811 
2812  IndexSet
 2813  TrilinosPayload::locally_owned_range_indices () const
 2814  {
2815  return IndexSet(range_map);
2816  }
2817 
2818 
2819 
2820  MPI_Comm
 2821  TrilinosPayload::get_mpi_communicator () const
 2822  {
2823 #ifdef DEAL_II_WITH_MPI
2824  return communicator.Comm();
2825 #else
2826  return MPI_COMM_SELF;
2827 #endif
2828  }
2829 
2830 
2831 
 2832  void
 2833  TrilinosPayload::transpose ()
 2834  {
 2835  SetUseTranspose(!use_transpose);
 2836  }
2837 
2838 
2839 
2840  bool
 2841  TrilinosPayload::UseTranspose () const
 2842  {
2843  return use_transpose;
2844  }
2845 
2846 
2847 
2848  int
 2849  TrilinosPayload::SetUseTranspose (bool UseTranspose)
 2850  {
 2851  if (use_transpose != UseTranspose)
 2852  {
 2853  use_transpose = UseTranspose;
 2854  std::swap(domain_map,range_map);
 2855  std::swap(vmult,Tvmult);
 2856  std::swap(inv_vmult,inv_Tvmult);
 2857  }
2858  return 0;
2859  }
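  // SetUseTranspose() keeps the payload self-consistent by swapping the
  // domain/range maps together with the forward/transpose function
  // pairs, so Apply() and ApplyInverse() can always call vmult() and
  // inv_vmult() regardless of the flag. Sketch (hypothetical payload p):
  //
  //   p.SetUseTranspose (true);  // p.Apply(x, y) now computes y = A^T x
  //   p.SetUseTranspose (false); // back to y = A x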
2860 
2861 
2862 
2863  int
 2864  TrilinosPayload::Apply (VectorType &X,
 2865  VectorType &Y) const
2866  {
2867  // The transposedness of the operations is taken care of
2868  // when we hit the transpose flag.
2869  vmult(Y,X);
2870  return 0;
2871  }
2872 
2873 
2874 
2875  int
 2876  TrilinosPayload::ApplyInverse (VectorType &X,
 2877  VectorType &Y) const
2878  {
2879  // The transposedness of the operations is taken care of
2880  // when we hit the transpose flag.
2881  inv_vmult(Y,X);
2882  return 0;
2883  }
2884 
2885 
2886 
2887  const char *
 2888  TrilinosPayload::Label () const
 2889  {
2890  return "TrilinosPayload";
2891  }
2892 
2893 
2894 
2895  const Epetra_Comm &
 2896  TrilinosPayload::Comm () const
 2897  {
2898  return communicator;
2899  }
2900 
2901 
2902 
2903  const Epetra_Map &
 2904  TrilinosPayload::OperatorDomainMap () const
 2905  {
2906  return domain_map;
2907  }
2908 
2909 
2910 
2911  const Epetra_Map &
 2912  TrilinosPayload::OperatorRangeMap () const
 2913  {
2914  return range_map;
2915  }
2916 
2917 
2918 
2919  bool
 2920  TrilinosPayload::HasNormInf () const
 2921  {
2922  return false;
2923  }
2924 
2925 
2926 
2927  double
 2928  TrilinosPayload::NormInf () const
 2929  {
2930  AssertThrow(false, ExcNotImplemented());
2931  return 0.0;
2932  }
2933 
2934 
2935 
2936  TrilinosPayload operator+(const TrilinosPayload &first_op,
2937  const TrilinosPayload &second_op)
2938  {
2939  typedef typename TrilinosPayload::Domain Domain;
2940  typedef typename TrilinosPayload::Range Range;
2941  typedef typename TrilinosPayload::VectorType Intermediate;
2942  typedef TrilinosWrappers::MPI::Vector GVMVectorType;
2943 
 2944  AssertThrow(first_op.locally_owned_domain_indices() == second_op.locally_owned_domain_indices(),
 2945  ExcMessage("Operators are set to work on incompatible IndexSets."));
 2946  AssertThrow(first_op.locally_owned_range_indices() == second_op.locally_owned_range_indices(),
 2947  ExcMessage("Operators are set to work on incompatible IndexSets."));
2948 
2949  TrilinosPayload return_op (first_op, second_op);
2950 
2951  // Capture by copy so the payloads are always valid
2952  return_op.vmult = [first_op, second_op](Range &tril_dst,
2953  const Domain &tril_src)
2954  {
2955  // Duplicated from LinearOperator::operator*
2956  // TODO: Template the constructor on GrowingVectorMemory vector type?
2957  GrowingVectorMemory<GVMVectorType> vector_memory;
2958  VectorMemory<GVMVectorType>::Pointer i (vector_memory);
2959 
2960  // Initialize intermediate vector
2961  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2962  i->reinit(IndexSet(first_op_init_map),
2963  first_op.get_mpi_communicator(),
2964  /*bool omit_zeroing_entries =*/true);
2965 
2966  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2967  const size_type i_local_size = i->end() - i->begin();
2968  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
2969  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2970  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
2971  (void)second_op_init_map;
2972  Intermediate tril_int (View, first_op_init_map,
2973  const_cast<TrilinosScalar *>(i->begin()),
2974  i_local_size, 1);
2975 
2976  // These operators may themselves be transposed or not, so we let them
2977  // decide what the intended outcome is
2978  second_op.Apply(tril_src, tril_int);
2979  first_op.Apply(tril_src, tril_dst);
2980  const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
2981  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2982  };
2983 
2984  return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
2985  const Range &tril_src)
2986  {
2987  // Duplicated from LinearOperator::operator*
2988  // TODO: Template the constructor on GrowingVectorMemory vector type?
2989  GrowingVectorMemory<GVMVectorType> vector_memory;
2990  VectorMemory<GVMVectorType>::Pointer i (vector_memory);
2991 
2992  // These operators may themselves be transposed or not, so we let them
2993  // decide what the intended outcome is
2994  // We must first transpose the operators to get the right IndexSets
2995  // for the input, intermediate and result vectors
2996  const_cast<TrilinosPayload &>(first_op).transpose();
2997  const_cast<TrilinosPayload &>(second_op).transpose();
2998 
2999  // Initialize intermediate vector
3000  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3001  i->reinit(IndexSet(first_op_init_map),
3002  first_op.get_mpi_communicator(),
3003  /*bool omit_zeroing_entries =*/true);
3004 
3005  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3006  const size_type i_local_size = i->end() - i->begin();
3007  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3008  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3009  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3010  (void)second_op_init_map;
3011  Intermediate tril_int (View, first_op_init_map,
3012  const_cast<TrilinosScalar *>(i->begin()),
3013  i_local_size, 1);
3014 
3015  // These operators may themselves be transposed or not, so we let them
3016  // decide what the intended outcome is
3017  second_op.Apply(tril_src, tril_int);
3018  first_op.Apply(tril_src, tril_dst);
3019  const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
3020  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3021 
3022  // Reset transpose flag
3023  const_cast<TrilinosPayload &>(first_op).transpose();
3024  const_cast<TrilinosPayload &>(second_op).transpose();
3025  };
3026 
3027  return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
3028  const Range &tril_src)
3029  {
3030  // Duplicated from LinearOperator::operator*
3031  // TODO: Template the constructor on GrowingVectorMemory vector type?
3032  GrowingVectorMemory<GVMVectorType> vector_memory;
3033  VectorMemory<GVMVectorType>::Pointer i (vector_memory);
3034 
3035  // Initialize intermediate vector
3036  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3037  i->reinit(IndexSet(first_op_init_map),
3038  first_op.get_mpi_communicator(),
3039  /*bool omit_zeroing_entries =*/true);
3040 
3041  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3042  const size_type i_local_size = i->end() - i->begin();
3043  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3044  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3045  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3046  (void)second_op_init_map;
3047  Intermediate tril_int (View, first_op_init_map,
3048  const_cast<TrilinosScalar *>(i->begin()),
3049  i_local_size, 1);
3050 
3051  // These operators may themselves be transposed or not, so we let them
3052  // decide what the intended outcome is
3053  second_op.ApplyInverse(tril_src, tril_int);
3054  first_op.ApplyInverse(tril_src, tril_dst);
3055  const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
3056  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3057  };
3058 
3059  return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
3060  const Domain &tril_src)
3061  {
3062  // Duplicated from LinearOperator::operator*
3063  // TODO: Template the constructor on GrowingVectorMemory vector type?
3064  GrowingVectorMemory<GVMVectorType> vector_memory;
3065  VectorMemory<GVMVectorType>::Pointer i (vector_memory);
3066 
3067  // These operators may themselves be transposed or not, so we let them
3068  // decide what the intended outcome is
3069  // We must first transpose the operators to get the right IndexSets
3070  // for the input, intermediate and result vectors
3071  const_cast<TrilinosPayload &>(first_op).transpose();
3072  const_cast<TrilinosPayload &>(second_op).transpose();
3073 
3074  // Initialize intermediate vector
3075  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3076  i->reinit(IndexSet(first_op_init_map),
3077  first_op.get_mpi_communicator(),
3078  /*bool omit_zeroing_entries =*/true);
3079 
3080  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3081  const size_type i_local_size = i->end() - i->begin();
3082  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3083  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3084  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3085  (void)second_op_init_map;
3086  Intermediate tril_int (View, first_op_init_map,
3087  const_cast<TrilinosScalar *>(i->begin()),
3088  i_local_size, 1);
3089 
3090  // These operators may themselves be transposed or not, so we let them
3091  // decide what the intended outcome is
3092  second_op.ApplyInverse(tril_src, tril_int);
3093  first_op.ApplyInverse(tril_src, tril_dst);
3094  const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
3095  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3096 
3097  // Reset transpose flag
3098  const_cast<TrilinosPayload &>(first_op).transpose();
3099  const_cast<TrilinosPayload &>(second_op).transpose();
3100  };
3101 
3102  return return_op;
3103  }
3104 
3105 
3106 
3107  TrilinosPayload operator*(const TrilinosPayload &first_op,
3108  const TrilinosPayload &second_op)
3109  {
3110  typedef typename TrilinosPayload::Domain Domain;
3111  typedef typename TrilinosPayload::Range Range;
3112  typedef typename TrilinosPayload::VectorType Intermediate;
3113  typedef TrilinosWrappers::MPI::Vector GVMVectorType;
3114 
3115  AssertThrow(first_op.locally_owned_domain_indices() == second_op.locally_owned_range_indices(),
3116  ExcMessage("Operators are set to work on incompatible IndexSets."));
3117 
3118  TrilinosPayload return_op (first_op, second_op);
3119 
3120  // Capture by copy so the payloads are always valid
3121  return_op.vmult = [first_op, second_op](Range &tril_dst,
3122  const Domain &tril_src)
3123  {
3124  // Duplicated from LinearOperator::operator*
3125  // TODO: Template the constructor on GrowingVectorMemory vector type?
3126  GrowingVectorMemory<GVMVectorType> vector_memory;
3127  VectorMemory<GVMVectorType>::Pointer i (vector_memory);
3128 
3129  // Initialize intermediate vector
3130  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3131  i->reinit(IndexSet(first_op_init_map),
3132  first_op.get_mpi_communicator(),
3133  /*bool omit_zeroing_entries =*/true);
3134 
3135  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3136  const size_type i_local_size = i->end() - i->begin();
3137  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3138  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3139  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3140  (void)second_op_init_map;
3141  Intermediate tril_int (View, first_op_init_map,
3142  const_cast<TrilinosScalar *>(i->begin()),
3143  i_local_size, 1);
3144 
3145  // These operators may themselves be transposed or not, so we let them
3146  // decide what the intended outcome is
3147  second_op.Apply(tril_src, tril_int);
3148  first_op.Apply(tril_int, tril_dst);
3149  };
3150 
3151  return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
3152  const Range &tril_src)
3153  {
3154  // Duplicated from LinearOperator::operator*
3155  // TODO: Template the constructor on GrowingVectorMemory vector type?
3156  GrowingVectorMemory<GVMVectorType> vector_memory;
3157  VectorMemory<GVMVectorType>::Pointer i (vector_memory);
3158 
3159  // These operators may themselves be transposed or not, so we let them
3160  // decide what the intended outcome is
3161  // We must first transpose the operators to get the right IndexSets
3162  // for the input, intermediate and result vectors
3163  const_cast<TrilinosPayload &>(first_op).transpose();
3164  const_cast<TrilinosPayload &>(second_op).transpose();
3165 
3166  // Initialize intermediate vector
3167  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3168  i->reinit(IndexSet(first_op_init_map),
3169  first_op.get_mpi_communicator(),
3170  /*bool omit_zeroing_entries =*/true);
3171 
3172  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3173  const size_type i_local_size = i->end() - i->begin();
3174  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3175  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3176  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3177  (void)second_op_init_map;
3178  Intermediate tril_int (View, first_op_init_map,
3179  const_cast<TrilinosScalar *>(i->begin()),
3180  i_local_size, 1);
3181 
3182  // Apply the operators in the reverse order to vmult
3183  first_op.Apply(tril_src, tril_int);
3184  second_op.Apply(tril_int, tril_dst);
3185 
3186  // Reset transpose flag
3187  const_cast<TrilinosPayload &>(first_op).transpose();
3188  const_cast<TrilinosPayload &>(second_op).transpose();
3189  };
3190 
3191  return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
3192  const Range &tril_src)
3193  {
3194  // Duplicated from LinearOperator::operator*
3195  // TODO: Template the constructor on GrowingVectorMemory vector type?
3196  GrowingVectorMemory<GVMVectorType> vector_memory;
3197  VectorMemory<GVMVectorType>::Pointer i (vector_memory);
3198 
3199  // Initialize intermediate vector
3200  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3201  i->reinit(IndexSet(first_op_init_map),
3202  first_op.get_mpi_communicator(),
3203  /*bool omit_zeroing_entries =*/true);
3204 
3205  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3206  const size_type i_local_size = i->end() - i->begin();
3207  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3208  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3209  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3210  (void)second_op_init_map;
3211  Intermediate tril_int (View, first_op_init_map,
3212  const_cast<TrilinosScalar *>(i->begin()),
3213  i_local_size, 1);
3214 
3215  // Apply the operators in the reverse order to vmult
3216  // and the same order as Tvmult
3217  first_op.ApplyInverse(tril_src, tril_int);
3218  second_op.ApplyInverse(tril_int, tril_dst);
3219  };
3220 
3221  return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
3222  const Domain &tril_src)
3223  {
3224  // Duplicated from LinearOperator::operator*
3225  // TODO: Template the constructor on GrowingVectorMemory vector type?
3226  GrowingVectorMemory<GVMVectorType> vector_memory;
3227  VectorMemory<GVMVectorType>::Pointer i (vector_memory);
3228 
3229  // These operators may themselves be transposed or not, so we let them
3230  // decide what the intended outcome is
3231  // We must first transpose the operators to get the right IndexSets
3232  // for the input, intermediate and result vectors
3233  const_cast<TrilinosPayload &>(first_op).transpose();
3234  const_cast<TrilinosPayload &>(second_op).transpose();
3235 
3236  // Initialize intermediate vector
3237  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3238  i->reinit(IndexSet(first_op_init_map),
3239  first_op.get_mpi_communicator(),
3240  /*bool omit_zeroing_entries =*/true);
3241 
3242  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3243  const size_type i_local_size = i->end() - i->begin();
3244  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3245  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3246  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3247  (void)second_op_init_map;
3248  Intermediate tril_int (View, first_op_init_map,
3249  const_cast<TrilinosScalar *>(i->begin()),
3250  i_local_size, 1);
3251 
3252  // These operators may themselves be transposed or not, so we let them
3253  // decide what the intended outcome is
3254  // Apply the operators in the reverse order to Tvmult
3255  // and the same order as vmult
3256  second_op.ApplyInverse(tril_src, tril_int);
3257  first_op.ApplyInverse(tril_int, tril_dst);
3258 
3259  // Reset transpose flag
3260  const_cast<TrilinosPayload &>(first_op).transpose();
3261  const_cast<TrilinosPayload &>(second_op).transpose();
3262  };
3263 
3264  return return_op;
3265  }
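  // Taken together, operator+ and operator* let PackagedOperation
  // expressions combine payloads: the sum applies both operands to the
  // same source and adds the results, while the product chains them
  // through an intermediate vector. A hedged sketch (p1, p2 are
  // compatible TrilinosPayload objects):
  //
  //   TrilinosPayload sum  = p1 + p2; // vmult: dst = P1 src + P2 src
  //   TrilinosPayload prod = p1 * p2; // vmult: dst = P1 (P2 src)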
3266 
 3267  } /* namespace LinearOperatorImplementation */
3268  } /* namespace internal */
3269 } /* namespace TrilinosWrappers */
3270 
3271 
3272 
3273 // explicit instantiations
3274 #include "trilinos_sparse_matrix.inst"
3275 
3276 
3277 // TODO: put these instantiations into generic file
3278 namespace TrilinosWrappers
3279 {
3280  template void
3281  SparseMatrix::reinit (const ::SparsityPattern &);
3282 
3283  template void
 3284  SparseMatrix::reinit (const DynamicSparsityPattern &);
 3285 
3286  template void
3287  SparseMatrix::reinit (const Epetra_Map &,
3288  const ::SparsityPattern &,
3289  const bool);
3290  template void
3291  SparseMatrix::reinit (const Epetra_Map &,
3292  const DynamicSparsityPattern &,
3293  const bool);
3294  template void
3295  SparseMatrix::reinit (const Epetra_Map &,
3296  const Epetra_Map &,
3297  const ::SparsityPattern &,
3298  const bool);
3299  template void
3300  SparseMatrix::reinit (const Epetra_Map &,
3301  const Epetra_Map &,
3302  const DynamicSparsityPattern &,
3303  const bool);
3304  template void
3305  SparseMatrix::reinit (const IndexSet &,
3306  const IndexSet &,
3307  const ::SparsityPattern &,
3308  const MPI_Comm &,
3309  const bool);
3310 
3311  template void
3312  SparseMatrix::reinit (const IndexSet &,
3313  const IndexSet &,
3314  const DynamicSparsityPattern &,
3315  const MPI_Comm &,
3316  const bool);
3317 
3318  template void
3319  SparseMatrix::vmult (MPI::Vector &,
3320  const MPI::Vector &) const;
3321  template void
 3322  SparseMatrix::vmult (::Vector<double> &,
 3323  const ::Vector<double> &) const;
3324  template void
 3325  SparseMatrix::vmult (::LinearAlgebra::distributed::Vector<double> &,
 3326  const ::LinearAlgebra::distributed::Vector<double> &) const;
3327 #ifdef DEAL_II_WITH_MPI
3328  template void
 3329  SparseMatrix::vmult (::LinearAlgebra::EpetraWrappers::Vector &,
 3330  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3331 #endif
3332  template void
3333  SparseMatrix::Tvmult (MPI::Vector &,
3334  const MPI::Vector &) const;
3335  template void
 3336  SparseMatrix::Tvmult (::Vector<double> &,
 3337  const ::Vector<double> &) const;
3338  template void
 3339  SparseMatrix::Tvmult (::LinearAlgebra::distributed::Vector<double> &,
 3340  const ::LinearAlgebra::distributed::Vector<double> &) const;
3341 #ifdef DEAL_II_WITH_MPI
3342  template void
 3343  SparseMatrix::Tvmult (::LinearAlgebra::EpetraWrappers::Vector &,
 3344  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3345 #endif
3346  template void
3347  SparseMatrix::vmult_add (MPI::Vector &,
3348  const MPI::Vector &) const;
3349  template void
 3350  SparseMatrix::vmult_add (::Vector<double> &,
 3351  const ::Vector<double> &) const;
3352  template void
 3353  SparseMatrix::vmult_add (::LinearAlgebra::distributed::Vector<double> &,
 3354  const ::LinearAlgebra::distributed::Vector<double> &) const;
3355 #ifdef DEAL_II_WITH_MPI
3356  template void
 3357  SparseMatrix::vmult_add (::LinearAlgebra::EpetraWrappers::Vector &,
 3358  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3359 #endif
3360  template void
3361  SparseMatrix::Tvmult_add (MPI::Vector &,
3362  const MPI::Vector &) const;
3363  template void
 3364  SparseMatrix::Tvmult_add (::Vector<double> &,
 3365  const ::Vector<double> &) const;
3366  template void
 3367  SparseMatrix::Tvmult_add (::LinearAlgebra::distributed::Vector<double> &,
 3368  const ::LinearAlgebra::distributed::Vector<double> &) const;
3369 #ifdef DEAL_II_WITH_MPI
3370  template void
 3371  SparseMatrix::Tvmult_add (::LinearAlgebra::EpetraWrappers::Vector &,
 3372  const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3373 #endif
3374 }
3375 
3376 DEAL_II_NAMESPACE_CLOSE
3377 
3378 #endif // DEAL_II_WITH_TRILINOS
#define AssertIsFinite(number)
Definition: exceptions.h:1299
unsigned int local_size() const
void compress(::VectorOperation::values)
static ::ExceptionBase & ExcInternalError()
std::shared_ptr< std::vector< TrilinosScalar > > value_cache