Reference documentation for deal.II version 8.5.1
trilinos_sparse_matrix.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2008 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/trilinos_sparse_matrix.h>
17 
18 #ifdef DEAL_II_WITH_TRILINOS
19 
20 # include <deal.II/base/utilities.h>
21 # include <deal.II/lac/sparse_matrix.h>
22 # include <deal.II/lac/trilinos_sparsity_pattern.h>
23 # include <deal.II/lac/sparsity_pattern.h>
24 # include <deal.II/lac/dynamic_sparsity_pattern.h>
25 # include <deal.II/lac/sparsity_tools.h>
26 # include <deal.II/lac/la_parallel_vector.h>
27 
28 #ifdef DEAL_II_WITH_CXX11
29 # include <deal.II/lac/trilinos_precondition.h>
30 #endif
31 
32 DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
33 # include <Epetra_Export.h>
34 # include <ml_epetra_utils.h>
35 # include <ml_struct.h>
36 # include <Teuchos_RCP.hpp>
37 DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
38 
39 DEAL_II_NAMESPACE_OPEN
40 
41 namespace TrilinosWrappers
42 {
43  namespace
44  {
45 #ifndef DEAL_II_WITH_64BIT_INDICES
46  // define a helper function that queries the size of an Epetra_Map object
47  // by calling whichever of the 32- or 64-bit functions is necessary, and
48  // returns the result in the correct data type so that we can use it when
49  // calling other Epetra member functions that are overloaded by index type
50  int n_global_elements (const Epetra_BlockMap &map)
51  {
52  return map.NumGlobalElements();
53  }
54 
55  int min_my_gid(const Epetra_BlockMap &map)
56  {
57  return map.MinMyGID();
58  }
59 
60  int max_my_gid(const Epetra_BlockMap &map)
61  {
62  return map.MaxMyGID();
63  }
64 
65  int n_global_cols(const Epetra_CrsGraph &graph)
66  {
67  return graph.NumGlobalCols();
68  }
69 
70  int global_column_index(const Epetra_CrsMatrix &matrix, int i)
71  {
72  return matrix.GCID(i);
73  }
74 
75  int global_row_index(const Epetra_CrsMatrix &matrix, int i)
76  {
77  return matrix.GRID(i);
78  }
79 #else
80  // define a helper function that queries the size of an Epetra_Map object
81  // by calling whichever of the 32- or 64-bit functions is necessary, and
82  // returns the result in the correct data type so that we can use it when
83  // calling other Epetra member functions that are overloaded by index type
84  long long int n_global_elements (const Epetra_BlockMap &map)
85  {
86  return map.NumGlobalElements64();
87  }
88 
89  long long int min_my_gid(const Epetra_BlockMap &map)
90  {
91  return map.MinMyGID64();
92  }
93 
94  long long int max_my_gid(const Epetra_BlockMap &map)
95  {
96  return map.MaxMyGID64();
97  }
98 
99  long long int n_global_cols(const Epetra_CrsGraph &graph)
100  {
101  return graph.NumGlobalCols64();
102  }
103 
104  long long int global_column_index(const Epetra_CrsMatrix &matrix, int i)
105  {
106  return matrix.GCID64(i);
107  }
108 
109  long long int global_row_index(const Epetra_CrsMatrix &matrix, int i)
110  {
111  return matrix.GRID64(i);
112  }
113 #endif
114  }
115 
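// A minimal sketch of how these file-local dispatch helpers are used
// (a usage sketch, assuming a serial Epetra communicator;
// `n_global_elements` is the helper defined above):
//
//   Epetra_SerialComm comm;
//   Epetra_Map map (100, 0, comm);  // 100 global elements, index base 0
//   // resolves to NumGlobalElements() or NumGlobalElements64(),
//   // depending on the DEAL_II_WITH_64BIT_INDICES configuration
//   const TrilinosWrappers::types::int_type n = n_global_elements (map);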
116 
117  namespace SparseMatrixIterators
118  {
119  void
120  AccessorBase::visit_present_row ()
121  {
122  // if we are asked to visit the past-the-end line, then simply
123  // release all our caches and go on with life.
124  //
125  // do the same if the row we're supposed to visit is not locally
126  // owned. this is simply going to make non-locally owned rows
127  // look like they're empty
128  if ((this->a_row == matrix->m())
129  ||
130  (matrix->in_local_range (this->a_row) == false))
131  {
132  colnum_cache.reset ();
133  value_cache.reset ();
134 
135  return;
136  }
137 
138  // get a representation of the present row
139  int ncols;
140  TrilinosWrappers::types::int_type colnums = matrix->n();
141  if (value_cache.get() == 0)
142  {
143  value_cache.reset (new std::vector<TrilinosScalar> (matrix->n()));
144  colnum_cache.reset (new std::vector<size_type> (matrix->n()));
145  }
146  else
147  {
148  value_cache->resize (matrix->n());
149  colnum_cache->resize (matrix->n());
150  }
151 
152  int ierr = matrix->trilinos_matrix().
153  ExtractGlobalRowCopy((TrilinosWrappers::types::int_type)this->a_row,
154  colnums,
155  ncols, &((*value_cache)[0]),
156  reinterpret_cast<TrilinosWrappers::types::int_type *>(&((*colnum_cache)[0])));
157  value_cache->resize (ncols);
158  colnum_cache->resize (ncols);
159  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
160 
161  // copy it into our caches if the
162  // line isn't empty. if it is, then
163  // we've done something wrong, since
164  // we shouldn't have initialized an
165  // iterator for an empty line (what
166  // would it point to?)
167  }
168  }
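// A minimal sketch of the iterator loop that relies on the row caches
// filled by visit_present_row() above (`A` is a placeholder for a
// compressed TrilinosWrappers::SparseMatrix):
//
//   for (TrilinosWrappers::SparseMatrix::const_iterator p = A.begin();
//        p != A.end(); ++p)
//     std::cout << "A(" << p->row() << "," << p->column() << ") = "
//               << p->value() << std::endl;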
169 
170 
171  // The constructor is actually the
172  // only point where we have to check
173  // whether we build a serial or a
174  // parallel Trilinos matrix.
175  // Actually, it does not even matter
176  // how many threads there are, but
177  // only whether we use an MPI compiler or
178  // a standard compiler. So, even one
179  // thread on a configuration with
180  // MPI will still get a parallel
181  // interface.
182  SparseMatrix::SparseMatrix ()
183  :
184  column_space_map (new Epetra_Map (0, 0,
185  Utilities::Trilinos::comm_self())),
186  matrix (new Epetra_FECrsMatrix(View, *column_space_map,
187  *column_space_map, 0)),
188  last_action (Zero),
189  compressed (true)
190  {
191  matrix->FillComplete();
192  }
193 
194 
195 
196  SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
197  const size_type n_max_entries_per_row)
198  :
199  column_space_map (new Epetra_Map (input_map)),
200  matrix (new Epetra_FECrsMatrix(Copy, *column_space_map,
201  TrilinosWrappers::types::int_type(n_max_entries_per_row), false)),
202  last_action (Zero),
203  compressed (false)
204  {}
205 
206 
207 
208  SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
209  const std::vector<unsigned int> &n_entries_per_row)
210  :
211  column_space_map (new Epetra_Map (input_map)),
212  matrix (new Epetra_FECrsMatrix
213  (Copy, *column_space_map,
214  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
215  false)),
216  last_action (Zero),
217  compressed (false)
218  {}
219 
220 
221 
222  SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
223  const Epetra_Map &input_col_map,
224  const size_type n_max_entries_per_row)
225  :
226  column_space_map (new Epetra_Map (input_col_map)),
227  matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
228  TrilinosWrappers::types::int_type(n_max_entries_per_row), false)),
229  last_action (Zero),
230  compressed (false)
231  {}
232 
233 
234 
235  SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
236  const Epetra_Map &input_col_map,
237  const std::vector<unsigned int> &n_entries_per_row)
238  :
239  column_space_map (new Epetra_Map (input_col_map)),
240  matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
241  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
242  false)),
243  last_action (Zero),
244  compressed (false)
245  {}
246 
247 
248 
249  SparseMatrix::SparseMatrix (const size_type m,
250  const size_type n,
251  const unsigned int n_max_entries_per_row)
252  :
253  column_space_map (new Epetra_Map (static_cast<TrilinosWrappers::types::int_type>(n), 0,
254  Utilities::Trilinos::comm_self())),
255 
256  // on one processor only, we know how the
257  // columns of the matrix will be
258  // distributed (everything on one
259  // processor), so we can hand in this
260  // information to the constructor. we
261  // can't do so in parallel, where the
262  // information from columns is only
263  // available when entries have been added
264  matrix (new Epetra_FECrsMatrix(Copy,
265  Epetra_Map (static_cast<TrilinosWrappers::types::int_type>(m), 0,
266  Utilities::Trilinos::comm_self()),
267  *column_space_map,
268  n_max_entries_per_row,
269  false)),
270  last_action (Zero),
271  compressed (false)
272  {}
273 
274 
275 
276  SparseMatrix::SparseMatrix (const size_type m,
277  const size_type n,
278  const std::vector<unsigned int> &n_entries_per_row)
279  :
280  column_space_map (new Epetra_Map (static_cast<TrilinosWrappers::types::int_type>(n), 0,
281  Utilities::Trilinos::comm_self())),
282  matrix (new Epetra_FECrsMatrix(Copy,
283  Epetra_Map (static_cast<TrilinosWrappers::types::int_type>(m), 0,
284  Utilities::Trilinos::comm_self()),
285  *column_space_map,
286  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
287  false)),
288  last_action (Zero),
289  compressed (false)
290  {}
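// A minimal sketch of serial construction via the (m, n, entries-per-row)
// constructors just defined (sizes and values are arbitrary placeholders):
//
//   TrilinosWrappers::SparseMatrix A (100, 100, 5); // <= 5 entries per row
//   A.set (0, 0, 3.14);
//   A.compress (VectorOperation::insert);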
291 
292 
293 
294  SparseMatrix::SparseMatrix (const IndexSet &parallel_partitioning,
295  const MPI_Comm &communicator,
296  const unsigned int n_max_entries_per_row)
297  :
298  column_space_map (new Epetra_Map(parallel_partitioning.
299  make_trilinos_map(communicator, false))),
300  matrix (new Epetra_FECrsMatrix(Copy,
301  *column_space_map,
302  n_max_entries_per_row,
303  false)),
304  last_action (Zero),
305  compressed (false)
306  {}
307 
308 
309 
310  SparseMatrix::SparseMatrix (const IndexSet &parallel_partitioning,
311  const MPI_Comm &communicator,
312  const std::vector<unsigned int> &n_entries_per_row)
313  :
314  column_space_map (new Epetra_Map(parallel_partitioning.
315  make_trilinos_map(communicator, false))),
316  matrix (new Epetra_FECrsMatrix(Copy,
317  *column_space_map,
318  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
319  false)),
320  last_action (Zero),
321  compressed (false)
322  {}
323 
324 
325 
326  SparseMatrix::SparseMatrix (const IndexSet &row_parallel_partitioning,
327  const IndexSet &col_parallel_partitioning,
328  const MPI_Comm &communicator,
329  const size_type n_max_entries_per_row)
330  :
331  column_space_map (new Epetra_Map(col_parallel_partitioning.
332  make_trilinos_map(communicator, false))),
333  matrix (new Epetra_FECrsMatrix(Copy,
334  row_parallel_partitioning.
335  make_trilinos_map(communicator, false),
336  n_max_entries_per_row,
337  false)),
338  last_action (Zero),
339  compressed (false)
340  {}
341 
342 
343 
344  SparseMatrix::SparseMatrix (const IndexSet &row_parallel_partitioning,
345  const IndexSet &col_parallel_partitioning,
346  const MPI_Comm &communicator,
347  const std::vector<unsigned int> &n_entries_per_row)
348  :
349  column_space_map (new Epetra_Map(col_parallel_partitioning.
350  make_trilinos_map(communicator, false))),
351  matrix (new Epetra_FECrsMatrix(Copy,
352  row_parallel_partitioning.
353  make_trilinos_map(communicator, false),
354  (int *)const_cast<unsigned int *>(&(n_entries_per_row[0])),
355  false)),
356  last_action (Zero),
357  compressed (false)
358  {}
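// A minimal sketch of parallel construction from IndexSets (a usage
// sketch; `locally_owned_dofs` and the entries-per-row guess are
// placeholders):
//
//   TrilinosWrappers::SparseMatrix A (locally_owned_dofs,
//                                     locally_owned_dofs,
//                                     MPI_COMM_WORLD,
//                                     /*n_max_entries_per_row=*/ 10);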
359 
360 
361 
362  SparseMatrix::SparseMatrix (const SparsityPattern &sparsity_pattern)
363  :
364  column_space_map (new Epetra_Map (sparsity_pattern.domain_partitioner())),
365  matrix (new Epetra_FECrsMatrix(Copy,
366  sparsity_pattern.trilinos_sparsity_pattern(),
367  false)),
368  last_action (Zero),
369  compressed (true)
370  {
371  Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true,
372  ExcMessage("The Trilinos sparsity pattern has not been compressed."));
373  compress(VectorOperation::insert);
374  }
375 
376 
377 
378  SparseMatrix::~SparseMatrix ()
379  {}
380 
381 
382 
383  void
384  SparseMatrix::copy_from (const SparseMatrix &rhs)
385  {
386  if (this == &rhs)
387  return;
388 
389  nonlocal_matrix.reset();
390  nonlocal_matrix_exporter.reset();
391 
392  // check whether we need to update the whole matrix layout (we have
393  // different maps or if we detect a row where the columns of the two
394  // matrices do not match)
395  bool needs_deep_copy =
396  !matrix->RowMap().SameAs(rhs.matrix->RowMap()) ||
397  !matrix->ColMap().SameAs(rhs.matrix->ColMap()) ||
398  !matrix->DomainMap().SameAs(rhs.matrix->DomainMap()) ||
399  !matrix->RangeMap().SameAs(rhs.matrix->RangeMap());
400  if (!needs_deep_copy)
401  {
402  const std::pair<size_type, size_type>
403  local_range = rhs.local_range();
404 
405  // Try to copy all the rows of the matrix one by one. In case of error
406  // (i.e., the column indices are different), we need to abort and blow
407  // away the matrix.
408  for (size_type row=local_range.first; row < local_range.second; ++row)
409  {
410  const int row_local =
411  matrix->RowMap().LID(static_cast<TrilinosWrappers::types::int_type>(row));
412 
413  int n_entries, rhs_n_entries;
414  TrilinosScalar *value_ptr, *rhs_value_ptr;
415  int *index_ptr, *rhs_index_ptr;
416  int ierr = rhs.matrix->ExtractMyRowView (row_local, rhs_n_entries,
417  rhs_value_ptr, rhs_index_ptr);
418  (void)ierr;
419  Assert (ierr == 0, ExcTrilinosError(ierr));
420 
421  ierr = matrix->ExtractMyRowView (row_local, n_entries, value_ptr,
422  index_ptr);
423  Assert (ierr == 0, ExcTrilinosError(ierr));
424 
425  if (n_entries != rhs_n_entries ||
426  std::memcmp(static_cast<void *>(index_ptr),
427  static_cast<void *>(rhs_index_ptr),
428  sizeof(int)*n_entries) != 0)
429  {
430  needs_deep_copy = true;
431  break;
432  }
433 
434  for (int i=0; i<n_entries; ++i)
435  value_ptr[i] = rhs_value_ptr[i];
436  }
437  }
438 
439  if (needs_deep_copy)
440  {
441  column_space_map.reset (new Epetra_Map (rhs.domain_partitioner()));
442 
443  // release memory before reallocation
444  matrix.reset ();
445  matrix.reset (new Epetra_FECrsMatrix(*rhs.matrix));
446 
447  matrix->FillComplete(*column_space_map, matrix->RowMap());
448  }
449 
450  if (rhs.nonlocal_matrix.get() != 0)
451  nonlocal_matrix.reset(new Epetra_CrsMatrix(Copy, rhs.nonlocal_matrix->Graph()));
452  }
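// A minimal sketch of copy_from(): if `B` already has the same sparsity
// layout as `A`, only the values are copied row by row; otherwise the
// loop above bails out and a deep copy rebuilds the layout (A and B are
// placeholders):
//
//   TrilinosWrappers::SparseMatrix B;
//   B.copy_from (A);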
453 
454 
455 
456  namespace
457  {
458  typedef SparseMatrix::size_type size_type;
459 
460  template <typename SparsityPatternType>
461  void
462  reinit_matrix (const Epetra_Map &input_row_map,
463  const Epetra_Map &input_col_map,
464  const SparsityPatternType &sparsity_pattern,
465  const bool exchange_data,
466  std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
467  std_cxx11::shared_ptr<Epetra_FECrsMatrix> &matrix,
468  std_cxx11::shared_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
469  std_cxx11::shared_ptr<Epetra_Export> &nonlocal_matrix_exporter)
470  {
471  // release memory before reallocation
472  matrix.reset();
473  nonlocal_matrix.reset();
474  nonlocal_matrix_exporter.reset();
475 
476  if (input_row_map.Comm().MyPID() == 0)
477  {
478  AssertDimension (sparsity_pattern.n_rows(),
479  static_cast<size_type>(n_global_elements(input_row_map)));
480  AssertDimension (sparsity_pattern.n_cols(),
481  static_cast<size_type>(n_global_elements(input_col_map)));
482  }
483 
484  column_space_map.reset (new Epetra_Map (input_col_map));
485 
486  // if we want to exchange data, build a usual Trilinos sparsity pattern
487  // and let that handle the exchange. otherwise, manually create a
488  // CrsGraph, which consumes considerably less memory because it can set
489  // the correct number of indices right from the start
490  if (exchange_data)
491  {
492  SparsityPattern trilinos_sparsity;
493  trilinos_sparsity.reinit (input_row_map, input_col_map,
494  sparsity_pattern, exchange_data);
495  matrix.reset (new Epetra_FECrsMatrix
496  (Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false));
497 
498  return;
499  }
500 
501  const size_type first_row = min_my_gid(input_row_map),
502  last_row = max_my_gid(input_row_map)+1;
503  std::vector<int> n_entries_per_row(last_row-first_row);
504 
505  for (size_type row=first_row; row<last_row; ++row)
506  n_entries_per_row[row-first_row] = sparsity_pattern.row_length(row);
507 
508  // The deal.II notion of a sparsity pattern corresponds to the Epetra
509  // concept of a graph. Hence, we generate a graph by copying the
510  // sparsity pattern into it, and then build up the matrix from the
511  // graph. This is considerably faster than directly filling elements
512  // into the matrix. Moreover, it consumes less memory, since the
513  // internal reordering is done on ints only, and we can leave the
514  // doubles aside.
515 
516  // for more than one processor, need to specify only row map first and
517  // let the matrix entries decide about the column map (which says which
518  // columns are present in the matrix, not to be confused with the
519  // col_map that tells how the domain dofs of the matrix will be
520  // distributed). for only one processor, we can directly assign the
521  // columns as well. Compare this with bug # 4123 in the Sandia Bugzilla.
522  std_cxx11::shared_ptr<Epetra_CrsGraph> graph;
523  if (input_row_map.Comm().NumProc() > 1)
524  graph.reset (new Epetra_CrsGraph (Copy, input_row_map,
525  &n_entries_per_row[0], true));
526  else
527  graph.reset (new Epetra_CrsGraph (Copy, input_row_map, input_col_map,
528  &n_entries_per_row[0], true));
529 
530  // This function assumes that the sparsity pattern sits on all
531  // processors (completely). The parallel version uses an Epetra graph
532  // that is already distributed.
533 
534  // now insert the indices
535  std::vector<TrilinosWrappers::types::int_type> row_indices;
536 
537  for (size_type row=first_row; row<last_row; ++row)
538  {
539  const int row_length = sparsity_pattern.row_length(row);
540  if (row_length == 0)
541  continue;
542 
543  row_indices.resize (row_length, -1);
544  {
545  typename SparsityPatternType::iterator p = sparsity_pattern.begin(row);
546  for (size_type col=0; p != sparsity_pattern.end(row); ++p, ++col)
547  row_indices[col] = p->column();
548  }
549  graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
550  &row_indices[0]);
551  }
552 
553  // Finally, optimize the graph structure (sort indices, make memory
554  // contiguous, etc.). Note that the documentation of the function indeed
555  // states that we first need to provide the column (domain) map and then
556  // the row (range) map
557  graph->FillComplete(input_col_map, input_row_map);
558  graph->OptimizeStorage();
559 
560  // check whether we got the number of columns right.
561  AssertDimension (sparsity_pattern.n_cols(),
562  static_cast<size_type>(n_global_cols(*graph)));
563  (void)n_global_cols;
564 
565  // And now finally generate the matrix.
566  matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false));
567  }
568 
569 
570 
571  // for the non-local graph, we need to circumvent the problem that some
572  // processors will not add into the non-local graph at all: We do not want
573  // to insert dummy elements on >5000 processors because that gets very
574  // slow. Thus, we set a flag in Epetra_CrsGraph that sets the correct
575  // flag. Since it is protected, we need to expose this information by
576  // deriving a class from Epetra_CrsGraph for the purpose of creating the
577  // data structure
578  class Epetra_CrsGraphMod : public Epetra_CrsGraph
579  {
580  public:
581  Epetra_CrsGraphMod (const Epetra_Map &row_map,
582  const int *n_entries_per_row)
583  :
584  Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
585  {};
586 
587  void SetIndicesAreGlobal()
588  {
589  this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
590  }
591  };
592 
593 
594  // specialization for DynamicSparsityPattern which can provide us with
595  // more information about the non-locally owned rows
596  template <>
597  void
598  reinit_matrix (const Epetra_Map &input_row_map,
599  const Epetra_Map &input_col_map,
600  const DynamicSparsityPattern &sparsity_pattern,
601  const bool exchange_data,
602  std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
603  std_cxx11::shared_ptr<Epetra_FECrsMatrix> &matrix,
604  std_cxx11::shared_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
605  std_cxx11::shared_ptr<Epetra_Export> &nonlocal_matrix_exporter)
606  {
607  matrix.reset();
608  nonlocal_matrix.reset();
609  nonlocal_matrix_exporter.reset();
610 
611  AssertDimension (sparsity_pattern.n_rows(),
612  static_cast<size_type>(n_global_elements(input_row_map)));
613  AssertDimension (sparsity_pattern.n_cols(),
614  static_cast<size_type>(n_global_elements(input_col_map)));
615 
616  column_space_map.reset (new Epetra_Map (input_col_map));
617 
618  IndexSet relevant_rows (sparsity_pattern.row_index_set());
619  // serial case
620  if (relevant_rows.size() == 0)
621  {
622  relevant_rows.set_size(n_global_elements(input_row_map));
623  relevant_rows.add_range(0, n_global_elements(input_row_map));
624  }
625  relevant_rows.compress();
626  Assert(relevant_rows.n_elements() >= static_cast<unsigned int>(input_row_map.NumMyElements()),
627  ExcMessage("Locally relevant rows of sparsity pattern must contain "
628  "all locally owned rows"));
629 
630  // check whether the relevant rows correspond to exactly the same map as
631  // the owned rows. In that case, do not create the nonlocal graph and
632  // fill the columns by demand
633  bool have_ghost_rows = false;
634  {
635  std::vector<dealii::types::global_dof_index> indices;
636  relevant_rows.fill_index_vector(indices);
637  Epetra_Map relevant_map (TrilinosWrappers::types::int_type(-1),
638  TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
639  (indices.empty() ? 0 :
640  reinterpret_cast<TrilinosWrappers::types::int_type *>(&indices[0])),
641  0, input_row_map.Comm());
642  if (relevant_map.SameAs(input_row_map))
643  have_ghost_rows = false;
644  else
645  have_ghost_rows = true;
646  }
647 
648  const unsigned int n_rows = relevant_rows.n_elements();
649  std::vector<TrilinosWrappers::types::int_type> ghost_rows;
650  std::vector<int> n_entries_per_row(input_row_map.NumMyElements());
651  std::vector<int> n_entries_per_ghost_row;
652  for (unsigned int i=0, own=0; i<n_rows; ++i)
653  {
654  const TrilinosWrappers::types::int_type global_row =
655  relevant_rows.nth_index_in_set(i);
656  if (input_row_map.MyGID(global_row))
657  n_entries_per_row[own++] = sparsity_pattern.row_length(global_row);
658  else if (sparsity_pattern.row_length(global_row) > 0)
659  {
660  ghost_rows.push_back(global_row);
661  n_entries_per_ghost_row.push_back(sparsity_pattern.row_length(global_row));
662  }
663  }
664 
665  Epetra_Map off_processor_map(-1, ghost_rows.size(),
666  (ghost_rows.size()>0)?(&ghost_rows[0]):NULL,
667  0, input_row_map.Comm());
668 
669  std_cxx11::shared_ptr<Epetra_CrsGraph> graph;
670  std_cxx11::shared_ptr<Epetra_CrsGraphMod> nonlocal_graph;
671  if (input_row_map.Comm().NumProc() > 1)
672  {
673  graph.reset (new Epetra_CrsGraph (Copy, input_row_map,
674  (n_entries_per_row.size()>0)?(&n_entries_per_row[0]):NULL,
675  exchange_data ? false : true));
676  if (have_ghost_rows == true)
677  nonlocal_graph.reset (new Epetra_CrsGraphMod (off_processor_map,
678  &n_entries_per_ghost_row[0]));
679  }
680  else
681  graph.reset (new Epetra_CrsGraph (Copy, input_row_map, input_col_map,
682  (n_entries_per_row.size()>0)?(&n_entries_per_row[0]):NULL,
683  true));
684 
685  // now insert the indices, select between the right matrix
686  std::vector<TrilinosWrappers::types::int_type> row_indices;
687 
688  for (unsigned int i=0; i<n_rows; ++i)
689  {
690  const TrilinosWrappers::types::int_type global_row =
691  relevant_rows.nth_index_in_set(i);
692  const int row_length = sparsity_pattern.row_length(global_row);
693  if (row_length == 0)
694  continue;
695 
696  row_indices.resize (row_length, -1);
697  for (int col=0; col < row_length; ++col)
698  row_indices[col] = sparsity_pattern.column_number(global_row, col);
699 
700  if (input_row_map.MyGID(global_row))
701  graph->InsertGlobalIndices (global_row, row_length, &row_indices[0]);
702  else
703  {
704  Assert(nonlocal_graph.get() != 0, ExcInternalError());
705  nonlocal_graph->InsertGlobalIndices (global_row, row_length,
706  &row_indices[0]);
707  }
708  }
709 
710  // finalize nonlocal graph and create nonlocal matrix
711  if (nonlocal_graph.get() != 0)
712  {
713  // must make sure the IndicesAreGlobal flag is set on all processors
714  // because some processors might not call InsertGlobalIndices (and
715  // we do not want to insert dummy indices on all processors for
716  // large-scale simulations due to the bad impact on performance)
717  nonlocal_graph->SetIndicesAreGlobal();
718  Assert(nonlocal_graph->IndicesAreGlobal() == true,
719  ExcInternalError());
720  nonlocal_graph->FillComplete(input_col_map, input_row_map);
721  nonlocal_graph->OptimizeStorage();
722 
723  // insert data from nonlocal graph into the final sparsity pattern
724  if (exchange_data)
725  {
726  Epetra_Export exporter(nonlocal_graph->RowMap(), input_row_map);
727  int ierr = graph->Export(*nonlocal_graph, exporter, Add);
728  (void)ierr;
729  Assert (ierr==0, ExcTrilinosError(ierr));
730  }
731 
732  nonlocal_matrix.reset (new Epetra_CrsMatrix(Copy, *nonlocal_graph));
733  }
734 
735  graph->FillComplete(input_col_map, input_row_map);
736  graph->OptimizeStorage();
737 
738  AssertDimension (sparsity_pattern.n_cols(),static_cast<size_type>(
739  n_global_cols(*graph)));
740 
741  matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false));
742  }
743  }
744 
745 
746 
747  template <typename SparsityPatternType>
748  void
749  SparseMatrix::reinit (const SparsityPatternType &sparsity_pattern)
750  {
751  const Epetra_Map rows (static_cast<TrilinosWrappers::types::int_type>(sparsity_pattern.n_rows()),
752  0,
753  Utilities::Trilinos::comm_self());
754  const Epetra_Map columns (static_cast<TrilinosWrappers::types::int_type>(sparsity_pattern.n_cols()),
755  0,
756  Utilities::Trilinos::comm_self());
757 
758  reinit_matrix (rows, columns, sparsity_pattern, false,
759  column_space_map, matrix, nonlocal_matrix,
760  nonlocal_matrix_exporter);
761  }
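// A minimal sketch of this reinit() overload: build the matrix layout
// from a deal.II sparsity pattern (here a DynamicSparsityPattern with
// placeholder contents):
//
//   DynamicSparsityPattern dsp (100, 100);
//   for (unsigned int i = 0; i < 100; ++i)
//     dsp.add (i, i);
//   TrilinosWrappers::SparseMatrix A;
//   A.reinit (dsp);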
762 
763 
764 
765  template <typename SparsityPatternType>
766  void
767  SparseMatrix::reinit (const Epetra_Map &input_map,
768  const SparsityPatternType &sparsity_pattern,
769  const bool exchange_data)
770  {
771  reinit_matrix (input_map, input_map, sparsity_pattern, exchange_data,
772  column_space_map, matrix, nonlocal_matrix,
773  nonlocal_matrix_exporter);
774  }
775 
776 
777 
778 
779 
780 
781  template <typename SparsityPatternType>
782  inline
783  void SparseMatrix::reinit (const IndexSet &row_parallel_partitioning,
784  const IndexSet &col_parallel_partitioning,
785  const SparsityPatternType &sparsity_pattern,
786  const MPI_Comm &communicator,
787  const bool exchange_data)
788  {
789  Epetra_Map row_map =
790  row_parallel_partitioning.make_trilinos_map (communicator, false);
791  Epetra_Map col_map =
792  col_parallel_partitioning.make_trilinos_map (communicator, false);
793  reinit_matrix (row_map, col_map, sparsity_pattern, exchange_data,
794  column_space_map, matrix, nonlocal_matrix,
795  nonlocal_matrix_exporter);
796 
797  // In the end, the matrix needs to be compressed in order to be really
798  // ready.
799  last_action = Zero;
800  compress(VectorOperation::insert);
801  }
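// A minimal sketch of the IndexSet-based reinit() just defined (the
// IndexSets, communicator, and pattern are placeholders; passing
// exchange_data = true lets processors hand off entries they computed
// for rows they do not own):
//
//   A.reinit (locally_owned_dofs, locally_owned_dofs, dsp,
//             mpi_communicator, /*exchange_data=*/ true);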
802 
803 
804 
805  template <typename SparsityPatternType>
806  inline
807  void SparseMatrix::reinit (const Epetra_Map &row_map,
808  const Epetra_Map &col_map,
809  const SparsityPatternType &sparsity_pattern,
810  const bool exchange_data)
811  {
812  reinit_matrix (row_map, col_map, sparsity_pattern, exchange_data,
813  column_space_map, matrix, nonlocal_matrix,
814  nonlocal_matrix_exporter);
815 
816  // In the end, the matrix needs to be compressed in order to be really
817  // ready.
818  last_action = Zero;
819  compress(VectorOperation::insert);
820  }
821 
822 
823 
824 
825  void
826  SparseMatrix::reinit (const SparsityPattern &sparsity_pattern)
827  {
828  matrix.reset ();
829  nonlocal_matrix_exporter.reset();
830 
831  // reinit with a (parallel) Trilinos sparsity pattern.
832  column_space_map.reset (new Epetra_Map
833  (sparsity_pattern.domain_partitioner()));
834  matrix.reset (new Epetra_FECrsMatrix
835  (Copy, sparsity_pattern.trilinos_sparsity_pattern(), false));
836 
837  if (sparsity_pattern.nonlocal_graph.get() != 0)
838  nonlocal_matrix.reset (new Epetra_CrsMatrix(Copy, *sparsity_pattern.nonlocal_graph));
839  else
840  nonlocal_matrix.reset ();
841 
842  last_action = Zero;
843  compress(VectorOperation::insert);
844  }
845 
846 
847 
848  void
849  SparseMatrix::reinit (const SparseMatrix &sparse_matrix)
850  {
851  if (this == &sparse_matrix)
852  return;
853 
854  column_space_map.reset (new Epetra_Map (sparse_matrix.domain_partitioner()));
855  matrix.reset ();
856  nonlocal_matrix_exporter.reset();
857  matrix.reset (new Epetra_FECrsMatrix
858  (Copy, sparse_matrix.trilinos_sparsity_pattern(), false));
859 
860  if (sparse_matrix.nonlocal_matrix != 0)
861  nonlocal_matrix.reset (new Epetra_CrsMatrix
862  (Copy, sparse_matrix.nonlocal_matrix->Graph()));
863  else
864  nonlocal_matrix.reset();
865 
866  last_action = Zero;
867  compress(VectorOperation::insert);
868  }
869 
870 
871 
872  template <typename number>
873  inline
874  void SparseMatrix::reinit (const IndexSet &row_parallel_partitioning,
875  const IndexSet &col_parallel_partitioning,
876  const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
877  const MPI_Comm &communicator,
878  const double drop_tolerance,
879  const bool copy_values,
880  const ::dealii::SparsityPattern *use_this_sparsity)
881  {
882  if (copy_values == false)
883  {
884  // in case we do not copy values, just
885  // call the other function.
886  if (use_this_sparsity == 0)
887  reinit (row_parallel_partitioning, col_parallel_partitioning,
888  dealii_sparse_matrix.get_sparsity_pattern(),
889  communicator, false);
890  else
891  reinit (row_parallel_partitioning, col_parallel_partitioning,
892  *use_this_sparsity, communicator, false);
893  return;
894  }
895 
896  const size_type n_rows = dealii_sparse_matrix.m();
897 
898  AssertDimension (row_parallel_partitioning.size(), n_rows);
899  AssertDimension (col_parallel_partitioning.size(), dealii_sparse_matrix.n());
900 
901  const ::dealii::SparsityPattern &sparsity_pattern =
902  (use_this_sparsity!=0)? *use_this_sparsity :
903  dealii_sparse_matrix.get_sparsity_pattern();
904 
905  if (matrix.get() == 0 ||
906  m() != n_rows ||
907  n_nonzero_elements() != sparsity_pattern.n_nonzero_elements())
908  {
909  reinit (row_parallel_partitioning, col_parallel_partitioning,
910  sparsity_pattern, communicator, false);
911  }
912 
913  // fill the values. the same as above: go through all rows of the
914  // matrix, and then all columns. since the sparsity patterns of the
915  // input matrix and the specified sparsity pattern might be different,
916  // need to go through the row for both these sparsity structures
917  // simultaneously in order to really set the correct values.
918  size_type maximum_row_length = matrix->MaxNumEntries();
919  std::vector<size_type> row_indices (maximum_row_length);
920  std::vector<TrilinosScalar> values (maximum_row_length);
921 
922  for (size_type row=0; row<n_rows; ++row)
923  // see if the row is locally stored on this processor
924  if (row_parallel_partitioning.is_element(row) == true)
925  {
926  ::dealii::SparsityPattern::iterator select_index =
927  sparsity_pattern.begin(row);
928  typename ::dealii::SparseMatrix<number>::const_iterator it =
929  dealii_sparse_matrix.begin(row);
930  size_type col = 0;
931  if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
932  {
933  // optimized diagonal
934  AssertDimension(it->column(), row);
935  if (std::fabs(it->value()) > drop_tolerance)
936  {
937  values[col] = it->value();
938  row_indices[col++] = it->column();
939  }
940  ++select_index;
941  ++it;
942  }
943 
944  while (it != dealii_sparse_matrix.end(row) &&
945  select_index != sparsity_pattern.end(row))
946  {
947  while (select_index->column() < it->column() &&
948  select_index != sparsity_pattern.end(row))
949  ++select_index;
950  while (it->column() < select_index->column() &&
951  it != dealii_sparse_matrix.end(row))
952  ++it;
953 
954  if (it == dealii_sparse_matrix.end(row))
955  break;
956  if (std::fabs(it->value()) > drop_tolerance)
957  {
958  values[col] = it->value();
959  row_indices[col++] = it->column();
960  }
961  ++select_index;
962  ++it;
963  }
964  set (row, col, reinterpret_cast<size_type *>(&row_indices[0]),
965  &values[0], false);
966  }
967  compress(VectorOperation::insert);
968  }
969 
970 
971 
972  template <typename number>
973  void
974  SparseMatrix::reinit (const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
975  const double drop_tolerance,
976  const bool copy_values,
977  const ::dealii::SparsityPattern *use_this_sparsity)
978  {
979  reinit (complete_index_set(dealii_sparse_matrix.m()),
980  complete_index_set(dealii_sparse_matrix.n()),
981  dealii_sparse_matrix, MPI_COMM_SELF, drop_tolerance,
982  copy_values, use_this_sparsity);
983  }
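// A minimal sketch of copying a deal.II matrix into a Trilinos matrix
// while dropping near-zero entries (`dealii_matrix` is a placeholder
// dealii::SparseMatrix<double>):
//
//   TrilinosWrappers::SparseMatrix A;
//   A.reinit (dealii_matrix, /*drop_tolerance=*/ 1e-13);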
984 
985 
986 
987  template <typename number>
988  void
989  SparseMatrix::reinit (const Epetra_Map &input_map,
990  const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
991  const double drop_tolerance,
992  const bool copy_values,
993  const ::dealii::SparsityPattern *use_this_sparsity)
994  {
995  reinit (IndexSet(input_map), IndexSet(input_map), dealii_sparse_matrix,
996  MPI_COMM_SELF, drop_tolerance, copy_values, use_this_sparsity);
997  }
998 
999 
1000 
1001  template <typename number>
1002  void
1003  SparseMatrix::reinit (const Epetra_Map &input_row_map,
1004  const Epetra_Map &input_col_map,
1005  const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
1006  const double drop_tolerance,
1007  const bool copy_values,
1008  const ::dealii::SparsityPattern *use_this_sparsity)
1009  {
1010  reinit (IndexSet(input_row_map), IndexSet(input_col_map),
1011  dealii_sparse_matrix, MPI_COMM_SELF,
1012  drop_tolerance, copy_values, use_this_sparsity);
1013  }
1014 
1015 
1016 
1017  void
1018  SparseMatrix::reinit (const Epetra_CrsMatrix &input_matrix,
1019  const bool copy_values)
1020  {
1021  Assert (input_matrix.Filled()==true,
1022  ExcMessage("Input CrsMatrix has not called FillComplete()!"));
1023 
1024  column_space_map.reset (new Epetra_Map (input_matrix.DomainMap()));
1025 
1026  const Epetra_CrsGraph *graph = &input_matrix.Graph();
1027 
1028  nonlocal_matrix.reset();
1029  nonlocal_matrix_exporter.reset();
1030  matrix.reset ();
1031  matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false));
1032 
1033  matrix->FillComplete (*column_space_map, input_matrix.RangeMap(), true);
1034 
1035  if (copy_values == true)
1036  {
1037  // point to the first data entry in the two
1038  // matrices and copy the content
1039  const TrilinosScalar *in_values = input_matrix[0];
1040  TrilinosScalar *values = (*matrix)[0];
1041  const size_type my_nonzeros = input_matrix.NumMyNonzeros();
1042  std::memcpy (&values[0], &in_values[0],
1043  my_nonzeros*sizeof (TrilinosScalar));
1044  }
1045 
1046  last_action = Zero;
1047  compress(VectorOperation::insert);
1048  }
1049 
1050 
1051 
1052  void
1053  SparseMatrix::compress (::dealii::VectorOperation::values operation)
1054  {
1055 
1056  Epetra_CombineMode mode = last_action;
1057  if (last_action == Zero)
1058  {
1059  if ((operation==::dealii::VectorOperation::add) ||
1060  (operation==::dealii::VectorOperation::unknown))
1061  mode = Add;
1062  else if (operation==::dealii::VectorOperation::insert)
1063  mode = Insert;
1064  }
1065  else
1066  {
1067  Assert(
1068  ((last_action == Add) && (operation!=::dealii::VectorOperation::insert))
1069  ||
1070  ((last_action == Insert) && (operation!=::dealii::VectorOperation::add)),
1071  ExcMessage("Operation and argument to compress() do not match"));
1072  }
1073 
1074  // flush buffers
1075  int ierr;
1076  if (nonlocal_matrix.get() != 0 && mode == Add)
1077  {
1078  // only export in the case of an add() operation; otherwise the owning
1079  // processor must have set the correct entry
1080  nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
1081  if (nonlocal_matrix_exporter.get() == 0)
1082  nonlocal_matrix_exporter.reset
1083  (new Epetra_Export(nonlocal_matrix->RowMap(), matrix->RowMap()));
1084  ierr = matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
1085  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1086  ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
1087  nonlocal_matrix->PutScalar(0);
1088  }
1089  else
1090  ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(),
1091  true, mode);
1092 
1093  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1094 
1095  ierr = matrix->OptimizeStorage ();
1096  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1097 
1098  last_action = Zero;
1099 
1100  compressed = true;
1101  }
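// A minimal sketch of the add-then-compress discipline enforced above
// (indices and values are placeholders; compress() communicates
// off-processor contributions and calls FillComplete/OptimizeStorage):
//
//   A.add (i, j, value);                 // possibly an off-processor row
//   A.compress (VectorOperation::add);   // collective: call on all ranks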
1102 
1103 
1104 
1105  void
1106  SparseMatrix::clear ()
1107  {
1108  // When we clear the matrix, reset
1109  // the pointer and generate an
1110  // empty matrix.
1111  column_space_map.reset (new Epetra_Map (0, 0,
1112  Utilities::Trilinos::comm_self()));
1113  matrix.reset (new Epetra_FECrsMatrix(View, *column_space_map, 0));
1114  nonlocal_matrix.reset();
1115  nonlocal_matrix_exporter.reset();
1116 
1117  matrix->FillComplete();
1118 
1119  compressed = true;
1120  }
1121 
1122 
1123 
1124  void
1125  SparseMatrix::clear_row (const size_type row,
1126  const TrilinosScalar new_diag_value)
1127  {
1128  Assert (matrix->Filled()==true, ExcMatrixNotCompressed());
1129 
1130  // Only do this on the rows owned
1131  // locally on this processor.
1132  int local_row =
1133  matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1134  if (local_row >= 0)
1135  {
1136  TrilinosScalar *values;
1137  int *col_indices;
1138  int num_entries;
1139  const int ierr = matrix->ExtractMyRowView(local_row, num_entries,
1140  values, col_indices);
1141  (void)ierr;
1142 
1143  Assert (ierr == 0,
1144  ExcTrilinosError(ierr));
1145 
1146  int *diag_find = std::find(col_indices,col_indices+num_entries,local_row);
1147  int diag_index = (int)(diag_find - col_indices);
1148 
1149  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
1150  if (diag_index != j || new_diag_value == 0)
1151  values[j] = 0.;
1152 
1153  if (diag_find && std::fabs(values[diag_index]) == 0.0 &&
1154  new_diag_value != 0.0)
1155  values[diag_index] = new_diag_value;
1156  }
1157  }
1158 
1159 
1160 
1161  void
1162  SparseMatrix::clear_rows (const std::vector<size_type> &rows,
1163  const TrilinosScalar new_diag_value)
1164  {
1165  for (size_type row=0; row<rows.size(); ++row)
1166  clear_row(rows[row], new_diag_value);
1167  }
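// A minimal sketch of using clear_rows() to eliminate rows belonging to
// Dirichlet-constrained degrees of freedom while keeping the matrix
// regular (the row indices are placeholders):
//
//   std::vector<TrilinosWrappers::SparseMatrix::size_type> rows;
//   rows.push_back (0);
//   rows.push_back (42);
//   A.clear_rows (rows, /*new_diag_value=*/ 1.0);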
1168 
1169 
1170 
1171  TrilinosScalar
1172  SparseMatrix::operator() (const size_type i,
1173  const size_type j) const
1174  {
1175  // Extract local indices in
1176  // the matrix.
1177  int trilinos_i = matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1178  trilinos_j = matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1179  TrilinosScalar value = 0.;
1180 
1181  // If the data is not on the
1182  // present processor, we throw
1183  // an exception. This is one of
1184  // the two tiny differences to
1185  // the el(i,j) call, which does
1186  // not throw any assertions.
1187  if (trilinos_i == -1)
1188  {
1189  Assert (false, ExcAccessToNonLocalElement(i, j, local_range().first,
1190  local_range().second));
1191  }
1192  else
1193  {
1194  // Check whether the matrix has
1195  // already been transformed to local
1196  // indices.
1197  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1198 
1199  // Prepare pointers for extraction
1200  // of a view of the row.
1201  int nnz_present = matrix->NumMyEntries(trilinos_i);
1202  int nnz_extracted;
1203  int *col_indices;
1204  TrilinosScalar *values;
1205 
1206  // Generate the view and make
1207  // sure that we have not generated
1208  // an error.
1209  // TODO Check that col_indices are int and not long long
1210  int ierr = matrix->ExtractMyRowView(trilinos_i, nnz_extracted,
1211  values, col_indices);
1212  (void)ierr;
1213  Assert (ierr==0, ExcTrilinosError(ierr));
1214 
1215  Assert (nnz_present == nnz_extracted,
1216  ExcDimensionMismatch(nnz_present, nnz_extracted));
1217 
1218  // Search the index where we
1219  // look for the value, and then
1220  // finally get it.
1221 
1222  int *el_find = std::find(col_indices, col_indices + nnz_present, trilinos_j);
1223 
1224  int local_col_index = (int)(el_find - col_indices);
1225 
1226  // This is actually the only
1227  // difference to the el(i,j)
1228  // function, which means that
1229  // we throw an exception in
1230  // this case instead of just
1231  // returning zero for an
1232  // element that is not present
1233  // in the sparsity pattern.
1234  if (local_col_index == nnz_present)
1235  {
1236  Assert (false, ExcInvalidIndex (i,j));
1237  }
1238  else
1239  value = values[local_col_index];
1240  }
1241 
1242  return value;
1243  }
1244 
1245 
1246 
1247  TrilinosScalar
1248  SparseMatrix::el (const size_type i,
1249  const size_type j) const
1250  {
1251  // Extract local indices in
1252  // the matrix.
1253  int trilinos_i = matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1254  trilinos_j = matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1255  TrilinosScalar value = 0.;
1256 
1257  // If the data is not on the
1258  // present processor, we can't
1259  // continue. Just print out zero
1260  // as discussed in the
1261  // documentation of this
1262  // function. if you want error
1263  // checking, use operator().
1264  if ((trilinos_i == -1 ) || (trilinos_j == -1))
1265  return 0.;
1266  else
1267  {
1268  // Check whether the matrix
1269  // already is transformed to
1270  // local indices.
1271  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1272 
1273  // Prepare pointers for extraction
1274  // of a view of the row.
1275  int nnz_present = matrix->NumMyEntries(trilinos_i);
1276  int nnz_extracted;
1277  int *col_indices;
1278  TrilinosScalar *values;
1279 
1280  // Generate the view and make
1281  // sure that we have not generated
1282  // an error.
1283  int ierr = matrix->ExtractMyRowView(trilinos_i, nnz_extracted,
1284  values, col_indices);
1285  (void)ierr;
1286  Assert (ierr==0, ExcTrilinosError(ierr));
1287 
1288  Assert (nnz_present == nnz_extracted,
1289  ExcDimensionMismatch(nnz_present, nnz_extracted));
1290 
1291  // Search the index where we
1292  // look for the value, and then
1293  // finally get it.
1294  int *el_find = std::find(col_indices, col_indices + nnz_present, trilinos_j);
1295 
1296  int local_col_index = (int)(el_find - col_indices);
1297 
1298 
1299  // This is actually the only
1300  // difference to the () function
1301  // querying (i,j), where we throw an
1302  // exception instead of just
1303  // returning zero for an element
1304  // that is not present in the
1305  // sparsity pattern.
1306  if (local_col_index == nnz_present)
1307  value = 0;
1308  else
1309  value = values[local_col_index];
1310  }
1311 
1312  return value;
1313  }
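// A minimal sketch of the two access functions side by side (`A` is a
// placeholder; both require a compressed matrix):
//
//   const TrilinosScalar a = A (i, j);    // asserts (i,j) is stored locally
//   const TrilinosScalar b = A.el (i, j); // returns 0. for entries that
//                                         // are not stored or not local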
1314 
1315 
1316 
1317  TrilinosScalar
1318  SparseMatrix::diag_element (const size_type i) const
1319  {
1320  Assert (m() == n(), ExcNotQuadratic());
1321 
1322 #ifdef DEBUG
1323  // use operator() in debug mode because
1324  // it checks if this is a valid element
1325  // (in parallel)
1326  return operator()(i,i);
1327 #else
1328  // Trilinos doesn't seem to have a
1329  // more efficient way to access the
1330  // diagonal than by just using the
1331  // standard el(i,j) function.
1332  return el(i,i);
1333 #endif
1334  }
1335 
1336 
1337 
1338  unsigned int
1339  SparseMatrix::row_length (const size_type row) const
1340  {
1341  Assert (row < m(), ExcInternalError());
1342 
1343  // get a representation of the
1344  // present row
1345  int ncols = -1;
1346  int local_row = matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1347 
1348  // on the processor who owns this
1349  // row, we'll have a non-negative
1350  // value.
1351  if (local_row >= 0)
1352  {
1353  int ierr = matrix->NumMyRowEntries (local_row, ncols);
1354  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1355  }
1356 
1357  return ncols;
1358  }
1359 
1360 
1361 
1362  void
1363  SparseMatrix::set (const std::vector<size_type> &row_indices,
1364  const std::vector<size_type> &col_indices,
1365  const FullMatrix<TrilinosScalar> &values,
1366  const bool elide_zero_values)
1367  {
1368  Assert (row_indices.size() == values.m(),
1369  ExcDimensionMismatch(row_indices.size(), values.m()));
1370  Assert (col_indices.size() == values.n(),
1371  ExcDimensionMismatch(col_indices.size(), values.n()));
1372 
1373  for (size_type i=0; i<row_indices.size(); ++i)
1374  set (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
1375  elide_zero_values);
1376  }
1377 
1378 
1379 
1380  void
1381  SparseMatrix::set (const size_type row,
1382  const std::vector<size_type> &col_indices,
1383  const std::vector<TrilinosScalar> &values,
1384  const bool elide_zero_values)
1385  {
1386  Assert (col_indices.size() == values.size(),
1387  ExcDimensionMismatch(col_indices.size(), values.size()));
1388 
1389  set (row, col_indices.size(), &col_indices[0], &values[0],
1390  elide_zero_values);
1391  }
1392 
1393 
1394 
1395  void
1396  SparseMatrix::set (const size_type row,
1397  const size_type n_cols,
1398  const size_type *col_indices,
1399  const TrilinosScalar *values,
1400  const bool elide_zero_values)
1401  {
1402  AssertIndexRange(row, this->m());
1403 
1404  int ierr;
1405  if (last_action == Add)
1406  {
1407  ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(),
1408  true);
1409 
1410  Assert (ierr == 0, ExcTrilinosError(ierr));
1411  }
1412 
1413  last_action = Insert;
1414 
1415  TrilinosWrappers::types::int_type *col_index_ptr;
1416  TrilinosScalar *col_value_ptr;
1417  TrilinosWrappers::types::int_type n_columns;
1418 
1419  TrilinosScalar short_val_array[100];
1420  TrilinosWrappers::types::int_type short_index_array[100];
1421  std::vector<TrilinosScalar> long_val_array;
1422  std::vector<TrilinosWrappers::types::int_type> long_index_array;
1423 
1424 
1425  // If we don't elide zeros, the pointers are already available... need to
1426  // cast to non-const pointers as that is the format taken by Trilinos (but
1427  // we will not modify const data)
1428  if (elide_zero_values == false)
1429  {
1430  col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
1431  col_value_ptr = const_cast<TrilinosScalar *>(values);
1432  n_columns = n_cols;
1433  }
1434  else
1435  {
1436  // Otherwise, extract nonzero values in each row and get the
1437  // respective indices.
1438  if (n_cols > 100)
1439  {
1440  long_val_array.resize(n_cols);
1441  long_index_array.resize(n_cols);
1442  col_index_ptr = &long_index_array[0];
1443  col_value_ptr = &long_val_array[0];
1444  }
1445  else
1446  {
1447  col_index_ptr = &short_index_array[0];
1448  col_value_ptr = &short_val_array[0];
1449  }
1450 
1451  n_columns = 0;
1452  for (size_type j=0; j<n_cols; ++j)
1453  {
1454  const double value = values[j];
1455  AssertIsFinite(value);
1456  if (value != 0)
1457  {
1458  col_index_ptr[n_columns] = col_indices[j];
1459  col_value_ptr[n_columns] = value;
1460  n_columns++;
1461  }
1462  }
1463 
1464  Assert(n_columns <= (TrilinosWrappers::types::int_type)n_cols, ExcInternalError());
1465  }
1466 
1467 
1468  // If the calling matrix owns the row to which we want to insert values,
1469  // we can directly call the Epetra_CrsMatrix input function, which is much
1470  // faster than the Epetra_FECrsMatrix function. We distinguish between two
1471  // cases: the first one is when the matrix is not filled (i.e., it is
1472  // possible to add new elements to the sparsity pattern), and the second
1473  // one is when the pattern is already fixed. In the former case, we add
1474  // the possibility to insert new values, and in the second we just replace
1475  // data.
1476  if (matrix->RowMap().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1477  {
1478  if (matrix->Filled() == false)
1479  {
1480  ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
1481  static_cast<TrilinosWrappers::types::int_type>(row),
1482  static_cast<int>(n_columns),const_cast<double *>(col_value_ptr),
1483  col_index_ptr);
1484 
1485  // When inserting elements, we do not want to create exceptions in
1486  // the case when inserting non-local data (since that's what we
1487  // want to do right now).
1488  if (ierr > 0)
1489  ierr = 0;
1490  }
1491  else
1492  ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row, n_columns,
1493  col_value_ptr,
1494  col_index_ptr);
1495  }
1496  else
1497  {
1498  // When we're at off-processor data, we have to stick with the
1499  // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
1500  // we call it is the fastest one (any other will lead to repeated
1501  // allocation and deallocation of memory in order to call the function
1502  // we already use, which is very inefficient if writing one element at
1503  // a time).
1504  compressed = false;
1505 
1506  if (matrix->Filled() == false)
1507  {
1508  ierr = matrix->InsertGlobalValues (1,
1509  (TrilinosWrappers::types::int_type *)&row,
1510  n_columns, col_index_ptr,
1511  &col_value_ptr,
1512  Epetra_FECrsMatrix::ROW_MAJOR);
1513  if (ierr > 0)
1514  ierr = 0;
1515  }
1516  else
1517  ierr = matrix->ReplaceGlobalValues (1,
1518  (TrilinosWrappers::types::int_type *)&row,
1519  n_columns, col_index_ptr,
1520  &col_value_ptr,
1521  Epetra_FECrsMatrix::ROW_MAJOR);
1522  // use the FECrsMatrix facilities for set even in the case when we
1523  // have explicitly set the off-processor rows because that only works
1524  // properly when adding elements, not when setting them (since we want
1525  // to only touch elements that have been set explicitly, and there is
1526  // no way on the receiving processor to identify them otherwise)
1527  }
1528 
1529  Assert (ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1530  AssertThrow (ierr >= 0, ExcTrilinosError(ierr));
1531  }
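// A minimal sketch of the pointer-based set() just defined (the arrays
// are placeholders; elide_zero_values=false inserts zeros explicitly):
//
//   const TrilinosWrappers::SparseMatrix::size_type cols[3] = {0, 2, 4};
//   const TrilinosScalar vals[3] = {1., 2., 3.};
//   A.set (/*row=*/ 0, /*n_cols=*/ 3, cols, vals,
//          /*elide_zero_values=*/ false);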
1532 
1533 
1534 
1535  void
1536  SparseMatrix::add (const std::vector<size_type> &indices,
1537  const FullMatrix<TrilinosScalar> &values,
1538  const bool elide_zero_values)
1539  {
1540  Assert (indices.size() == values.m(),
1541  ExcDimensionMismatch(indices.size(), values.m()));
1542  Assert (values.m() == values.n(), ExcNotQuadratic());
1543 
1544  for (size_type i=0; i<indices.size(); ++i)
1545  add (indices[i], indices.size(), &indices[0], &values(i,0),
1546  elide_zero_values);
1547  }
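// A minimal sketch of the typical finite element use of this add()
// overload: distributing a local cell matrix into the global matrix
// (`cell_matrix`, `local_dof_indices`, and `dofs_per_cell` are
// placeholders):
//
//   FullMatrix<TrilinosScalar> cell_matrix (dofs_per_cell, dofs_per_cell);
//   std::vector<TrilinosWrappers::SparseMatrix::size_type>
//     local_dof_indices (dofs_per_cell);
//   // ... fill cell_matrix and local_dof_indices on the current cell ...
//   A.add (local_dof_indices, cell_matrix);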
1548 
1549 
1550 
1551  void
1552  SparseMatrix::add (const std::vector<size_type> &row_indices,
1553  const std::vector<size_type> &col_indices,
1554  const FullMatrix<TrilinosScalar> &values,
1555  const bool elide_zero_values)
1556  {
1557  Assert (row_indices.size() == values.m(),
1558  ExcDimensionMismatch(row_indices.size(), values.m()));
1559  Assert (col_indices.size() == values.n(),
1560  ExcDimensionMismatch(col_indices.size(), values.n()));
1561 
1562  for (size_type i=0; i<row_indices.size(); ++i)
1563  add (row_indices[i], col_indices.size(), &col_indices[0],
1564  &values(i,0), elide_zero_values);
1565  }
1566 
1567 
1568 
1569  void
1570  SparseMatrix::add (const size_type row,
1571  const std::vector<size_type> &col_indices,
1572  const std::vector<TrilinosScalar> &values,
1573  const bool elide_zero_values)
1574  {
1575  Assert (col_indices.size() == values.size(),
1576  ExcDimensionMismatch(col_indices.size(), values.size()));
1577 
1578  add (row, col_indices.size(), &col_indices[0], &values[0],
1579  elide_zero_values);
1580  }
1581 
1582 
1583 
1584  void
1585  SparseMatrix::add (const size_type row,
1586  const size_type n_cols,
1587  const size_type *col_indices,
1588  const TrilinosScalar *values,
1589  const bool elide_zero_values,
1590  const bool /*col_indices_are_sorted*/)
1591  {
1592  AssertIndexRange(row, this->m());
1593  int ierr;
1594  if (last_action == Insert)
1595  {
1596  // TODO: this could lead to a dead lock when only one processor
1597  // calls GlobalAssemble.
1598  ierr = matrix->GlobalAssemble(*column_space_map,
1599  matrix->RowMap(), false);
1600 
1601  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1602  }
1603 
1604  last_action = Add;
1605 
1606  TrilinosWrappers::types::int_type *col_index_ptr;
1607  TrilinosScalar *col_value_ptr;
1608  TrilinosWrappers::types::int_type n_columns;
1609 
1610  double short_val_array[100];
1611  TrilinosWrappers::types::int_type short_index_array[100];
1612  std::vector<TrilinosScalar> long_val_array;
1613  std::vector<TrilinosWrappers::types::int_type> long_index_array;
1614 
1615  // If we don't elide zeros, the pointers are already available... need to
1616  // cast to non-const pointers as that is the format taken by Trilinos (but
1617  // we will not modify const data)
1618  if (elide_zero_values == false)
1619  {
1620  col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
1621  col_value_ptr = const_cast<TrilinosScalar *>(values);
1622  n_columns = n_cols;
1623 #ifdef DEBUG
1624  for (size_type j=0; j<n_cols; ++j)
1625  AssertIsFinite(values[j]);
1626 #endif
1627  }
1628  else
1629  {
1630  // Otherwise, extract nonzero values in each row and the corresponding
1631  // index.
1632  if (n_cols > 100)
1633  {
1634  long_val_array.resize(n_cols);
1635  long_index_array.resize(n_cols);
1636  col_index_ptr = &long_index_array[0];
1637  col_value_ptr = &long_val_array[0];
1638  }
1639  else
1640  {
1641  col_index_ptr = &short_index_array[0];
1642  col_value_ptr = &short_val_array[0];
1643  }
1644 
1645  n_columns = 0;
1646  for (size_type j=0; j<n_cols; ++j)
1647  {
1648  const double value = values[j];
1649 
1650  AssertIsFinite(value);
1651  if (value != 0)
1652  {
1653  col_index_ptr[n_columns] = col_indices[j];
1654  col_value_ptr[n_columns] = value;
1655  n_columns++;
1656  }
1657  }
1658 
1659  Assert(n_columns <= (TrilinosWrappers::types::int_type)n_cols, ExcInternalError());
1660 
1661  }
1662  // Exit early if there is nothing to do
1663  if (n_columns == 0)
1664  {
1665  return;
1666  }
1667 
1668  // If the calling processor owns the row to which we want to add values, we
1669  // can directly call the Epetra_CrsMatrix input function, which is much
1670  // faster than the Epetra_FECrsMatrix function.
1671  if (matrix->RowMap().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1672  {
1673  ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row, n_columns,
1674  col_value_ptr,
1675  col_index_ptr);
1676  }
1677  else if (nonlocal_matrix.get() != 0)
1678  {
1679  compressed = false;
1680  // this is the case when we have explicitly set the off-processor rows
1681  // and want to create a separate matrix object for them (to retain
1682  // thread-safety)
1683  Assert (nonlocal_matrix->RowMap().LID(static_cast<TrilinosWrappers::types::int_type>(row)) != -1,
1684  ExcMessage("Attempted to write into off-processor matrix row "
1685  "that has not be specified as being writable upon "
1686  "initialization"));
1687  ierr = nonlocal_matrix->SumIntoGlobalValues(row, n_columns,
1688  col_value_ptr,
1689  col_index_ptr);
1690  }
1691  else
1692  {
1693  // When we're at off-processor data, we have to stick with the
1694  // standard SumIntoGlobalValues function. Nevertheless, the way we
1695  // call it is the fastest one (any other will lead to repeated
1696  // allocation and deallocation of memory in order to call the function
1697  // we already use, which is very inefficient if writing one element at
1698  // a time).
1699  compressed = false;
1700 
1701  ierr = matrix->SumIntoGlobalValues (1,
1702  (TrilinosWrappers::types::int_type *)&row, n_columns,
1703  col_index_ptr,
1704  &col_value_ptr,
1705  Epetra_FECrsMatrix::ROW_MAJOR);
1706  }
1707 
1708 #ifdef DEBUG
1709  if (ierr > 0)
1710  {
1711  std::cout << "------------------------------------------"
1712  << std::endl;
1713  std::cout << "Got error " << ierr << " in row " << row
1714  << " of proc " << matrix->RowMap().Comm().MyPID()
1715  << " when trying to add the columns:" << std::endl;
1716  for (TrilinosWrappers::types::int_type i=0; i<n_columns; ++i)
1717  std::cout << col_index_ptr[i] << " ";
1718  std::cout << std::endl << std::endl;
1719  std::cout << "Matrix row "
1720  << (matrix->RowMap().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == false ? "(nonlocal part)" : "")
1721  << " has the following indices:" << std::endl;
1722  std::vector<TrilinosWrappers::types::int_type> indices;
1723  const Epetra_CrsGraph *graph =
1724  (nonlocal_matrix.get() != 0 &&
1725  matrix->RowMap().MyGID(static_cast<TrilinosWrappers::types::int_type>(row)) == false) ?
1726  &nonlocal_matrix->Graph() : &matrix->Graph();
1727 
1728  indices.resize(graph->NumGlobalIndices(static_cast<TrilinosWrappers::types::int_type>(row)));
1729  int n_indices = 0;
1730  graph->ExtractGlobalRowCopy(static_cast<TrilinosWrappers::types::int_type>(row),
1731  indices.size(), n_indices, &indices[0]);
1732  AssertDimension(static_cast<unsigned int>(n_indices), indices.size());
1733 
1734  for (TrilinosWrappers::types::int_type i=0; i<n_indices; ++i)
1735  std::cout << indices[i] << " ";
1736  std::cout << std::endl << std::endl;
1737  Assert (ierr <= 0,
1738  ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1739  }
1740 #endif
1741  Assert (ierr >= 0, ExcTrilinosError(ierr));
1742  }
1743 
1744 
1745 
1746  SparseMatrix &
1747  SparseMatrix::operator = (const double d)
1748  {
1749  Assert (d==0, ExcScalarAssignmentOnlyForZeroValue());
1750  compress (::dealii::VectorOperation::unknown); // TODO: why do we do this? Should we not check for is_compressed?
1751 
1752  const int ierr = matrix->PutScalar(d);
1753  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1754  if (nonlocal_matrix.get() != 0)
1755  nonlocal_matrix->PutScalar(d);
1756 
1757  return *this;
1758  }
1759 
1760 
1761 
1762  void
1763  SparseMatrix::add (const TrilinosScalar factor,
1764  const SparseMatrix &rhs)
1765  {
1766  AssertDimension (rhs.m(), m());
1767  AssertDimension (rhs.n(), n());
1768  AssertDimension (rhs.local_range().first, local_range().first);
1769  AssertDimension (rhs.local_range().second, local_range().second);
1770  Assert(matrix->RowMap().SameAs(rhs.matrix->RowMap()),
1771  ExcMessage("Can only add matrices with same distribution of rows"));
1772  Assert(matrix->Filled() && rhs.matrix->Filled(),
1773  ExcMessage("Addition of matrices only allowed if matrices are "
1774  "filled, i.e., compress() has been called"));
1775 
1776  const std::pair<size_type, size_type>
1777  local_range = rhs.local_range();
1778  const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());
1779 
1780  for (size_type row=local_range.first; row < local_range.second; ++row)
1781  {
1782  const int row_local =
1783  matrix->RowMap().LID(static_cast<TrilinosWrappers::types::int_type>(row));
1784 
1785  // First get a view of the matrix columns of both matrices. Note that
1786  // the data is stored in local index spaces, so we may only compare
1787  // column indices directly in case both are derived from the same
1788  // map.
1789  int n_entries, rhs_n_entries;
1790  TrilinosScalar *value_ptr, *rhs_value_ptr;
1791  int *index_ptr, *rhs_index_ptr;
1792  int ierr = rhs.matrix->ExtractMyRowView (row_local, rhs_n_entries,
1793  rhs_value_ptr, rhs_index_ptr);
1794  (void)ierr;
1795  Assert (ierr == 0, ExcTrilinosError(ierr));
1796 
1797  ierr = matrix->ExtractMyRowView (row_local, n_entries, value_ptr,
1798  index_ptr);
1799  Assert (ierr == 0, ExcTrilinosError(ierr));
1800  bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
1801  if (!expensive_checks)
1802  {
1803  // check if the column indices are the same. If yes, can simply
1804  // copy over the data.
1805  expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
1806  static_cast<void *>(rhs_index_ptr),
1807  sizeof(int)*n_entries) != 0;
1808  if (!expensive_checks)
1809  for (int i=0; i<n_entries; ++i)
1810  value_ptr[i] += rhs_value_ptr[i] * factor;
1811  }
1812  // Now to the expensive case where we need to check all column indices
1813  // against each other (transformed into global index space) and where
1814  // we need to make sure that all entries we are about to add into the
1815  // lhs matrix actually exist
1816  if (expensive_checks)
1817  {
1818  for (int i=0; i<rhs_n_entries; ++i)
1819  {
1820  if (rhs_value_ptr[i] == 0.)
1821  continue;
1822  const TrilinosWrappers::types::int_type rhs_global_col =
1823  global_column_index(*rhs.matrix, rhs_index_ptr[i]);
1824  int local_col = matrix->ColMap().LID(rhs_global_col);
1825  int *local_index = Utilities::lower_bound(index_ptr,
1826  index_ptr+n_entries,
1827  local_col);
1828  Assert(local_index != index_ptr + n_entries &&
1829  *local_index == local_col,
1830  ExcMessage("Adding the entries from the other matrix "
1831  "failed, because the sparsity pattern "
1832  "of that matrix includes more elements than the "
1833  "calling matrix, which is not allowed."));
1834  value_ptr[local_index-index_ptr] += factor * rhs_value_ptr[i];
1835  }
1836  }
1837  }
1838  }
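
// Sketch of the intended use, with A and B assumed compressed, sharing
// the row distribution, and B's sparsity contained in A's:
//
//   A.add (2.0, B);   // A += 2 * B, entry by entry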
1839 
1840 
1841 
1842  void
1843  SparseMatrix::transpose ()
1844  {
1845  // This only flips a flag that tells
1846  // Trilinos that any vmult operation
1847  // should be done with the
1848  // transpose. However, the matrix
1849  // structure is not reset.
1850  int ierr;
1851 
1852  if (!matrix->UseTranspose())
1853  {
1854  ierr = matrix->SetUseTranspose (true);
1855  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1856  }
1857  else
1858  {
1859  ierr = matrix->SetUseTranspose (false);
1860  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
1861  }
1862  }
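
// Sketch of the effect of the flag (names are placeholders): after
// transpose(), vmult() applies the transpose without moving any data.
//
//   A.transpose ();
//   A.vmult (y, x);   // computes y = A^T x
//   A.transpose ();   // flips the flag back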
1863 
1864 
1865 
1866  SparseMatrix &
1867  SparseMatrix::operator *= (const TrilinosScalar a)
1868  {
1869  const int ierr = matrix->Scale (a);
1870  Assert (ierr == 0, ExcTrilinosError(ierr));
1871  (void)ierr; // removes -Wunused-variable in optimized mode
1872 
1873  return *this;
1874  }
1875 
1876 
1877 
1878  SparseMatrix &
1879  SparseMatrix::operator /= (const TrilinosScalar a)
1880  {
1881  Assert (a != 0, ExcDivideByZero());
1882 
1883  const TrilinosScalar factor = 1./a;
1884 
1885  const int ierr = matrix->Scale (factor);
1886  Assert (ierr == 0, ExcTrilinosError(ierr));
1887  (void)ierr; // removes -Wunused-variable in optimized mode
1888 
1889  return *this;
1890  }
1891 
1892 
1893 
1894  TrilinosScalar
1895  SparseMatrix::l1_norm () const
1896  {
1897  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1898  return matrix->NormOne();
1899  }
1900 
1901 
1902 
1903  TrilinosScalar
1904  SparseMatrix::linfty_norm () const
1905  {
1906  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1907  return matrix->NormInf();
1908  }
1909 
1910 
1911 
1912  TrilinosScalar
1913  SparseMatrix::frobenius_norm () const
1914  {
1915  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1916  return matrix->NormFrobenius();
1917  }
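
// All three norms require a compressed (Filled) matrix; a short sketch:
//
//   A.compress (VectorOperation::add);
//   const TrilinosScalar n1 = A.l1_norm ();         // max column sum
//   const TrilinosScalar ni = A.linfty_norm ();     // max row sum
//   const TrilinosScalar nf = A.frobenius_norm ();  // root of sum of squares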
1918 
1919 
1920 
1921  namespace internal
1922  {
1923  namespace SparseMatrix
1924  {
1925  template <typename VectorType>
1926  inline
1927  void check_vector_map_equality(const Epetra_CrsMatrix &,
1928  const VectorType &,
1929  const VectorType &)
1930  {
1931  }
1932 
1933  inline
1934  void check_vector_map_equality(const Epetra_CrsMatrix &m,
1935  const TrilinosWrappers::MPI::Vector &in,
1936  const TrilinosWrappers::MPI::Vector &out)
1937  {
1938  Assert (in.vector_partitioner().SameAs(m.DomainMap()) == true,
1939  ExcMessage ("Column map of matrix does not fit with vector map!"));
1940  Assert (out.vector_partitioner().SameAs(m.RangeMap()) == true,
1941  ExcMessage ("Row map of matrix does not fit with vector map!"));
1942  (void)m;
1943  (void)in;
1944  (void)out;
1945  }
1946  }
1947  }
1948 
1949 
1950  template <typename VectorType>
1951  void
1952  SparseMatrix::vmult (VectorType &dst,
1953  const VectorType &src) const
1954  {
1955  Assert (&src != &dst, ExcSourceEqualsDestination());
1956  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1957  (void)src;
1958  (void)dst;
1959 
1960  internal::SparseMatrix::check_vector_map_equality(*matrix, src, dst);
1961  const size_type dst_local_size = dst.end() - dst.begin();
1962  AssertDimension (dst_local_size, static_cast<size_type>(matrix->RangeMap().NumMyPoints()));
1963  const size_type src_local_size = src.end() - src.begin();
1964  AssertDimension (src_local_size, static_cast<size_type>(matrix->DomainMap().NumMyPoints()));
1965 
1966  Epetra_MultiVector tril_dst (View, matrix->RangeMap(), dst.begin(),
1967  dst_local_size, 1);
1968  Epetra_MultiVector tril_src (View, matrix->DomainMap(),
1969  const_cast<TrilinosScalar *>(src.begin()),
1970  src_local_size, 1);
1971 
1972  const int ierr = matrix->Multiply (false, tril_src, tril_dst);
1973  Assert (ierr == 0, ExcTrilinosError(ierr));
1974  (void)ierr; // removes -Wunused-variable in optimized mode
1975  }
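
// Usage sketch, assuming vectors partitioned like the matrix's domain
// and range maps (which is exactly what the checks above enforce):
//
//   TrilinosWrappers::MPI::Vector x (A.domain_partitioner());
//   TrilinosWrappers::MPI::Vector y (A.range_partitioner());
//   A.vmult (y, x);   // y = A x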
1976 
1977 
1978 
1979  template <typename VectorType>
1980  void
1981  SparseMatrix::Tvmult (VectorType &dst,
1982  const VectorType &src) const
1983  {
1984  Assert (&src != &dst, ExcSourceEqualsDestination());
1985  Assert (matrix->Filled(), ExcMatrixNotCompressed());
1986 
1987  internal::SparseMatrix::check_vector_map_equality(*matrix, dst, src);
1988  const size_type dst_local_size = dst.end() - dst.begin();
1989  AssertDimension (dst_local_size, static_cast<size_type>(matrix->DomainMap().NumMyPoints()));
1990  const size_type src_local_size = src.end() - src.begin();
1991  AssertDimension (src_local_size, static_cast<size_type>(matrix->RangeMap().NumMyPoints()));
1992 
1993  Epetra_MultiVector tril_dst (View, matrix->DomainMap(), dst.begin(),
1994  dst_local_size, 1);
1995  Epetra_MultiVector tril_src (View, matrix->RangeMap(),
1996  const_cast<double *>(src.begin()),
1997  src_local_size, 1);
1998 
1999  const int ierr = matrix->Multiply (true, tril_src, tril_dst);
2000  Assert (ierr == 0, ExcTrilinosError(ierr));
2001  (void)ierr; // removes -Wunused-variable in optimized mode
2002  }
2003 
2004 
2005 
2006  template <typename VectorType>
2007  void
2008  SparseMatrix::vmult_add (VectorType &dst,
2009  const VectorType &src) const
2010  {
2011  Assert (&src != &dst, ExcSourceEqualsDestination());
2012 
2013  // Reinit a temporary vector with the fast argument set, which does not
2014  // zero out the content (to save time). However, the
2015  // TrilinosWrappers::Vector classes do not support this, so create a
2016  // deal.II local vector that has this fast setting. It will be accepted in
2017  // vmult because it only checks the local size.
2018  ::Vector<TrilinosScalar> temp_vector;
2019  temp_vector.reinit(dst.end()-dst.begin(), true);
2020  ::VectorView<TrilinosScalar> src_view(src.end()-src.begin(), src.begin());
2021  ::VectorView<TrilinosScalar> dst_view(dst.end()-dst.begin(), dst.begin());
2022  vmult (temp_vector, static_cast<const ::Vector<TrilinosScalar>&>(src_view));
2023  if (dst_view.size() > 0)
2024  dst_view += temp_vector;
2025  }
2026 
2027 
2028 
2029  template <typename VectorType>
2030  void
2031  SparseMatrix::Tvmult_add (VectorType &dst,
2032  const VectorType &src) const
2033  {
2034  Assert (&src != &dst, ExcSourceEqualsDestination());
2035 
2036  // Reinit a temporary vector with the fast argument set, which does not
2037  // zero out the content (to save time). However, the
2038  // TrilinosWrappers::Vector classes do not support this, so create a
2039  // deal.II local vector that has this fast setting. It will be accepted in
2040  // vmult because it only checks the local size.
2041  ::Vector<TrilinosScalar> temp_vector;
2042  temp_vector.reinit(dst.end()-dst.begin(), true);
2043  ::VectorView<TrilinosScalar> src_view(src.end()-src.begin(), src.begin());
2044  ::VectorView<TrilinosScalar> dst_view(dst.end()-dst.begin(), dst.begin());
2045  Tvmult (temp_vector, static_cast<const ::Vector<TrilinosScalar>&>(src_view));
2046  if (dst_view.size() > 0)
2047  dst_view += temp_vector;
2048  }
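
// Sketch of the additive variants defined above (placeholder names):
//
//   A.vmult_add  (y, x);   // y += A x
//   A.Tvmult_add (y, x);   // y += A^T x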
2049 
2050 
2051 
2052  TrilinosScalar
2053  SparseMatrix::matrix_norm_square (const VectorBase &v) const
2054  {
2055  Assert (matrix->RowMap().SameAs(matrix->DomainMap()),
2056  ExcNotQuadratic());
2057 
2058  VectorBase temp_vector;
2059  temp_vector.reinit(v, true);
2060 
2061  vmult (temp_vector, v);
2062  return temp_vector*v;
2063  }
2064 
2065 
2066 
2067  TrilinosScalar
2068  SparseMatrix::matrix_scalar_product (const VectorBase &u,
2069  const VectorBase &v) const
2070  {
2071  Assert (matrix->RowMap().SameAs(matrix->DomainMap()),
2072  ExcNotQuadratic());
2073 
2074  VectorBase temp_vector;
2075  temp_vector.reinit(v, true);
2076 
2077  vmult (temp_vector, v);
2078  return u*temp_vector;
2079  }
2080 
2081 
2082 
2083  TrilinosScalar
2084  SparseMatrix::residual (VectorBase &dst,
2085  const VectorBase &x,
2086  const VectorBase &b) const
2087  {
2088  vmult (dst, x);
2089  dst -= b;
2090  dst *= -1.;
2091 
2092  return dst.l2_norm();
2093  }
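
// residual() computes dst = b - A x and returns its l2 norm; a sketch
// with placeholder vectors dst, x, b of matching partitioning:
//
//   const TrilinosScalar r = A.residual (dst, x, b);   // r = ||b - A x||_2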
2094 
2095 
2096 
2097  namespace internals
2098  {
2099  typedef ::types::global_dof_index size_type;
2100 
2101  void perform_mmult (const SparseMatrix &inputleft,
2102  const SparseMatrix &inputright,
2103  SparseMatrix &result,
2104  const VectorBase &V,
2105  const bool transpose_left)
2106  {
2107 #ifdef DEAL_II_WITH_64BIT_INDICES
2108  Assert(false,ExcNotImplemented())
2109 #endif
2110  const bool use_vector = (V.size() == inputright.m() ? true : false);
2111  if (transpose_left == false)
2112  {
2113  Assert (inputleft.n() == inputright.m(),
2114  ExcDimensionMismatch(inputleft.n(), inputright.m()));
2115  Assert (inputleft.domain_partitioner().SameAs(inputright.range_partitioner()),
2116  ExcMessage ("Parallel partitioning of A and B does not fit."));
2117  }
2118  else
2119  {
2120  Assert (inputleft.m() == inputright.m(),
2121  ExcDimensionMismatch(inputleft.m(), inputright.m()));
2122  Assert (inputleft.range_partitioner().SameAs(inputright.range_partitioner()),
2123  ExcMessage ("Parallel partitioning of A and B does not fit."));
2124  }
2125 
2126  result.clear();
2127 
2128  // create a suitable operator B: in case
2129  // we do not use a vector, all we need to
2130  // do is to set the pointer. Otherwise,
2131  // we insert the data from B, but
2132  // multiply each row with the respective
2133  // vector element.
2134  Teuchos::RCP<Epetra_CrsMatrix> mod_B;
2135  if (use_vector == false)
2136  {
2137  mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>
2138  (&inputright.trilinos_matrix()),
2139  false);
2140  }
2141  else
2142  {
2143  mod_B = Teuchos::rcp(new Epetra_CrsMatrix
2144  (Copy, inputright.trilinos_sparsity_pattern()),
2145  true);
2146  mod_B->FillComplete(inputright.domain_partitioner(),
2147  inputright.range_partitioner());
2148  Assert (inputright.local_range() == V.local_range(),
2149  ExcMessage ("Parallel distribution of matrix B and vector V "
2150  "does not match."));
2151 
2152  const int local_N = inputright.local_size();
2153  for (int i=0; i<local_N; ++i)
2154  {
2155  int N_entries = -1;
2156  double *new_data, *B_data;
2157  mod_B->ExtractMyRowView (i, N_entries, new_data);
2158  inputright.trilinos_matrix().ExtractMyRowView (i, N_entries, B_data);
2159  double value = V.trilinos_vector()[0][i];
2160  for (TrilinosWrappers::types::int_type j=0; j<N_entries; ++j)
2161  new_data[j] = value * B_data[j];
2162  }
2163  }
2164 
2165  // use ML built-in method for performing
2166  // the matrix-matrix product.
2167  // create ML operators on top of the
2168  // Epetra matrices. if we use a
2169  // transposed matrix, let ML know it
2170  ML_Comm *comm;
2171  ML_Comm_Create(&comm);
2172 #ifdef ML_MPI
2173  const Epetra_MpiComm *epcomm = dynamic_cast<const Epetra_MpiComm *>(&(inputleft.trilinos_matrix().Comm()));
2174  // Get the MPI communicator, as it may not be MPI_COMM_WORLD, and update the ML comm object
2175  if (epcomm) ML_Comm_Set_UsrComm(comm,epcomm->Comm());
2176 #endif
2177  ML_Operator *A_ = ML_Operator_Create(comm);
2178  ML_Operator *B_ = ML_Operator_Create(comm);
2179  ML_Operator *C_ = ML_Operator_Create(comm);
2180  SparseMatrix transposed_mat;
2181 
2182  if (transpose_left == false)
2183  ML_Operator_WrapEpetraCrsMatrix
2184  (const_cast<Epetra_CrsMatrix *>(&inputleft.trilinos_matrix()),A_,
2185  false);
2186  else
2187  {
2188  // create transposed matrix
2189  SparsityPattern sparsity_transposed (inputleft.domain_partitioner(),
2190  inputleft.range_partitioner());
2191  Assert (inputleft.domain_partitioner().LinearMap() == true,
2192  ExcMessage("Matrix must be partitioned contiguously between procs."));
2193  for (unsigned int i=0; i<inputleft.local_size(); ++i)
2194  {
2195  int num_entries, * indices;
2196  inputleft.trilinos_sparsity_pattern().ExtractMyRowView(i, num_entries,
2197  indices);
2198  Assert (num_entries >= 0, ExcInternalError());
2199 #ifndef DEAL_II_WITH_64BIT_INDICES
2200  const size_type GID = inputleft.trilinos_matrix().RowMap().GID(i);
2201  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
2202  sparsity_transposed.add (inputleft.trilinos_matrix().ColMap().GID(indices[j]),
2203  GID);
2204 #else
2205  const size_type GID = inputleft.trilinos_matrix().RowMap().GID64(i);
2206  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
2207  sparsity_transposed.add (inputleft.trilinos_matrix().ColMap().GID64(indices[j]),
2208  GID);
2209 #endif
2210  }
2211 
2212  sparsity_transposed.compress();
2213  transposed_mat.reinit (sparsity_transposed);
2214  for (unsigned int i=0; i<inputleft.local_size(); ++i)
2215  {
2216  int num_entries, * indices;
2217  double *values;
2218  inputleft.trilinos_matrix().ExtractMyRowView(i, num_entries,
2219  values, indices);
2220  Assert (num_entries >= 0, ExcInternalError());
2221 #ifndef DEAL_II_WITH_64BIT_INDICES
2222  const size_type GID = inputleft.trilinos_matrix().RowMap().GID(i);
2223  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
2224  transposed_mat.set (inputleft.trilinos_matrix().ColMap().GID(indices[j]),
2225  GID, values[j]);
2226 #else
2227  const size_type GID = inputleft.trilinos_matrix().RowMap().GID64(i);
2228  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
2229  transposed_mat.set (inputleft.trilinos_matrix().ColMap().GID64(indices[j]),
2230  GID, values[j]);
2231 #endif
2232  }
2233  transposed_mat.compress(VectorOperation::insert);
2234  ML_Operator_WrapEpetraCrsMatrix
2235  (const_cast<Epetra_CrsMatrix *>(&transposed_mat.trilinos_matrix()),
2236  A_,false);
2237  }
2238  ML_Operator_WrapEpetraCrsMatrix(mod_B.get(),B_,false);
2239 
2240  // We implement the multiplication by
2241  // hand in a similar way as is done in
2242  // ml/src/Operator/ml_rap.c for a triple
2243  // matrix product; the code below
2244  // therefore closely follows the
2245  // structure of that file.
2246 
2247  // import data if necessary
2248  ML_Operator *Btmp, *Ctmp, *Ctmp2, *tptr;
2249  ML_CommInfoOP *getrow_comm;
2250  int max_per_proc;
2251  TrilinosWrappers::types::int_type N_input_vector = B_->invec_leng;
2252  getrow_comm = B_->getrow->pre_comm;
2253  if ( getrow_comm != NULL)
2254  for (TrilinosWrappers::types::int_type i = 0; i < getrow_comm->N_neighbors; i++)
2255  for (TrilinosWrappers::types::int_type j = 0; j < getrow_comm->neighbors[i].N_send; j++)
2256  AssertThrow (getrow_comm->neighbors[i].send_list[j] < N_input_vector,
2257  ExcInternalError());
2258 
2259  ML_create_unique_col_id(N_input_vector, &(B_->getrow->loc_glob_map),
2260  getrow_comm, &max_per_proc, B_->comm);
2261  B_->getrow->use_loc_glob_map = ML_YES;
2262  if (A_->getrow->pre_comm != NULL)
2263  ML_exchange_rows( B_, &Btmp, A_->getrow->pre_comm);
2264  else Btmp = B_;
2265 
2266  // perform matrix-matrix product
2267  ML_matmat_mult(A_, Btmp , &Ctmp);
2268 
2269  // release temporary structures we needed
2270  // for multiplication
2271  ML_free(B_->getrow->loc_glob_map);
2272  B_->getrow->loc_glob_map = NULL;
2273  B_->getrow->use_loc_glob_map = ML_NO;
2274  if (A_->getrow->pre_comm != NULL)
2275  {
2276  tptr = Btmp;
2277  while ( (tptr!= NULL) && (tptr->sub_matrix != B_))
2278  tptr = tptr->sub_matrix;
2279  if (tptr != NULL) tptr->sub_matrix = NULL;
2280  ML_RECUR_CSR_MSRdata_Destroy(Btmp);
2281  ML_Operator_Destroy(&Btmp);
2282  }
2283 
2284  // make correct data structures
2285  if (A_->getrow->post_comm != NULL)
2286  ML_exchange_rows(Ctmp, &Ctmp2, A_->getrow->post_comm);
2287  else
2288  Ctmp2 = Ctmp;
2289 
2290  ML_back_to_csrlocal(Ctmp2, C_, max_per_proc);
2291 
2292  ML_RECUR_CSR_MSRdata_Destroy (Ctmp);
2293  ML_Operator_Destroy (&Ctmp);
2294 
2295  if (A_->getrow->post_comm != NULL)
2296  {
2297  ML_RECUR_CSR_MSRdata_Destroy(Ctmp2);
2298  ML_Operator_Destroy (&Ctmp2);
2299  }
2300 
2301  // create an Epetra matrix from the ML
2302  // matrix that we got as a result.
2303  Epetra_CrsMatrix *C_mat;
2304  ML_Operator2EpetraCrsMatrix(C_, C_mat);
2305  C_mat->FillComplete(mod_B->DomainMap(),
2306  transpose_left ?
2307  inputleft.trilinos_matrix().DomainMap() :
2308  inputleft.trilinos_matrix().RangeMap());
2309  C_mat->OptimizeStorage();
2310  result.reinit (*C_mat);
2311 
2312  // destroy allocated memory
2313  delete C_mat;
2314  ML_Operator_Destroy (&A_);
2315  ML_Operator_Destroy (&B_);
2316  ML_Operator_Destroy (&C_);
2317  ML_Comm_Destroy (&comm);
2318  }
2319  }
2320 
2321 
2322  void
2323  SparseMatrix::mmult (SparseMatrix &C,
2324  const SparseMatrix &B,
2325  const VectorBase &V) const
2326  {
2327 #ifdef DEAL_II_WITH_64BIT_INDICES
2328  Assert(false,ExcNotImplemented())
2329 #endif
2330  internals::perform_mmult (*this, B, C, V, false);
2331  }
2332 
2333 
2334 
2335  void
2336  SparseMatrix::Tmmult (SparseMatrix &C,
2337  const SparseMatrix &B,
2338  const VectorBase &V) const
2339  {
2340 #ifdef DEAL_II_WITH_64BIT_INDICES
2341  Assert(false,ExcNotImplemented())
2342 #endif
2343  internals::perform_mmult (*this, B, C, V, true);
2344  }
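
// Sketch of the two entry points wrapping perform_mmult() above; V is an
// optional vector whose entries scale the rows of B before multiplying:
//
//   A.mmult  (C, B);      // C = A * B
//   A.Tmmult (C, B, V);   // C = A^T * diag(V) * B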
2345 
2346 
2347 
2348  void
2349  SparseMatrix::write_ascii ()
2350  {
2351  Assert (false, ExcNotImplemented());
2352  }
2353 
2354 
2355 
2356  // As of now, no particularly neat
2357  // output is generated in case of
2358  // multiple processors.
2359  void
2360  SparseMatrix::print (std::ostream &out,
2361  const bool print_detailed_trilinos_information) const
2362  {
2363  if (print_detailed_trilinos_information == true)
2364  out << *matrix;
2365  else
2366  {
2367  double *values;
2368  int *indices;
2369  int num_entries;
2370 
2371  for (int i=0; i<matrix->NumMyRows(); ++i)
2372  {
2373  matrix->ExtractMyRowView (i, num_entries, values, indices);
2374  for (TrilinosWrappers::types::int_type j=0; j<num_entries; ++j)
2375  out << "(" << global_row_index(*matrix,i) << ","
2376  << global_column_index(*matrix,indices[j]) << ") "
2377  << values[j] << std::endl;
2378  }
2379  }
2380 
2381  AssertThrow (out, ExcIO());
2382  }
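
// Usage sketch: either print one "(row,column) value" line per locally
// stored entry, or let Trilinos print its detailed diagnostics:
//
//   A.print (std::cout);         // compact per-entry output
//   A.print (std::cout, true);   // full Epetra information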
2383 
2384 
2386  SparseMatrix::size_type
2387  SparseMatrix::memory_consumption () const
2385 
2388  {
2389  size_type static_memory = sizeof(*this) + sizeof (*matrix)
2390  + sizeof(*matrix->Graph().DataPtr());
2391  return ((sizeof(TrilinosScalar)+sizeof(TrilinosWrappers::types::int_type))*
2392  matrix->NumMyNonzeros() + sizeof(int)*local_size() + static_memory);
2393  }
2394 
2395 
2396 
2397  const Epetra_Map &
2398  SparseMatrix::domain_partitioner () const
2399  {
2400  return matrix->DomainMap();
2401  }
2402 
2403 
2404 
2405  const Epetra_Map &
2406  SparseMatrix::range_partitioner () const
2407  {
2408  return matrix->RangeMap();
2409  }
2410 
2411 
2412 
2413  const Epetra_Map &
2414  SparseMatrix::row_partitioner () const
2415  {
2416  return matrix->RowMap();
2417  }
2418 
2419 
2420 
2421  const Epetra_Map &
2422  SparseMatrix::col_partitioner () const
2423  {
2424  return matrix->ColMap();
2425  }
2426 
2427 
2428  MPI_Comm
2429  SparseMatrix::get_mpi_communicator () const
2430  {
2431 
2432 #ifdef DEAL_II_WITH_MPI
2433 
2434  const Epetra_MpiComm *mpi_comm
2435  = dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
2436  Assert(mpi_comm != 0, ExcInternalError());
2437  return mpi_comm->Comm();
2438 #else
2439 
2440  return MPI_COMM_SELF;
2441 
2442 #endif
2443 
2444  }
2445 }
2446 
2447 #ifdef DEAL_II_WITH_CXX11
2448 
2449 namespace TrilinosWrappers
2450 {
2451  namespace internal
2452  {
2453  namespace
2454  {
2455 #ifndef DEAL_II_WITH_MPI
2456  Epetra_Map
2457  make_serial_Epetra_map (const IndexSet &serial_partitioning)
2458  {
2459  // See IndexSet::make_trilinos_map
2460  return Epetra_Map(TrilinosWrappers::types::int_type(serial_partitioning.size()),
2461  TrilinosWrappers::types::int_type(serial_partitioning.n_elements()),
2462  0, Epetra_SerialComm());
2463  }
2464 #endif
2465  }
2466 
2467  namespace LinearOperator
2468  {
2469 
2470  TrilinosPayload::TrilinosPayload ()
2471  : use_transpose (false),
2472 #ifdef DEAL_II_WITH_MPI
2473  communicator (MPI_COMM_SELF),
2474  domain_map (IndexSet().make_trilinos_map(communicator.Comm())),
2475  range_map (IndexSet().make_trilinos_map(communicator.Comm()))
2476 #else
2477  domain_map (internal::make_serial_Epetra_map(IndexSet())),
2478  range_map (internal::make_serial_Epetra_map(IndexSet()))
2479 #endif
2480  {
2481  vmult = [](Range &, const Domain &)
2482  {
2483  Assert(false,
2484  ExcMessage("Uninitialized TrilinosPayload::vmult called "
2485  "(Default constructor)"));
2486  };
2487 
2488  Tvmult = [](Domain &, const Range &)
2489  {
2490  Assert(false,
2491  ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
2492  "(Default constructor)"));
2493  };
2494 
2495  inv_vmult = [](Domain &, const Range &)
2496  {
2497  Assert(false,
2498  ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2499  "(Default constructor)"));
2500  };
2501 
2502  inv_Tvmult = [](Range &, const Domain &)
2503  {
2504  Assert(false,
2505  ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2506  "(Default constructor)"));
2507  };
2508  }
2509 
2510 
2511 
2512  TrilinosPayload::TrilinosPayload (const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2513  const TrilinosWrappers::SparseMatrix &matrix)
2514  : use_transpose (matrix_exemplar.trilinos_matrix().UseTranspose()),
2515 #ifdef DEAL_II_WITH_MPI
2516  communicator (matrix_exemplar.get_mpi_communicator()),
2517  domain_map (matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(communicator.Comm())),
2518  range_map (matrix_exemplar.locally_owned_range_indices().make_trilinos_map(communicator.Comm()))
2519 #else
2520  domain_map (internal::make_serial_Epetra_map(matrix_exemplar.locally_owned_domain_indices())),
2521  range_map (internal::make_serial_Epetra_map(matrix_exemplar.locally_owned_range_indices()))
2522 #endif
2523  {
2524  vmult = [&matrix_exemplar,&matrix](Range &tril_dst, const Domain &tril_src)
2525  {
2526  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2527  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2528  Assert (matrix.trilinos_matrix().Filled(), TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2529  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2530  tril_src, tril_dst,
2531  matrix_exemplar.trilinos_matrix().UseTranspose());
2532  internal::check_vector_map_equality(matrix.trilinos_matrix(),
2533  tril_src, tril_dst,
2534  matrix.trilinos_matrix().UseTranspose());
2535 
2536  const int ierr = matrix.trilinos_matrix().Apply (tril_src, tril_dst);
2537  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2538  };
2539 
2540  Tvmult = [&matrix_exemplar,&matrix](Domain &tril_dst, const Range &tril_src)
2541  {
2542  // Duplicated from TrilinosWrappers::SparseMatrix::Tvmult
2543  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2544  Assert (matrix.trilinos_matrix().Filled(), TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2545  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2546  tril_src, tril_dst,
2547  !matrix_exemplar.trilinos_matrix().UseTranspose());
2548  internal::check_vector_map_equality(matrix.trilinos_matrix(),
2549  tril_src, tril_dst,
2550  !matrix.trilinos_matrix().UseTranspose());
2551 
2552  Epetra_CrsMatrix &tril_mtrx_non_const = const_cast<Epetra_CrsMatrix &>(matrix.trilinos_matrix());
2553  tril_mtrx_non_const.SetUseTranspose(!matrix.trilinos_matrix().UseTranspose());
2554  const int ierr = matrix.trilinos_matrix().Apply (tril_src, tril_dst);
2555  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2556  tril_mtrx_non_const.SetUseTranspose(!matrix.trilinos_matrix().UseTranspose());
2557  };
2558 
2559  inv_vmult = [](Domain &, const Range &)
2560  {
2561  Assert(false,
2562  ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2563  "(Matrix constructor with matrix exemplar)"));
2564  };
2565 
2566  inv_Tvmult = [](Range &, const Domain &)
2567  {
2568  Assert(false,
2569  ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2570  "(Matrix constructor with matrix exemplar)"));
2571  };
2572  }
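
// Sketch of how this constructor is reached (the actual call sites live
// in the LinearOperator machinery): the exemplar supplies communicator
// and maps, the second argument the operation to be applied.
//
//   typedef TrilinosWrappers::internal::LinearOperator::TrilinosPayload
//     Payload;
//   Payload payload (matrix_exemplar, matrix);   // vmult wraps Apply()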
2573 
2574 
2575 
2576  TrilinosPayload::TrilinosPayload (const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2577  const TrilinosWrappers::PreconditionBase &preconditioner)
2578  : use_transpose (matrix_exemplar.trilinos_matrix().UseTranspose()),
2579 #ifdef DEAL_II_WITH_MPI
2580  communicator (matrix_exemplar.get_mpi_communicator()),
2581  domain_map (matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(communicator.Comm())),
2582  range_map (matrix_exemplar.locally_owned_range_indices().make_trilinos_map(communicator.Comm()))
2583 #else
2584  domain_map (internal::make_serial_Epetra_map(matrix_exemplar.locally_owned_domain_indices())),
2585  range_map (internal::make_serial_Epetra_map(matrix_exemplar.locally_owned_range_indices()))
2586 #endif
2587  {
2588  vmult = [&matrix_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
2589  {
2590  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2591  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2592  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2593  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2594  tril_src, tril_dst,
2595  matrix_exemplar.trilinos_matrix().UseTranspose());
2596  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2597  tril_src, tril_dst,
2598  preconditioner.trilinos_operator().UseTranspose());
2599 
2600  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2601  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2602  };
2603 
2604  Tvmult = [&matrix_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
2605  {
2606  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2607  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2608  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2609  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2610  tril_src, tril_dst,
2611  !matrix_exemplar.trilinos_matrix().UseTranspose());
2612  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2613  tril_src, tril_dst,
2614  !preconditioner.trilinos_operator().UseTranspose());
2615 
2616  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2617  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2618  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2619  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2620  };
2621 
2622  inv_vmult = [&matrix_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
2623  {
2624  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2625  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2626  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2627  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2628  tril_src, tril_dst,
2629  !matrix_exemplar.trilinos_matrix().UseTranspose());
2630  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2631  tril_src, tril_dst,
2632  !preconditioner.trilinos_operator().UseTranspose());
2633 
2634  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2635  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2636  };
2637 
2638  inv_Tvmult = [&matrix_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
2639  {
2640  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2641  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2642  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2643  internal::check_vector_map_equality(matrix_exemplar.trilinos_matrix(),
2644  tril_src, tril_dst,
2645  matrix_exemplar.trilinos_matrix().UseTranspose());
2646  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2647  tril_src, tril_dst,
2648  preconditioner.trilinos_operator().UseTranspose());
2649 
2650  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2651  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2652  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2653  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2654  };
2655  }
2656 
2657 
2658 
2659  TrilinosPayload::TrilinosPayload (const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
2660  const TrilinosWrappers::PreconditionBase &preconditioner)
2661  : use_transpose (preconditioner_exemplar.trilinos_operator().UseTranspose()),
2662 #ifdef DEAL_II_WITH_MPI
2663  communicator (preconditioner_exemplar.get_mpi_communicator()),
2664  domain_map (preconditioner_exemplar.locally_owned_domain_indices().make_trilinos_map(communicator.Comm())),
2665  range_map (preconditioner_exemplar.locally_owned_range_indices().make_trilinos_map(communicator.Comm()))
2666 #else
2667  domain_map (internal::make_serial_Epetra_map(preconditioner_exemplar.locally_owned_domain_indices())),
2668  range_map (internal::make_serial_Epetra_map(preconditioner_exemplar.locally_owned_range_indices()))
2669 #endif
2670  {
2671  vmult = [&preconditioner_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
2672  {
2673  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2674  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2675  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2676  internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
2677  tril_src, tril_dst,
2678  preconditioner_exemplar.trilinos_operator().UseTranspose());
2679  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2680  tril_src, tril_dst,
2681  preconditioner.trilinos_operator().UseTranspose());
2682 
2683  const int ierr = preconditioner.trilinos_operator().Apply (tril_src,tril_dst);
2684  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2685  };
2686 
2687  Tvmult = [&preconditioner_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
2688  {
2689  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2690  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2691  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2692  internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
2693  tril_src, tril_dst,
2694  !preconditioner_exemplar.trilinos_operator().UseTranspose());
2695  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2696  tril_src, tril_dst,
2697  !preconditioner.trilinos_operator().UseTranspose());
2698 
2699  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2700  const int ierr = preconditioner.trilinos_operator().Apply (tril_src,tril_dst);
2701  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2702  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2703  };
2704 
2705  inv_vmult = [&preconditioner_exemplar,&preconditioner](Domain &tril_dst, const Range &tril_src)
2706  {
2707  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2708  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2709  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2710  internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
2711  tril_src, tril_dst,
2712  !preconditioner_exemplar.trilinos_operator().UseTranspose());
2713  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2714  tril_src, tril_dst,
2715  !preconditioner.trilinos_operator().UseTranspose());
2716 
2717  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2718  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2719  };
2720 
2721  inv_Tvmult = [&preconditioner_exemplar,&preconditioner](Range &tril_dst, const Domain &tril_src)
2722  {
2723  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2724  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2725  Assert (&tril_src != &tril_dst, TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2726  internal::check_vector_map_equality(preconditioner_exemplar.trilinos_operator(),
2727  tril_src, tril_dst,
2728  preconditioner_exemplar.trilinos_operator().UseTranspose());
2729  internal::check_vector_map_equality(preconditioner.trilinos_operator(),
2730  tril_src, tril_dst,
2731  preconditioner.trilinos_operator().UseTranspose());
2732 
2733  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2734  const int ierr = preconditioner.trilinos_operator().ApplyInverse (tril_src,tril_dst);
2735  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2736  preconditioner.trilinos_operator().SetUseTranspose(!preconditioner.trilinos_operator().UseTranspose());
2737  };
2738  }
2739 
2740 
2741 
2742  TrilinosPayload::TrilinosPayload (const TrilinosPayload &payload)
2743  : vmult (payload.vmult),
2744  Tvmult (payload.Tvmult),
2745  inv_vmult (payload.inv_vmult),
2746  inv_Tvmult (payload.inv_Tvmult),
2747  use_transpose (payload.use_transpose),
2748  communicator (payload.communicator),
2749  domain_map (payload.domain_map),
2750  range_map (payload.range_map)
2751  { }
2752 
2753 
2754 
2755  // Composite copy constructor
2756  // This is required for PackagedOperations
2757  TrilinosPayload::TrilinosPayload (const TrilinosPayload &first_op,
2758  const TrilinosPayload &second_op)
2759  : use_transpose (false), // The combination of operators provides the exact definition of the operation
2760  communicator (first_op.communicator),
2761  domain_map (second_op.domain_map),
2762  range_map (first_op.range_map)
2763  { }
2764 
2765 
2766 
2767  // Destructor
2768  TrilinosPayload::~TrilinosPayload ()
2769  { }
2770 
2771 
2772 
2773  TrilinosPayload
2774  TrilinosPayload::identity_payload () const
2775  {
2776  TrilinosPayload return_op (*this);
2777 
2778  return_op.vmult = [](Range &tril_dst,
2779  const Range &tril_src)
2780  {
2781  tril_dst = tril_src;
2782  };
2783 
2784  return_op.Tvmult = [](Range &tril_dst,
2785  const Range &tril_src)
2786  {
2787  tril_dst = tril_src;
2788  };
2789 
2790  return_op.inv_vmult = [](Range &tril_dst,
2791  const Range &tril_src)
2792  {
2793  tril_dst = tril_src;
2794  };
2795 
2796  return_op.inv_Tvmult = [](Range &tril_dst,
2797  const Range &tril_src)
2798  {
2799  tril_dst = tril_src;
2800  };
2801 
2802  return return_op;
2803  }
2804 
2805 
2806 
2807  TrilinosPayload
2808  TrilinosPayload::null_payload () const
2809  {
2810  TrilinosPayload return_op (*this);
2811 
2812  return_op.vmult = [](Range &tril_dst,
2813  const Domain &)
2814  {
2815  const int ierr = tril_dst.PutScalar(0.0);
2816 
2817  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2818  };
2819 
2820  return_op.Tvmult = [](Domain &tril_dst,
2821  const Range &)
2822  {
2823  const int ierr = tril_dst.PutScalar(0.0);
2824 
2825  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2826  };
2827 
2828  return_op.inv_vmult = [](Domain &tril_dst,
2829  const Range &)
2830  {
2831  AssertThrow(false, ExcMessage("Cannot compute inverse of null operator"));
2832 
2833  const int ierr = tril_dst.PutScalar(0.0);
2834 
2835  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2836  };
2837 
2838  return_op.inv_Tvmult = [](Range &tril_dst,
2839  const Domain &)
2840  {
2841  AssertThrow(false, ExcMessage("Cannot compute inverse of null operator"));
2842 
2843  const int ierr = tril_dst.PutScalar(0.0);
2844 
2845  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
2846  };
2847 
2848  return return_op;
2849  }
2850 
2851 
2852 
2853  TrilinosPayload
2854  TrilinosPayload::transpose_payload () const
2855  {
2856  TrilinosPayload return_op (*this);
2857  return_op.transpose();
2858  return return_op;
2859  }
2860 
2861 
2862 
2863  IndexSet
2864  TrilinosPayload::locally_owned_domain_indices () const
2865  {
2866  return IndexSet(domain_map);
2867  }
2868 
2869 
2870 
2871  IndexSet
2872  TrilinosPayload::locally_owned_range_indices () const
2873  {
2874  return IndexSet(range_map);
2875  }
2876 
2877 
2878 
2879  MPI_Comm
2880  TrilinosPayload::get_mpi_communicator () const
2881  {
2882 #ifdef DEAL_II_WITH_MPI
2883  return communicator.Comm();
2884 #else
2885  return MPI_COMM_SELF;
2886 #endif
2887  }
2888 
2889 
2890 
2891  void
2892  TrilinosPayload::transpose ()
2893  {
2894  SetUseTranspose (!use_transpose);
2895  }
2896 
2897 
2898 
2899  bool
2900  TrilinosPayload::UseTranspose () const
2901  {
2902  return use_transpose;
2903  }
2904 
2905 
2906 
2907  int
2908  TrilinosPayload::SetUseTranspose (bool UseTranspose)
2909  {
2910  if (use_transpose != UseTranspose)
2911  {
2912  use_transpose = UseTranspose;
2913  std::swap(domain_map,range_map);
2914  std::swap(vmult,Tvmult);
2915  std::swap(inv_vmult,inv_Tvmult);
2916  }
2917  return 0;
2918  }
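
// Sketch of the flag's effect (placeholder payload, vectors X, Y):
// toggling it swaps the maps and the vmult/Tvmult (and inv_*) pairs, so
// a subsequent Apply() transparently performs the transposed operation.
//
//   payload.SetUseTranspose (true);
//   payload.Apply (X, Y);   // now executes the original Tvmult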
2919 
2920 
2921 
2922  int
2923  TrilinosPayload::Apply (const VectorType &X,
2924  VectorType &Y) const
2925  {
2926  // The transposedness of the operations is taken care of
2927  // when we hit the transpose flag.
2928  vmult(Y,X);
2929  return 0;
2930  }
2931 
2932 
2933 
2934  int
2935  TrilinosPayload::ApplyInverse (const VectorType &X,
2936  VectorType &Y) const
2937  {
2938  // The transposedness of the operations is taken care of
2939  // when we hit the transpose flag.
2940  inv_vmult(Y,X);
2941  return 0;
2942  }
2943 
2944 
2945 
2946  const char *
2947  TrilinosPayload::Label () const
2948  {
2949  return "TrilinosPayload";
2950  }
2951 
2952 
2953 
2954  const Epetra_Comm &
2955  TrilinosPayload::Comm () const
2956  {
2957  return communicator;
2958  }
2959 
2960 
2961 
2962  const Epetra_Map &
2963  TrilinosPayload::OperatorDomainMap () const
2964  {
2965  return domain_map;
2966  }
2967 
2968 
2969 
2970  const Epetra_Map &
2971  TrilinosPayload::OperatorRangeMap () const
2972  {
2973  return range_map;
2974  }
2975 
2976 
2977 
2978  bool
2979  TrilinosPayload::HasNormInf () const
2980  {
2981  return false;
2982  }
2983 
2984 
2985 
2986  double
2987  TrilinosPayload::NormInf () const
2988  {
2989  AssertThrow(false, ExcNotImplemented());
2990  return 0.0;
2991  }
2992 
2993 
2994 
2995  TrilinosPayload operator+(const TrilinosPayload &first_op,
2996  const TrilinosPayload &second_op)
2997  {
2998  typedef typename TrilinosPayload::Domain Domain;
2999  typedef typename TrilinosPayload::Range Range;
3000  typedef typename TrilinosPayload::VectorType Intermediate;
3001  typedef TrilinosWrappers::MPI::Vector GVMVectorType;
3002 
3003  AssertThrow(first_op.locally_owned_domain_indices() == second_op.locally_owned_domain_indices(),
3004  ExcMessage("Operators are set to work on incompatible IndexSets."));
3005  AssertThrow(first_op.locally_owned_range_indices() == second_op.locally_owned_range_indices(),
3006  ExcMessage("Operators are set to work on incompatible IndexSets."));
3007 
3008  TrilinosPayload return_op (first_op, second_op);
3009 
3010  // Capture by copy so the payloads are always valid
3011  return_op.vmult = [first_op, second_op](Range &tril_dst,
3012  const Domain &tril_src)
3013  {
3014  // Duplicated from LinearOperator::operator*
3015  // TODO: Template the constructor on GrowingVectorMemory vector type?
3016  static GrowingVectorMemory<GVMVectorType> vector_memory;
3017  GVMVectorType *i = vector_memory.alloc();
3018 
3019  // Initialise intermediate vector
3020  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3021  i->reinit(IndexSet(first_op_init_map),
3022  first_op.get_mpi_communicator(),
3023  /*bool omit_zeroing_entries =*/true);
3024 
3025  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3026  const size_type i_local_size = i->end() - i->begin();
3027  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3028  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3029  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3030  (void)second_op_init_map;
3031  Intermediate tril_int (View, first_op_init_map,
3032  const_cast<TrilinosScalar *>(i->begin()),
3033  i_local_size, 1);
3034 
3035  // These operators may themselves be transposed or not, so we let them
3036  // decide what the intended outcome is
3037  second_op.Apply(tril_src, tril_int);
3038  first_op.Apply(tril_src, tril_dst);
3039  const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
3040  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3041 
3042  vector_memory.free(i);
3043  };
3044 
3045  return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
3046  const Range &tril_src)
3047  {
3048  // Duplicated from LinearOperator::operator*
3049  // TODO: Template the constructor on GrowingVectorMemory vector type?
3050  static GrowingVectorMemory<GVMVectorType> vector_memory;
3051  GVMVectorType *i = vector_memory.alloc();
3052 
3053  // These operators may themselves be transposed or not, so we let them
3054  // decide what the intended outcome is
3055  // We must first transpose the operators to get the right IndexSets
3056  // for the input, intermediate and result vectors
3057  const_cast<TrilinosPayload &>(first_op).transpose();
3058  const_cast<TrilinosPayload &>(second_op).transpose();
3059 
3060  // Initialise intermediate vector
3061  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3062  i->reinit(IndexSet(first_op_init_map),
3063  first_op.get_mpi_communicator(),
3064  /*bool omit_zeroing_entries =*/true);
3065 
3066  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3067  const size_type i_local_size = i->end() - i->begin();
3068  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3069  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3070  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3071  (void)second_op_init_map;
3072  Intermediate tril_int (View, first_op_init_map,
3073  const_cast<TrilinosScalar *>(i->begin()),
3074  i_local_size, 1);
3075 
3076  // These operators may themselves be transposed or not, so we let them
3077  // decide what the intended outcome is
3078  second_op.Apply(tril_src, tril_int);
3079  first_op.Apply(tril_src, tril_dst);
3080  const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
3081  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3082 
3083  // Reset transpose flag
3084  const_cast<TrilinosPayload &>(first_op).transpose();
3085  const_cast<TrilinosPayload &>(second_op).transpose();
3086 
3087  vector_memory.free(i);
3088  };
3089 
3090  return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
3091  const Range &tril_src)
3092  {
3093  // Duplicated from LinearOperator::operator*
3094  // TODO: Template the constructor on GrowingVectorMemory vector type?
3095  static GrowingVectorMemory<GVMVectorType> vector_memory;
3096  GVMVectorType *i = vector_memory.alloc();
3097 
3098  // Initialise intermediate vector
3099  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3100  i->reinit(IndexSet(first_op_init_map),
3101  first_op.get_mpi_communicator(),
3102  /*bool omit_zeroing_entries =*/true);
3103 
3104  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3105  const size_type i_local_size = i->end() - i->begin();
3106  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3107  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3108  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3109  (void)second_op_init_map;
3110  Intermediate tril_int (View, first_op_init_map,
3111  const_cast<TrilinosScalar *>(i->begin()),
3112  i_local_size, 1);
3113 
3114  // These operators may themselves be transposed or not, so we let them
3115  // decide what the intended outcome is
3116  second_op.ApplyInverse(tril_src, tril_int);
3117  first_op.ApplyInverse(tril_src, tril_dst);
3118  const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
3119  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3120 
3121  vector_memory.free(i);
3122  };
3123 
3124  return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
3125  const Domain &tril_src)
3126  {
3127  // Duplicated from LinearOperator::operator*
3128  // TODO: Template the constructor on GrowingVectorMemory vector type?
3129  static GrowingVectorMemory<GVMVectorType> vector_memory;
3130  GVMVectorType *i = vector_memory.alloc();
3131 
3132  // These operators may themselves be transposed or not, so we let them
3133  // decide what the intended outcome is
3134  // We must first transpose the operators to get the right IndexSets
3135  // for the input, intermediate and result vectors
3136  const_cast<TrilinosPayload &>(first_op).transpose();
3137  const_cast<TrilinosPayload &>(second_op).transpose();
3138 
3139  // Initialise intermediate vector
3140  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3141  i->reinit(IndexSet(first_op_init_map),
3142  first_op.get_mpi_communicator(),
3143  /*bool omit_zeroing_entries =*/true);
3144 
3145  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3146  const size_type i_local_size = i->end() - i->begin();
3147  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3148  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3149  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3150  (void)second_op_init_map;
3151  Intermediate tril_int (View, first_op_init_map,
3152  const_cast<TrilinosScalar *>(i->begin()),
3153  i_local_size, 1);
3154 
3155  // These operators may themselves be transposed or not, so we let them
3156  // decide what the intended outcome is
3157  second_op.ApplyInverse(tril_src, tril_int);
3158  first_op.ApplyInverse(tril_src, tril_dst);
3159  const int ierr = tril_dst.Update (1.0, tril_int, 1.0);
3160  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
3161 
3162  // Reset transpose flag
3163  const_cast<TrilinosPayload &>(first_op).transpose();
3164  const_cast<TrilinosPayload &>(second_op).transpose();
3165 
3166  vector_memory.free(i);
3167  };
3168 
3169  return return_op;
3170  }
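
// Sketch of payload addition with placeholder payloads a and b that act
// on compatible index sets, as asserted above:
//
//   TrilinosPayload sum_op = a + b;
//   sum_op.Apply (X, Y);   // Y = a(X) + b(X)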
3171 
3172 
3173 
3174  TrilinosPayload operator*(const TrilinosPayload &first_op,
3175  const TrilinosPayload &second_op)
3176  {
3177  typedef typename TrilinosPayload::Domain Domain;
3178  typedef typename TrilinosPayload::Range Range;
3179  typedef typename TrilinosPayload::VectorType Intermediate;
3180  typedef TrilinosWrappers::MPI::Vector GVMVectorType;
3181 
3182  AssertThrow(first_op.locally_owned_domain_indices() == second_op.locally_owned_range_indices(),
3183  ExcMessage("Operators are set to work on incompatible IndexSets."));
3184 
3185  TrilinosPayload return_op (first_op, second_op);
3186 
3187  // Capture by copy so the payloads are always valid
3188  return_op.vmult = [first_op, second_op](Range &tril_dst,
3189  const Domain &tril_src)
3190  {
3191  // Duplicated from LinearOperator::operator*
3192  // TODO: Template the constructor on GrowingVectorMemory vector type?
3193  static GrowingVectorMemory<GVMVectorType> vector_memory;
3194  GVMVectorType *i = vector_memory.alloc();
3195 
3196  // Initialise intermediate vector
3197  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3198  i->reinit(IndexSet(first_op_init_map),
3199  first_op.get_mpi_communicator(),
3200  /*bool omit_zeroing_entries =*/true);
3201 
3202  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3203  const size_type i_local_size = i->end() - i->begin();
3204  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3205  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3206  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3207  (void)second_op_init_map;
3208  Intermediate tril_int (View, first_op_init_map,
3209  const_cast<TrilinosScalar *>(i->begin()),
3210  i_local_size, 1);
3211 
3212  // These operators may themselves be transposed or not, so we let them
3213  // decide what the intended outcome is
3214  second_op.Apply(tril_src, tril_int);
3215  first_op.Apply(tril_int, tril_dst);
3216 
3217  vector_memory.free(i);
3218  };
3219 
3220  return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
3221  const Range &tril_src)
3222  {
3223  // Duplicated from LinearOperator::operator*
3224  // TODO: Template the constructor on GrowingVectorMemory vector type?
3225  static GrowingVectorMemory<GVMVectorType> vector_memory;
3226  GVMVectorType *i = vector_memory.alloc();
3227 
3228  // These operators may themselves be transposed or not, so we let them
3229  // decide what the intended outcome is
3230  // We must first transpose the operators to get the right IndexSets
3231  // for the input, intermediate and result vectors
3232  const_cast<TrilinosPayload &>(first_op).transpose();
3233  const_cast<TrilinosPayload &>(second_op).transpose();
3234 
3235  // Initialise intermediate vector
3236  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3237  i->reinit(IndexSet(first_op_init_map),
3238  first_op.get_mpi_communicator(),
3239  /*bool omit_zeroing_entries =*/true);
3240 
3241  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3242  const size_type i_local_size = i->end() - i->begin();
3243  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3244  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3245  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3246  (void)second_op_init_map;
3247  Intermediate tril_int (View, first_op_init_map,
3248  const_cast<TrilinosScalar *>(i->begin()),
3249  i_local_size, 1);
3250 
3251  // Apply the operators in the reverse order to vmult
3252  first_op.Apply(tril_src, tril_int);
3253  second_op.Apply(tril_int, tril_dst);
3254 
3255  // Reset transpose flag
3256  const_cast<TrilinosPayload &>(first_op).transpose();
3257  const_cast<TrilinosPayload &>(second_op).transpose();
3258 
3259  vector_memory.free(i);
3260  };
3261 
3262  return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
3263  const Range &tril_src)
3264  {
3265  // Duplicated from LinearOperator::operator*
3266  // TODO: Template the constructor on GrowingVectorMemory vector type?
3267  static GrowingVectorMemory<GVMVectorType> vector_memory;
3268  GVMVectorType *i = vector_memory.alloc();
3269 
3270  // Initialise intermediate vector
3271  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3272  i->reinit(IndexSet(first_op_init_map),
3273  first_op.get_mpi_communicator(),
3274  /*bool omit_zeroing_entries =*/true);
3275 
3276  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3277  const size_type i_local_size = i->end() - i->begin();
3278  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3279  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3280  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3281  (void)second_op_init_map;
3282  Intermediate tril_int (View, first_op_init_map,
3283  const_cast<TrilinosScalar *>(i->begin()),
3284  i_local_size, 1);
3285 
3286  // Apply the operators in the reverse order to vmult
3287  // and the same order as Tvmult
3288  first_op.ApplyInverse(tril_src, tril_int);
3289  second_op.ApplyInverse(tril_int, tril_dst);
3290 
3291  vector_memory.free(i);
3292  };
3293 
3294  return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
3295  const Domain &tril_src)
3296  {
3297  // Duplicated from LinearOperator::operator*
3298  // TODO: Template the constructor on GrowingVectorMemory vector type?
3299  static GrowingVectorMemory<GVMVectorType> vector_memory;
3300  GVMVectorType *i = vector_memory.alloc();
3301 
3302  // These operators may themselves be transposed or not, so we let them
3303  // decide what the intended outcome is
3304  // We must first transpose the operators to get the right IndexSets
3305  // for the input, intermediate and result vectors
3306  const_cast<TrilinosPayload &>(first_op).transpose();
3307  const_cast<TrilinosPayload &>(second_op).transpose();
3308 
3309  // Initialise intermediate vector
3310  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3311  i->reinit(IndexSet(first_op_init_map),
3312  first_op.get_mpi_communicator(),
3313  /*bool omit_zeroing_entries =*/true);
3314 
3315  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3316  const size_type i_local_size = i->end() - i->begin();
3317  AssertDimension (i_local_size, static_cast<size_type>(first_op_init_map.NumMyPoints()));
3318  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3319  AssertDimension (i_local_size, static_cast<size_type>(second_op_init_map.NumMyPoints()));
3320  (void)second_op_init_map;
3321  Intermediate tril_int (View, first_op_init_map,
3322  const_cast<TrilinosScalar *>(i->begin()),
3323  i_local_size, 1);
3324 
3325  // These operators may themselves be transposed or not, so we let them
3326  // decide what the intended outcome is
3327  // Apply the operators in the reverse order to Tvmult
3328  // and the same order as vmult
3329  second_op.ApplyInverse(tril_src, tril_int);
3330  first_op.ApplyInverse(tril_int, tril_dst);
3331 
3332  // Reset transpose flag
3333  const_cast<TrilinosPayload &>(first_op).transpose();
3334  const_cast<TrilinosPayload &>(second_op).transpose();
3335 
3336  vector_memory.free(i);
3337  };
3338 
3339  return return_op;
3340  }
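
// Sketch of payload composition with placeholder payloads a and b, where
// a's domain matches b's range as asserted above:
//
//   TrilinosPayload prod_op = a * b;
//   prod_op.Apply (X, Y);   // Y = a(b(X))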
3341 
3342  } /* namespace LinearOperator */
3343  } /* namespace internal */
3344 } /* namespace TrilinosWrappers */
3345 
3346 #endif // DEAL_II_WITH_CXX11
3347 
3348 
3349 // explicit instantiations
3350 #include "trilinos_sparse_matrix.inst"
3351 
3352 
3353 // TODO: put these instantiations into generic file
3354 namespace TrilinosWrappers
3355 {
3356  template void
3357  SparseMatrix::reinit (const ::SparsityPattern &);
3358 
3359  template void
3360  SparseMatrix::reinit (const DynamicSparsityPattern &);
3361 
3362  template void
3363  SparseMatrix::reinit (const Epetra_Map &,
3364  const ::SparsityPattern &,
3365  const bool);
3366  template void
3367  SparseMatrix::reinit (const Epetra_Map &,
3368  const DynamicSparsityPattern &,
3369  const bool);
3370  template void
3371  SparseMatrix::reinit (const Epetra_Map &,
3372  const Epetra_Map &,
3373  const ::SparsityPattern &,
3374  const bool);
3375  template void
3376  SparseMatrix::reinit (const Epetra_Map &,
3377  const Epetra_Map &,
3378  const DynamicSparsityPattern &,
3379  const bool);
3380  template void
3381  SparseMatrix::reinit (const IndexSet &,
3382  const IndexSet &,
3383  const ::SparsityPattern &,
3384  const MPI_Comm &,
3385  const bool);
3386  template void
3387  SparseMatrix::reinit (const IndexSet &,
3388  const IndexSet &,
3389  const DynamicSparsityPattern &,
3390  const MPI_Comm &,
3391  const bool);
3392 
3393  template void
3394  SparseMatrix::vmult (VectorBase &,
3395  const VectorBase &) const;
3396  template void
3397  SparseMatrix::vmult (Vector &,
3398  const Vector &) const;
3399  template void
3400  SparseMatrix::vmult (MPI::Vector &,
3401  const MPI::Vector &) const;
3402  template void
3403  SparseMatrix::vmult (::Vector&lt;double&gt; &,
3404  const ::Vector<double> &) const;
3405  template void
3406  SparseMatrix::vmult (::LinearAlgebra::distributed::Vector&lt;double&gt; &,
3407  const ::LinearAlgebra::distributed::Vector<double> &) const;
3408  template void
3409  SparseMatrix::Tvmult (VectorBase &,
3410  const VectorBase &) const;
3411  template void
3412  SparseMatrix::Tvmult (Vector &,
3413  const Vector &) const;
3414  template void
3415  SparseMatrix::Tvmult (MPI::Vector &,
3416  const MPI::Vector &) const;
3417  template void
3418  SparseMatrix::Tvmult (::Vector&lt;double&gt; &,
3419  const ::Vector<double> &) const;
3420  template void
3421  SparseMatrix::Tvmult (::LinearAlgebra::distributed::Vector&lt;double&gt; &,
3422  const ::LinearAlgebra::distributed::Vector<double> &) const;
3423  template void
3424  SparseMatrix::vmult_add (VectorBase &,
3425  const VectorBase &) const;
3426  template void
3427  SparseMatrix::vmult_add (Vector &,
3428  const Vector &) const;
3429  template void
3430  SparseMatrix::vmult_add (MPI::Vector &,
3431  const MPI::Vector &) const;
3432  template void
3433  SparseMatrix::vmult_add (::Vector&lt;double&gt; &,
3434  const ::Vector<double> &) const;
3435  template void
3436  SparseMatrix::vmult_add (::LinearAlgebra::distributed::Vector&lt;double&gt; &,
3437  const ::LinearAlgebra::distributed::Vector<double> &) const;
3438  template void
3439  SparseMatrix::Tvmult_add (VectorBase &,
3440  const VectorBase &) const;
3441  template void
3442  SparseMatrix::Tvmult_add (Vector &,
3443  const Vector &) const;
3444  template void
3445  SparseMatrix::Tvmult_add (MPI::Vector &,
3446  const MPI::Vector &) const;
3447  template void
3448  SparseMatrix::Tvmult_add (::Vector&lt;double&gt; &,
3449  const ::Vector<double> &) const;
3450  template void
3451  SparseMatrix::Tvmult_add (::LinearAlgebra::distributed::Vector&lt;double&gt; &,
3452  const ::LinearAlgebra::distributed::Vector<double> &) const;
3453 
3454 }
3455 
3456 DEAL_II_NAMESPACE_CLOSE
3457 
3458 #endif // DEAL_II_WITH_TRILINOS
std::function< void(VectorType &, const VectorType &)> vmult
void copy_from(const SparseMatrix &source)
SparseMatrix & operator/=(const TrilinosScalar factor)
virtual int Apply(const VectorType &X, VectorType &Y) const
std::pair< size_type, size_type > local_range() const
const Epetra_MultiVector & trilinos_vector() const
static ::ExceptionBase & ExcNotImplemented()
TrilinosScalar matrix_scalar_product(const VectorBase &u, const VectorBase &v) const
static ::ExceptionBase & ExcMatrixNotCompressed()
const Epetra_CrsGraph & trilinos_sparsity_pattern() const
bool is_element(const size_type index) const
Definition: index_set.h:1489
void reinit(const size_type n, const bool omit_zeroing_entries=false)
void compress(::VectorOperation::values operation)
static ::ExceptionBase & ExcAccessToNonLocalElement(size_type arg1, size_type arg2, size_type arg3, size_type arg4)
void set(const size_type i, const size_type j, const TrilinosScalar value)
size_type n_elements() const
Definition: index_set.h:1560
#define AssertIsFinite(number)
Definition: exceptions.h:1197
unsigned int local_size() const
real_type l2_norm() const
void compress(::VectorOperation::values)
std::function< void(VectorType &, const VectorType &)> inv_Tvmult
static ::ExceptionBase & ExcInternalError()
const Epetra_Map & row_partitioner() const 1
std_cxx11::shared_ptr< Epetra_Export > nonlocal_matrix_exporter