Reference documentation for deal.II version 9.3.3
trilinos_sparse_matrix.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2008 - 2021 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
17#include <deal.II/lac/trilinos_sparse_matrix.h>
18
19#ifdef DEAL_II_WITH_TRILINOS
20
22
30
32# include <boost/container/small_vector.hpp>
34
35# ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
36# include <EpetraExt_MatrixMatrix.h>
37# endif
38# include <Epetra_Export.h>
39# include <Teuchos_RCP.hpp>
40# include <ml_epetra_utils.h>
41# include <ml_struct.h>
42
43# include <memory>
44
45DEAL_II_NAMESPACE_OPEN
46
47namespace TrilinosWrappers
48{
49 namespace internal
50 {
51 template <typename VectorType>
52 typename VectorType::value_type *
53 begin(VectorType &V)
54 {
55 return V.begin();
56 }
57
58 template <typename VectorType>
59 const typename VectorType::value_type *
60 begin(const VectorType &V)
61 {
62 return V.begin();
63 }
64
65 template <typename VectorType>
66 typename VectorType::value_type *
67 end(VectorType &V)
68 {
69 return V.end();
70 }
71
72 template <typename VectorType>
73 const typename VectorType::value_type *
74 end(const VectorType &V)
75 {
76 return V.end();
77 }
78
79# ifdef DEAL_II_WITH_MPI
80 template <>
81 double *
82 begin(LinearAlgebra::EpetraWrappers::Vector &V)
83 {
84 return V.trilinos_vector()[0];
85 }
86
87 template <>
88 const double *
89 begin(const LinearAlgebra::EpetraWrappers::Vector &V)
90 {
91 return V.trilinos_vector()[0];
92 }
93
94 template <>
95 double *
96 end(LinearAlgebra::EpetraWrappers::Vector &V)
97 {
98 return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
99 }
100
101 template <>
102 const double *
103 end(const LinearAlgebra::EpetraWrappers::Vector &V)
104 {
105 return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
106 }
107
108# ifdef DEAL_II_TRILINOS_WITH_TPETRA
109 template <typename Number>
110 Number *
111 begin(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
112 {
113 return V.trilinos_vector().getDataNonConst().get();
114 }
115
116 template <typename Number>
117 const Number *
118 begin(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
119 {
120 return V.trilinos_vector().getData().get();
121 }
122
123 template <typename Number>
124 Number *
125 end(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
126 {
127 return V.trilinos_vector().getDataNonConst().get() +
128 V.trilinos_vector().getLocalLength();
129 }
130
131 template <typename Number>
132 const Number *
133 end(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
134 {
135 return V.trilinos_vector().getData().get() +
136 V.trilinos_vector().getLocalLength();
137 }
138# endif
139# endif
140 } // namespace internal
141
142
143 namespace SparseMatrixIterators
144 {
145 void
146 AccessorBase::visit_present_row()
147 {
148 // if we are asked to visit the past-the-end line, then simply
149 // release all our caches and go on with life.
150 //
151 // do the same if the row we're supposed to visit is not locally
152 // owned. this is simply going to make non-locally owned rows
153 // look like they're empty
154 if ((this->a_row == matrix->m()) ||
155 (matrix->in_local_range(this->a_row) == false))
156 {
157 colnum_cache.reset();
158 value_cache.reset();
159
160 return;
161 }
162
163 // get a representation of the present row
164 int ncols;
165 TrilinosWrappers::types::int_type colnums = matrix->n();
166 if (value_cache.get() == nullptr)
167 {
168 value_cache =
169 std::make_shared<std::vector<TrilinosScalar>>(matrix->n());
170 colnum_cache = std::make_shared<std::vector<size_type>>(matrix->n());
171 }
172 else
173 {
174 value_cache->resize(matrix->n());
175 colnum_cache->resize(matrix->n());
176 }
177
178 int ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy(
179 this->a_row,
180 colnums,
181 ncols,
182 value_cache->data(),
183 reinterpret_cast<TrilinosWrappers::types::int_type *>(
184 colnum_cache->data()));
185 value_cache->resize(ncols);
186 colnum_cache->resize(ncols);
187 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
188
189 // copy it into our caches if the
190 // line isn't empty. if it is, then
191 // we've done something wrong, since
192 // we shouldn't have initialized an
193 // iterator for an empty line (what
194 // would it point to?)
195 }
196 } // namespace SparseMatrixIterators
197
198
199 // The constructor is actually the
200 // only point where we have to check
201 // whether we build a serial or a
202 // parallel Trilinos matrix.
203 // Actually, it does not even matter
204 // how many threads there are, but
205 // only whether we use an MPI compiler or
206 // a standard compiler. So, even one
207 // thread on a configuration with
208 // MPI will still get a parallel
209 // interface.
210 SparseMatrix::SparseMatrix()
211 : column_space_map(new Epetra_Map(0, 0, Utilities::Trilinos::comm_self()))
212 , matrix(
213 new Epetra_FECrsMatrix(View, *column_space_map, *column_space_map, 0))
214 , last_action(Zero)
215 , compressed(true)
216 {
217 matrix->FillComplete();
218 }
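
// A minimal usage sketch of the constructors below (the matrix name and
// sizes are hypothetical): create a small serial matrix, fill it, and
// compress it before use.
//
//   TrilinosWrappers::SparseMatrix A(100, 100, 5); // 100x100, <=5 per row
//   for (unsigned int i = 0; i < 100; ++i)
//     A.set(i, i, 1.0);
//   A.compress(VectorOperation::insert);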
219
220
221
222 SparseMatrix::SparseMatrix(const size_type m,
223 const size_type n,
224 const unsigned int n_max_entries_per_row)
225 : column_space_map(
226 new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
227 0,
228 Utilities::Trilinos::comm_self()))
229 ,
230
231 // on one processor only, we know how the
232 // columns of the matrix will be
233 // distributed (everything on one
234 // processor), so we can hand in this
235 // information to the constructor. we
236 // can't do so in parallel, where the
237 // information from columns is only
238 // available when entries have been added
239 matrix(new Epetra_FECrsMatrix(
240 Copy,
241 Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
242 0,
243 Utilities::Trilinos::comm_self()),
244 *column_space_map,
245 n_max_entries_per_row,
246 false))
247 , last_action(Zero)
248 , compressed(false)
249 {}
250
251
252
253 SparseMatrix::SparseMatrix(const size_type m,
254 const size_type n,
255 const std::vector<unsigned int> &n_entries_per_row)
256 : column_space_map(
257 new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
258 0,
259 Utilities::Trilinos::comm_self()))
260 , matrix(new Epetra_FECrsMatrix(
261 Copy,
262 Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
263 0,
264 Utilities::Trilinos::comm_self()),
265 *column_space_map,
266 reinterpret_cast<int *>(
267 const_cast<unsigned int *>(n_entries_per_row.data())),
268 false))
269 , last_action(Zero)
270 , compressed(false)
271 {}
272
273
274
275 SparseMatrix::SparseMatrix(const IndexSet & parallel_partitioning,
276 const MPI_Comm & communicator,
277 const unsigned int n_max_entries_per_row)
278 : column_space_map(new Epetra_Map(
279 parallel_partitioning.make_trilinos_map(communicator, false)))
280 , matrix(new Epetra_FECrsMatrix(Copy,
281 *column_space_map,
282 n_max_entries_per_row,
283 false))
284 , last_action(Zero)
285 , compressed(false)
286 {}
287
288
289
290 SparseMatrix::SparseMatrix(const IndexSet &parallel_partitioning,
291 const MPI_Comm &communicator,
292 const std::vector<unsigned int> &n_entries_per_row)
293 : column_space_map(new Epetra_Map(
294 parallel_partitioning.make_trilinos_map(communicator, false)))
295 , matrix(new Epetra_FECrsMatrix(Copy,
296 *column_space_map,
297 reinterpret_cast<int *>(
298 const_cast<unsigned int *>(
299 n_entries_per_row.data())),
300 false))
301 , last_action(Zero)
302 , compressed(false)
303 {}
304
305
306
307 SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
308 const IndexSet &col_parallel_partitioning,
309 const MPI_Comm &communicator,
310 const size_type n_max_entries_per_row)
311 : column_space_map(new Epetra_Map(
312 col_parallel_partitioning.make_trilinos_map(communicator, false)))
313 , matrix(new Epetra_FECrsMatrix(
314 Copy,
315 row_parallel_partitioning.make_trilinos_map(communicator, false),
316 n_max_entries_per_row,
317 false))
318 , last_action(Zero)
319 , compressed(false)
320 {}
321
322
323
324 SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
325 const IndexSet &col_parallel_partitioning,
326 const MPI_Comm &communicator,
327 const std::vector<unsigned int> &n_entries_per_row)
328 : column_space_map(new Epetra_Map(
329 col_parallel_partitioning.make_trilinos_map(communicator, false)))
330 , matrix(new Epetra_FECrsMatrix(
331 Copy,
332 row_parallel_partitioning.make_trilinos_map(communicator, false),
333 reinterpret_cast<int *>(
334 const_cast<unsigned int *>(n_entries_per_row.data())),
335 false))
336 , last_action(Zero)
337 , compressed(false)
338 {}
339
340
341
342 SparseMatrix::SparseMatrix(const SparsityPattern &sparsity_pattern)
343 : column_space_map(new Epetra_Map(sparsity_pattern.domain_partitioner()))
344 , matrix(
345 new Epetra_FECrsMatrix(Copy,
346 sparsity_pattern.trilinos_sparsity_pattern(),
347 false))
348 , last_action(Zero)
349 , compressed(true)
350 {
351 Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true,
353 "The Trilinos sparsity pattern has not been compressed."));
354 compress(VectorOperation::insert);
355 }
356
357
358
359 SparseMatrix::SparseMatrix(SparseMatrix &&other) noexcept
360 : column_space_map(std::move(other.column_space_map))
361 , matrix(std::move(other.matrix))
362 , nonlocal_matrix(std::move(other.nonlocal_matrix))
363 , nonlocal_matrix_exporter(std::move(other.nonlocal_matrix_exporter))
364 , last_action(other.last_action)
365 , compressed(other.compressed)
366 {
367 other.last_action = Zero;
368 other.compressed = false;
369 }
370
371
372
373 void
374 SparseMatrix::copy_from(const SparseMatrix &rhs)
375 {
376 if (this == &rhs)
377 return;
378
379 nonlocal_matrix.reset();
380 nonlocal_matrix_exporter.reset();
381
382 // check whether we need to update the whole matrix layout (we have
383 // different maps or if we detect a row where the columns of the two
384 // matrices do not match)
385 bool needs_deep_copy =
386 !matrix->RowMap().SameAs(rhs.matrix->RowMap()) ||
387 !matrix->ColMap().SameAs(rhs.matrix->ColMap()) ||
388 !matrix->DomainMap().SameAs(rhs.matrix->DomainMap()) ||
389 n_nonzero_elements() != rhs.n_nonzero_elements();
390 if (!needs_deep_copy)
391 {
392 // Try to copy all the rows of the matrix one by one. In case of error
393 // (i.e., the column indices are different), we need to abort and blow
394 // away the matrix.
395 for (const auto row : locally_owned_range_indices())
396 {
397 const int row_local = matrix->RowMap().LID(
398 static_cast<TrilinosWrappers::types::int_type>(row));
399 Assert((row_local >= 0), ExcAccessToNonlocalRow(row));
400
401 int n_entries, rhs_n_entries;
402 TrilinosScalar *value_ptr, *rhs_value_ptr;
403 int * index_ptr, *rhs_index_ptr;
404 int ierr = rhs.matrix->ExtractMyRowView(row_local,
405 rhs_n_entries,
406 rhs_value_ptr,
407 rhs_index_ptr);
408 (void)ierr;
409 Assert(ierr == 0, ExcTrilinosError(ierr));
410
411 ierr = matrix->ExtractMyRowView(row_local,
412 n_entries,
413 value_ptr,
414 index_ptr);
415 Assert(ierr == 0, ExcTrilinosError(ierr));
416
417 if (n_entries != rhs_n_entries ||
418 std::memcmp(static_cast<void *>(index_ptr),
419 static_cast<void *>(rhs_index_ptr),
420 sizeof(int) * n_entries) != 0)
421 {
422 needs_deep_copy = true;
423 break;
424 }
425
426 for (int i = 0; i < n_entries; ++i)
427 value_ptr[i] = rhs_value_ptr[i];
428 }
429 }
430
431 if (needs_deep_copy)
432 {
433 column_space_map =
434 std::make_unique<Epetra_Map>(rhs.trilinos_matrix().DomainMap());
435
436 // release memory before reallocation
437 matrix = std::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);
438
439 matrix->FillComplete(*column_space_map, matrix->RowMap());
440 }
441
442 if (rhs.nonlocal_matrix.get() != nullptr)
443 nonlocal_matrix =
444 std::make_unique<Epetra_CrsMatrix>(Copy, rhs.nonlocal_matrix->Graph());
445 }
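
// Usage sketch for copy_from() (A and B are hypothetical matrices built
// from the same sparsity pattern, so the cheap row-wise value copy above
// applies; otherwise the deep-copy branch reallocates A):
//
//   TrilinosWrappers::SparseMatrix A, B;
//   // ... initialize both from one sparsity pattern, fill B ...
//   A.copy_from(B);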
446
447
448
449 namespace
450 {
451 using size_type = SparseMatrix::size_type;
452
453 template <typename SparsityPatternType>
454 void
455 reinit_matrix(const IndexSet & row_parallel_partitioning,
456 const IndexSet & column_parallel_partitioning,
457 const SparsityPatternType & sparsity_pattern,
458 const bool exchange_data,
459 const MPI_Comm & communicator,
460 std::unique_ptr<Epetra_Map> &column_space_map,
461 std::unique_ptr<Epetra_FECrsMatrix> &matrix,
462 std::unique_ptr<Epetra_CrsMatrix> & nonlocal_matrix,
463 std::unique_ptr<Epetra_Export> & nonlocal_matrix_exporter)
464 {
465 // release memory before reallocation
466 matrix.reset();
467 nonlocal_matrix.reset();
468 nonlocal_matrix_exporter.reset();
469
470 column_space_map = std::make_unique<Epetra_Map>(
471 column_parallel_partitioning.make_trilinos_map(communicator, false));
472
473 if (column_space_map->Comm().MyPID() == 0)
474 {
475 AssertDimension(sparsity_pattern.n_rows(),
476 row_parallel_partitioning.size());
477 AssertDimension(sparsity_pattern.n_cols(),
478 column_parallel_partitioning.size());
479 }
480
481 Epetra_Map row_space_map =
482 row_parallel_partitioning.make_trilinos_map(communicator, false);
483
484 // if we want to exchange data, build a usual Trilinos sparsity pattern
485 // and let that handle the exchange. otherwise, manually create a
486 // CrsGraph, which consumes considerably less memory because it can set
487 // the correct number of indices right from the start
488 if (exchange_data)
489 {
490 SparsityPattern trilinos_sparsity;
491 trilinos_sparsity.reinit(row_parallel_partitioning,
492 column_parallel_partitioning,
493 sparsity_pattern,
494 communicator,
495 exchange_data);
496 matrix = std::make_unique<Epetra_FECrsMatrix>(
497 Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);
498
499 return;
500 }
501
502 const size_type first_row = TrilinosWrappers::min_my_gid(row_space_map),
503 last_row =
504 TrilinosWrappers::max_my_gid(row_space_map) + 1;
505 std::vector<int> n_entries_per_row(last_row - first_row);
506
507 for (size_type row = first_row; row < last_row; ++row)
508 n_entries_per_row[row - first_row] = sparsity_pattern.row_length(row);
509
510 // The deal.II notation of a Sparsity pattern corresponds to the Epetra
511 // concept of a Graph. Hence, we generate a graph by copying the
512 // sparsity pattern into it, and then build up the matrix from the
513 // graph. This is considerably faster than directly filling elements
514 // into the matrix. Moreover, it consumes less memory, since the
515 // internal reordering is done on ints only, and we can leave the
516 // doubles aside.
517
518 // for more than one processor, need to specify only row map first and
519 // let the matrix entries decide about the column map (which says which
520 // columns are present in the matrix, not to be confused with the
521 // col_map that tells how the domain dofs of the matrix will be
522 // distributed). for only one processor, we can directly assign the
523 // columns as well. Compare this with bug # 4123 in the Sandia Bugzilla.
524 std::unique_ptr<Epetra_CrsGraph> graph;
525 if (row_space_map.Comm().NumProc() > 1)
526 graph = std::make_unique<Epetra_CrsGraph>(Copy,
527 row_space_map,
528 n_entries_per_row.data(),
529 true);
530 else
531 graph = std::make_unique<Epetra_CrsGraph>(Copy,
532 row_space_map,
533 *column_space_map,
534 n_entries_per_row.data(),
535 true);
536
537 // This function assumes that the sparsity pattern sits on all
538 // processors (completely). The parallel version uses an Epetra graph
539 // that is already distributed.
540
541 // now insert the indices
542 std::vector<TrilinosWrappers::types::int_type> row_indices;
543
544 for (size_type row = first_row; row < last_row; ++row)
545 {
546 const int row_length = sparsity_pattern.row_length(row);
547 if (row_length == 0)
548 continue;
549
550 row_indices.resize(row_length, -1);
551 {
552 typename SparsityPatternType::iterator p =
553 sparsity_pattern.begin(row);
554 for (size_type col = 0; p != sparsity_pattern.end(row); ++p, ++col)
555 row_indices[col] = p->column();
556 }
557 graph->Epetra_CrsGraph::InsertGlobalIndices(row,
558 row_length,
559 row_indices.data());
560 }
561
562 // Eventually, optimize the graph structure (sort indices, make memory
563 // contiguous, etc). note that the documentation of the function indeed
564 // states that we first need to provide the column (domain) map and then
565 // the row (range) map
566 graph->FillComplete(*column_space_map, row_space_map);
567 graph->OptimizeStorage();
568
569 // check whether we got the number of columns right.
570 AssertDimension(sparsity_pattern.n_cols(),
571 TrilinosWrappers::n_global_cols(*graph));
572 (void)n_global_cols;
573
574 // And now finally generate the matrix.
575 matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
576 }
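
    // Sketch of how this helper is typically reached (the names dsp,
    // n_dofs, locally_owned_dofs, and communicator are illustrative): build
    // a deal.II sparsity pattern and hand it to SparseMatrix::reinit(),
    // which forwards here.
    //
    //   DynamicSparsityPattern dsp(n_dofs, n_dofs);
    //   // ... fill dsp, e.g. via DoFTools::make_sparsity_pattern ...
    //   TrilinosWrappers::SparseMatrix A;
    //   A.reinit(locally_owned_dofs, locally_owned_dofs, dsp, communicator);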
577
578
579
580 // for the non-local graph, we need to circumvent the problem that some
581 // processors will not add into the non-local graph at all: We do not want
582 // to insert dummy elements on >5000 processors because that gets very
583 // slow. Thus, we directly set the IndicesAreGlobal flag in
584 // Epetra_CrsGraph. Since the setter is protected, we need to expose it by
585 // deriving a class from Epetra_CrsGraph for the purpose of creating the
586 // data structure
587 class Epetra_CrsGraphMod : public Epetra_CrsGraph
588 {
589 public:
590 Epetra_CrsGraphMod(const Epetra_Map &row_map,
591 const int * n_entries_per_row)
592 : Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
593 {}
594
595 void
596 SetIndicesAreGlobal()
597 {
598 this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
599 }
600 };
601
602
603
604 // specialization for DynamicSparsityPattern which can provide us with
605 // more information about the non-locally owned rows
606 template <>
607 void
608 reinit_matrix(const IndexSet & row_parallel_partitioning,
609 const IndexSet & column_parallel_partitioning,
610 const DynamicSparsityPattern &sparsity_pattern,
611 const bool exchange_data,
612 const MPI_Comm & communicator,
613 std::unique_ptr<Epetra_Map> & column_space_map,
614 std::unique_ptr<Epetra_FECrsMatrix> &matrix,
615 std::unique_ptr<Epetra_CrsMatrix> & nonlocal_matrix,
616 std::unique_ptr<Epetra_Export> & nonlocal_matrix_exporter)
617 {
618 matrix.reset();
619 nonlocal_matrix.reset();
620 nonlocal_matrix_exporter.reset();
621
622 column_space_map = std::make_unique<Epetra_Map>(
623 column_parallel_partitioning.make_trilinos_map(communicator, false));
624
625 AssertDimension(sparsity_pattern.n_rows(),
626 row_parallel_partitioning.size());
627 AssertDimension(sparsity_pattern.n_cols(),
628 column_parallel_partitioning.size());
629
630 Epetra_Map row_space_map =
631 row_parallel_partitioning.make_trilinos_map(communicator, false);
632
633 IndexSet relevant_rows(sparsity_pattern.row_index_set());
634 // serial case
635 if (relevant_rows.size() == 0)
636 {
637 relevant_rows.set_size(
638 TrilinosWrappers::n_global_elements(row_space_map));
639 relevant_rows.add_range(
640 0, TrilinosWrappers::n_global_elements(row_space_map));
641 }
642 relevant_rows.compress();
643 Assert(relevant_rows.n_elements() >=
644 static_cast<unsigned int>(row_space_map.NumMyElements()),
646 "Locally relevant rows of sparsity pattern must contain "
647 "all locally owned rows"));
648
649 // check whether the relevant rows correspond to exactly the same map as
650 // the owned rows. In that case, do not create the nonlocal graph and
651 // fill the columns on demand
652 const bool have_ghost_rows = [&]() {
653 std::vector<::types::global_dof_index> indices;
654 relevant_rows.fill_index_vector(indices);
655 Epetra_Map relevant_map(
656 TrilinosWrappers::types::int_type(-1),
657 TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
658 (indices.empty() ?
659 nullptr :
660 reinterpret_cast<TrilinosWrappers::types::int_type *>(
661 indices.data())),
662 0,
663 row_space_map.Comm());
664 return !relevant_map.SameAs(row_space_map);
665 }();
666
667 const unsigned int n_rows = relevant_rows.n_elements();
668 std::vector<TrilinosWrappers::types::int_type> ghost_rows;
669 std::vector<int> n_entries_per_row(row_space_map.NumMyElements());
670 std::vector<int> n_entries_per_ghost_row;
671 for (unsigned int i = 0, own = 0; i < n_rows; ++i)
672 {
673 const TrilinosWrappers::types::int_type global_row =
674 relevant_rows.nth_index_in_set(i);
675 if (row_space_map.MyGID(global_row))
676 n_entries_per_row[own++] = sparsity_pattern.row_length(global_row);
677 else if (sparsity_pattern.row_length(global_row) > 0)
678 {
679 ghost_rows.push_back(global_row);
680 n_entries_per_ghost_row.push_back(
681 sparsity_pattern.row_length(global_row));
682 }
683 }
684
685 Epetra_Map off_processor_map(-1,
686 ghost_rows.size(),
687 (ghost_rows.size() > 0) ?
688 (ghost_rows.data()) :
689 nullptr,
690 0,
691 row_space_map.Comm());
692
693 std::unique_ptr<Epetra_CrsGraph> graph;
694 std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
695 if (row_space_map.Comm().NumProc() > 1)
696 {
697 graph =
698 std::make_unique<Epetra_CrsGraph>(Copy,
699 row_space_map,
700 (n_entries_per_row.size() > 0) ?
701 (n_entries_per_row.data()) :
702 nullptr,
703 exchange_data ? false : true);
704 if (have_ghost_rows == true)
705 nonlocal_graph = std::make_unique<Epetra_CrsGraphMod>(
706 off_processor_map, n_entries_per_ghost_row.data());
707 }
708 else
709 graph =
710 std::make_unique<Epetra_CrsGraph>(Copy,
711 row_space_map,
712 *column_space_map,
713 (n_entries_per_row.size() > 0) ?
714 (n_entries_per_row.data()) :
715 nullptr,
716 true);
717
718 // now insert the indices, selecting between the local and nonlocal graph
719 std::vector<TrilinosWrappers::types::int_type> row_indices;
720
721 for (unsigned int i = 0; i < n_rows; ++i)
722 {
723 const TrilinosWrappers::types::int_type global_row =
724 relevant_rows.nth_index_in_set(i);
725 const int row_length = sparsity_pattern.row_length(global_row);
726 if (row_length == 0)
727 continue;
728
729 row_indices.resize(row_length, -1);
730 for (int col = 0; col < row_length; ++col)
731 row_indices[col] = sparsity_pattern.column_number(global_row, col);
732
733 if (row_space_map.MyGID(global_row))
734 graph->InsertGlobalIndices(global_row,
735 row_length,
736 row_indices.data());
737 else
738 {
739 Assert(nonlocal_graph.get() != nullptr, ExcInternalError());
740 nonlocal_graph->InsertGlobalIndices(global_row,
741 row_length,
742 row_indices.data());
743 }
744 }
745
746 // finalize nonlocal graph and create nonlocal matrix
747 if (nonlocal_graph.get() != nullptr)
748 {
749 // must make sure the IndicesAreGlobal flag is set on all processors
750 // because some processors might not call InsertGlobalIndices (and
751 // we do not want to insert dummy indices on all processors for
752 // large-scale simulations due to the bad impact on performance)
753 nonlocal_graph->SetIndicesAreGlobal();
754 Assert(nonlocal_graph->IndicesAreGlobal() == true,
755 ExcInternalError());
756 nonlocal_graph->FillComplete(*column_space_map, row_space_map);
757 nonlocal_graph->OptimizeStorage();
758
759 // insert data from nonlocal graph into the final sparsity pattern
760 if (exchange_data)
761 {
762 Epetra_Export exporter(nonlocal_graph->RowMap(), row_space_map);
763 int ierr = graph->Export(*nonlocal_graph, exporter, Add);
764 (void)ierr;
765 Assert(ierr == 0, ExcTrilinosError(ierr));
766 }
767
768 nonlocal_matrix =
769 std::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);
770 }
771
772 graph->FillComplete(*column_space_map, row_space_map);
773 graph->OptimizeStorage();
774
775 AssertDimension(sparsity_pattern.n_cols(),
776 TrilinosWrappers::n_global_cols(*graph));
777
778 matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
779 }
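
    // Sketch of the situation this specialization optimizes (the index
    // sets are illustrative): a DynamicSparsityPattern restricted to the
    // locally relevant rows, so that writes into ghost rows can be routed
    // through the nonlocal matrix.
    //
    //   DynamicSparsityPattern dsp(n_dofs, n_dofs, locally_relevant_dofs);
    //   // ... fill dsp ...
    //   A.reinit(locally_owned_dofs, locally_owned_dofs, dsp, communicator);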
780 } // namespace
781
782
783
784 template <typename SparsityPatternType>
785 void
786 SparseMatrix::reinit(const SparsityPatternType &sparsity_pattern)
787 {
788 reinit_matrix(complete_index_set(sparsity_pattern.n_rows()),
789 complete_index_set(sparsity_pattern.n_cols()),
790 sparsity_pattern,
791 false,
792 MPI_COMM_SELF,
793 column_space_map,
794 matrix,
795 nonlocal_matrix,
796 nonlocal_matrix_exporter);
797 }
798
799
800
801 template <typename SparsityPatternType>
802 inline typename std::enable_if<
803 !std::is_same<SparsityPatternType,
804 ::SparseMatrix<double>>::value>::type
805 SparseMatrix::reinit(const IndexSet & row_parallel_partitioning,
806 const IndexSet & col_parallel_partitioning,
807 const SparsityPatternType &sparsity_pattern,
808 const MPI_Comm & communicator,
809 const bool exchange_data)
810 {
811 reinit_matrix(row_parallel_partitioning,
812 col_parallel_partitioning,
813 sparsity_pattern,
814 exchange_data,
815 communicator,
816 column_space_map,
817 matrix,
818 nonlocal_matrix,
819 nonlocal_matrix_exporter);
820
821 // In the end, the matrix needs to be compressed in order to be really
822 // ready.
823 last_action = Zero;
824 compress(VectorOperation::insert);
825 }
826
827
828
829 void
830 SparseMatrix::reinit(const SparsityPattern &sparsity_pattern)
831 {
832 matrix.reset();
833 nonlocal_matrix_exporter.reset();
834
835 // reinit with a (parallel) Trilinos sparsity pattern.
836 column_space_map =
837 std::make_unique<Epetra_Map>(sparsity_pattern.domain_partitioner());
838 matrix = std::make_unique<Epetra_FECrsMatrix>(
839 Copy, sparsity_pattern.trilinos_sparsity_pattern(), false);
840
841 if (sparsity_pattern.nonlocal_graph.get() != nullptr)
842 nonlocal_matrix =
843 std::make_unique<Epetra_CrsMatrix>(Copy,
844 *sparsity_pattern.nonlocal_graph);
845 else
846 nonlocal_matrix.reset();
847
848 last_action = Zero;
849 compress(VectorOperation::insert);
850 }
851
852
853
854 void
855 SparseMatrix::reinit(const SparseMatrix &sparse_matrix)
856 {
857 if (this == &sparse_matrix)
858 return;
859
860 column_space_map =
861 std::make_unique<Epetra_Map>(sparse_matrix.trilinos_matrix().DomainMap());
862 matrix.reset();
863 nonlocal_matrix_exporter.reset();
864 matrix = std::make_unique<Epetra_FECrsMatrix>(
865 Copy, sparse_matrix.trilinos_sparsity_pattern(), false);
866
867 if (sparse_matrix.nonlocal_matrix != nullptr)
868 nonlocal_matrix = std::make_unique<Epetra_CrsMatrix>(
869 Copy, sparse_matrix.nonlocal_matrix->Graph());
870 else
871 nonlocal_matrix.reset();
872
873 last_action = Zero;
874 compress(VectorOperation::insert);
875 }
876
877
878
879 template <typename number>
880 inline void
881 SparseMatrix::reinit(
882 const IndexSet & row_parallel_partitioning,
883 const IndexSet & col_parallel_partitioning,
884 const ::SparseMatrix<number> &dealii_sparse_matrix,
885 const MPI_Comm & communicator,
886 const double drop_tolerance,
887 const bool copy_values,
888 const ::SparsityPattern * use_this_sparsity)
889 {
890 if (copy_values == false)
891 {
892 // in case we do not copy values, just
893 // call the other function.
894 if (use_this_sparsity == nullptr)
895 reinit(row_parallel_partitioning,
896 col_parallel_partitioning,
897 dealii_sparse_matrix.get_sparsity_pattern(),
898 communicator,
899 false);
900 else
901 reinit(row_parallel_partitioning,
902 col_parallel_partitioning,
903 *use_this_sparsity,
904 communicator,
905 false);
906 return;
907 }
908
909 const size_type n_rows = dealii_sparse_matrix.m();
910
911 AssertDimension(row_parallel_partitioning.size(), n_rows);
912 AssertDimension(col_parallel_partitioning.size(), dealii_sparse_matrix.n());
913
914 const ::SparsityPattern &sparsity_pattern =
915 (use_this_sparsity != nullptr) ?
916 *use_this_sparsity :
917 dealii_sparse_matrix.get_sparsity_pattern();
918
919 if (matrix.get() == nullptr || m() != n_rows ||
920 n_nonzero_elements() != sparsity_pattern.n_nonzero_elements())
921 {
922 reinit(row_parallel_partitioning,
923 col_parallel_partitioning,
924 sparsity_pattern,
925 communicator,
926 false);
927 }
928
929 // fill the values. the same as above: go through all rows of the
930 // matrix, and then all columns. since the sparsity patterns of the
931 // input matrix and the specified sparsity pattern might be different,
932 // need to go through the row for both these sparsity structures
933 // simultaneously in order to really set the correct values.
934 size_type maximum_row_length = matrix->MaxNumEntries();
935 std::vector<size_type> row_indices(maximum_row_length);
936 std::vector<TrilinosScalar> values(maximum_row_length);
937
938 for (size_type row = 0; row < n_rows; ++row)
939 // see if the row is locally stored on this processor
940 if (row_parallel_partitioning.is_element(row) == true)
941 {
942 ::SparsityPattern::iterator select_index =
943 sparsity_pattern.begin(row);
944 typename ::SparseMatrix<number>::const_iterator it =
945 dealii_sparse_matrix.begin(row);
946 size_type col = 0;
947 if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
948 {
949 // optimized diagonal
950 AssertDimension(it->column(), row);
951 if (std::fabs(it->value()) > drop_tolerance)
952 {
953 values[col] = it->value();
954 row_indices[col++] = it->column();
955 }
956 ++select_index;
957 ++it;
958 }
959
960 while (it != dealii_sparse_matrix.end(row) &&
961 select_index != sparsity_pattern.end(row))
962 {
963 while (select_index->column() < it->column() &&
964 select_index != sparsity_pattern.end(row))
965 ++select_index;
966 while (it->column() < select_index->column() &&
967 it != dealii_sparse_matrix.end(row))
968 ++it;
969
970 if (it == dealii_sparse_matrix.end(row))
971 break;
972 if (std::fabs(it->value()) > drop_tolerance)
973 {
974 values[col] = it->value();
975 row_indices[col++] = it->column();
976 }
977 ++select_index;
978 ++it;
979 }
980 set(row,
981 col,
982 reinterpret_cast<size_type *>(row_indices.data()),
983 values.data(),
984 false);
985 }
986 compress(VectorOperation::insert);
987 }
988
989
990
991 template <typename number>
992 void
993 SparseMatrix::reinit(
994 const ::SparseMatrix<number> &dealii_sparse_matrix,
995 const double drop_tolerance,
996 const bool copy_values,
997 const ::SparsityPattern * use_this_sparsity)
998 {
999 reinit(complete_index_set(dealii_sparse_matrix.m()),
1000 complete_index_set(dealii_sparse_matrix.n()),
1001 dealii_sparse_matrix,
1002 MPI_COMM_SELF,
1003 drop_tolerance,
1004 copy_values,
1005 use_this_sparsity);
1006 }
1007
1008
1009
1010 void
1011 SparseMatrix::reinit(const Epetra_CrsMatrix &input_matrix,
1012 const bool copy_values)
1013 {
1014 Assert(input_matrix.Filled() == true,
1015 ExcMessage("Input CrsMatrix has not called FillComplete()!"));
1016
1017 column_space_map = std::make_unique<Epetra_Map>(input_matrix.DomainMap());
1018
1019 const Epetra_CrsGraph *graph = &input_matrix.Graph();
1020
1021 nonlocal_matrix.reset();
1022 nonlocal_matrix_exporter.reset();
1023 matrix.reset();
1024 matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
1025
1026 matrix->FillComplete(*column_space_map, input_matrix.RangeMap(), true);
1027
1028 if (copy_values == true)
1029 {
1030 // point to the first data entry in the two
1031 // matrices and copy the content
1032 const TrilinosScalar *in_values = input_matrix[0];
1033 TrilinosScalar * values = (*matrix)[0];
1034 const size_type my_nonzeros = input_matrix.NumMyNonzeros();
1035 std::memcpy(values, in_values, my_nonzeros * sizeof(TrilinosScalar));
1036 }
1037
1038 last_action = Zero;
1039 compress(VectorOperation::insert);
1040 }
1041
1042
1043
1044 void
1045 SparseMatrix::compress(::VectorOperation::values operation)
1046 {
1047 Epetra_CombineMode mode = last_action;
1048 if (last_action == Zero)
1049 {
1050 if ((operation == ::VectorOperation::add) ||
1051 (operation == ::VectorOperation::unknown))
1052 mode = Add;
1053 else if (operation == ::VectorOperation::insert)
1054 mode = Insert;
1055 else
1056 Assert(
1057 false,
1058 ExcMessage(
1059 "compress() can only be called with VectorOperation add, insert, or unknown"));
1060 }
1061 else
1062 {
1063 Assert(((last_action == Add) &&
1064 (operation != ::VectorOperation::insert)) ||
1065 ((last_action == Insert) &&
1066 (operation != ::VectorOperation::add)),
1067 ExcMessage("Operation and argument to compress() do not match"));
1068 }
1069
1070 // flush buffers
1071 int ierr;
1072 if (nonlocal_matrix.get() != nullptr && mode == Add)
1073 {
1074 // do only export in case of an add() operation, otherwise the owning
1075 // processor must have set the correct entry
1076 nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
1077 if (nonlocal_matrix_exporter.get() == nullptr)
1078 nonlocal_matrix_exporter =
1079 std::make_unique<Epetra_Export>(nonlocal_matrix->RowMap(),
1080 matrix->RowMap());
1081 ierr =
1082 matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
1083 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1084 ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
1085 nonlocal_matrix->PutScalar(0);
1086 }
1087 else
1088 ierr =
1089 matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true, mode);
1090
1091 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1092
1093 ierr = matrix->OptimizeStorage();
1094 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1095
1096 last_action = Zero;
1097
1098 compressed = true;
1099 }
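
// Typical assembly pattern around compress() (a sketch; the indices and
// the value are hypothetical):
//
//   A.add(i, j, cell_value);          // possibly into ghost rows
//   // ... more add() calls ...
//   A.compress(VectorOperation::add); // communicate and finalize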
1100
1101
1102
1103 void
1104 SparseMatrix::clear()
1105 {
1106 // When we clear the matrix, reset
1107 // the pointer and generate an
1108 // empty matrix.
1109 column_space_map =
1110 std::make_unique<Epetra_Map>(0, 0, Utilities::Trilinos::comm_self());
1111 matrix = std::make_unique<Epetra_FECrsMatrix>(View, *column_space_map, 0);
1112 nonlocal_matrix.reset();
1113 nonlocal_matrix_exporter.reset();
1114
1115 matrix->FillComplete();
1116
1117 compressed = true;
1118 }
1119
1120
1121
1122 void
1123 SparseMatrix::clear_row(const size_type row,
1124 const TrilinosScalar new_diag_value)
1125 {
1126 Assert(matrix->Filled() == true, ExcMatrixNotCompressed());
1127
1128 // Only do this on the rows owned
1129 // locally on this processor.
1130 int local_row =
1131 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1132 if (local_row >= 0)
1133 {
1134 TrilinosScalar *values;
1135 int * col_indices;
1136 int num_entries;
1137 const int ierr =
1138 matrix->ExtractMyRowView(local_row, num_entries, values, col_indices);
1139 (void)ierr;
1140
1141 Assert(ierr == 0, ExcTrilinosError(ierr));
1142
1143 const std::ptrdiff_t diag_index =
1144 std::find(col_indices, col_indices + num_entries, local_row) -
1145 col_indices;
1146
1147 for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
1148 if (diag_index != j || new_diag_value == 0)
1149 values[j] = 0.;
1150
1151 if (diag_index != num_entries)
1152 values[diag_index] = new_diag_value;
1153 }
1154 }
1155
1156
1157
1158 void
1159 SparseMatrix::clear_rows(const std::vector<size_type> &rows,
1160 const TrilinosScalar new_diag_value)
1161 {
1162 for (const auto row : rows)
1163 clear_row(row, new_diag_value);
1164 }
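
// Sketch: a common use of clear_rows() is eliminating Dirichlet-constrained
// rows while keeping the matrix regular (the row indices are hypothetical):
//
//   std::vector<types::global_dof_index> bc_rows = {0, 17, 42};
//   A.clear_rows(bc_rows, 1.0); // zero the rows, put 1.0 on the diagonal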
1165
1166
1167
1168 TrilinosScalar
1169 SparseMatrix::operator()(const size_type i, const size_type j) const
1170 {
1171 // Extract local indices in
1172 // the matrix.
1173 int trilinos_i =
1174 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1175 trilinos_j =
1176 matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1177 TrilinosScalar value = 0.;
1178
1179 // If the data is not on the
1180 // present processor, we throw
1181 // an exception. This is one of
1182 // the two tiny differences to
1183 // the el(i,j) call, which does
1184 // not throw any assertions.
1185 if (trilinos_i == -1)
1186 {
1187 Assert(false,
1188 ExcAccessToNonLocalElement(
1189 i, j, local_range().first, local_range().second - 1));
1190 }
1191 else
1192 {
1193 // Check whether the matrix has
1194 // already been transformed to local
1195 // indices.
1196 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1197
1198 // Prepare pointers for extraction
1199 // of a view of the row.
1200 int nnz_present = matrix->NumMyEntries(trilinos_i);
1201 int nnz_extracted;
1202 int * col_indices;
1203 TrilinosScalar *values;
1204
1205 // Generate the view and make
1206 // sure that we have not generated
1207 // an error.
1208 // TODO Check that col_indices are int and not long long
1209 int ierr = matrix->ExtractMyRowView(trilinos_i,
1210 nnz_extracted,
1211 values,
1212 col_indices);
1213 (void)ierr;
1214 Assert(ierr == 0, ExcTrilinosError(ierr));
1215
1216 Assert(nnz_present == nnz_extracted,
1217 ExcDimensionMismatch(nnz_present, nnz_extracted));
1218
1219 // Search the index where we
1220 // look for the value, and then
1221 // finally get it.
1222 const std::ptrdiff_t local_col_index =
1223 std::find(col_indices, col_indices + nnz_present, trilinos_j) -
1224 col_indices;
1225
1226 // This is actually the only
1227 // difference to the el(i,j)
1228 // function, which means that
1229 // we throw an exception in
1230 // this case instead of just
1231 // returning zero for an
1232 // element that is not present
1233 // in the sparsity pattern.
1234 if (local_col_index == nnz_present)
1235 {
1236 Assert(false, ExcInvalidIndex(i, j));
1237 }
1238 else
1239 value = values[local_col_index];
1240 }
1241
1242 return value;
1243 }
1244
1245
1246
1247 TrilinosScalar
1248 SparseMatrix::el(const size_type i, const size_type j) const
1249 {
1250 // Extract local indices in
1251 // the matrix.
1252 int trilinos_i =
1253 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1254 trilinos_j =
1255 matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1256 TrilinosScalar value = 0.;
1257
1258 // If the data is not on the
1259 // present processor, we can't
1260 // continue. Just print out zero
1261 // as discussed in the
1262 // documentation of this
1263 // function. if you want error
1264 // checking, use operator().
1265 if ((trilinos_i == -1) || (trilinos_j == -1))
1266 return 0.;
1267 else
1268 {
1269 // Check whether the matrix
1270 // already is transformed to
1271 // local indices.
1272 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1273
1274 // Prepare pointers for extraction
1275 // of a view of the row.
1276 int nnz_present = matrix->NumMyEntries(trilinos_i);
1277 int nnz_extracted;
1278 int * col_indices;
1279 TrilinosScalar *values;
1280
1281 // Generate the view and make
1282 // sure that we have not generated
1283 // an error.
1284 int ierr = matrix->ExtractMyRowView(trilinos_i,
1285 nnz_extracted,
1286 values,
1287 col_indices);
1288 (void)ierr;
1289 Assert(ierr == 0, ExcTrilinosError(ierr));
1290
1291 Assert(nnz_present == nnz_extracted,
1292 ExcDimensionMismatch(nnz_present, nnz_extracted));
1293
1294 // Search the index where we
1295 // look for the value, and then
1296 // finally get it.
1297 const std::ptrdiff_t local_col_index =
1298 std::find(col_indices, col_indices + nnz_present, trilinos_j) -
1299 col_indices;
1300
1301 // This is actually the only
1302 // difference to the () function
1303 // querying (i,j), where we throw an
1304 // exception instead of just
1305 // returning zero for an element
1306 // that is not present in the
1307 // sparsity pattern.
1308 if (local_col_index == nnz_present)
1309 value = 0;
1310 else
1311 value = values[local_col_index];
1312 }
1313
1314 return value;
1315 }
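
// The difference between the two accessors in a sketch: for an entry
// (i, j) that is not in the locally stored sparsity pattern,
//
//   A.el(i, j); // returns zero
//   A(i, j);    // fails an assertion (ExcInvalidIndex)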
1316
1317
1318
1319 TrilinosScalar
1320 SparseMatrix::diag_element(const size_type i) const
1321 {
1322 Assert(m() == n(), ExcNotQuadratic());
1323
1324# ifdef DEBUG
1325 // use operator() in debug mode because
1326 // it checks if this is a valid element
1327 // (in parallel)
1328 return operator()(i, i);
1329# else
1330 // Trilinos doesn't seem to have a
1331 // more efficient way to access the
1332 // diagonal than by just using the
1333 // standard el(i,j) function.
1334 return el(i, i);
1335# endif
1336 }
1337
1338
1339
1340 unsigned int
1341 SparseMatrix::row_length(const size_type row) const
1342 {
1343 Assert(row < m(), ExcInternalError());
1344
1345 // get a representation of the
1346 // present row
1347 int ncols = -1;
1348 int local_row =
1349 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1350 Assert((local_row >= 0), ExcAccessToNonlocalRow(row));
1351
1352 // on the processor who owns this
1353 // row, we'll have a non-negative
1354 // value.
1355 if (local_row >= 0)
1356 {
1357 int ierr = matrix->NumMyRowEntries(local_row, ncols);
1358 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1359 }
1360
1361 return static_cast<unsigned int>(ncols);
1362 }
1363
1364
1365
1366 void
1367 SparseMatrix::set(const std::vector<size_type> & row_indices,
1368 const std::vector<size_type> & col_indices,
1369 const FullMatrix<TrilinosScalar> & values,
1370 const bool elide_zero_values)
1371 {
1372 Assert(row_indices.size() == values.m(),
1373 ExcDimensionMismatch(row_indices.size(), values.m()));
1374 Assert(col_indices.size() == values.n(),
1375 ExcDimensionMismatch(col_indices.size(), values.n()));
1376
1377 for (size_type i = 0; i < row_indices.size(); ++i)
1378 set(row_indices[i],
1379 col_indices.size(),
1380 col_indices.data(),
1381 &values(i, 0),
1382 elide_zero_values);
1383 }
1384
1385
1386
1387 void
1388 SparseMatrix::set(const size_type row,
1389 const std::vector<size_type> & col_indices,
1390 const std::vector<TrilinosScalar> &values,
1391 const bool elide_zero_values)
1392 {
1393 Assert(col_indices.size() == values.size(),
1394 ExcDimensionMismatch(col_indices.size(), values.size()));
1395
1396 set(row,
1397 col_indices.size(),
1398 col_indices.data(),
1399 values.data(),
1400 elide_zero_values);
1401 }
1402
1403
1404
1405 template <>
1406 void
1407 SparseMatrix::set<TrilinosScalar>(const size_type row,
1408 const size_type n_cols,
1409 const size_type * col_indices,
1410 const TrilinosScalar *values,
1411 const bool elide_zero_values)
1412 {
1413 AssertIndexRange(row, this->m());
1414
1415 int ierr;
1416 if (last_action == Add)
1417 {
1418 ierr =
1419 matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true);
1420
1421 Assert(ierr == 0, ExcTrilinosError(ierr));
1422 }
1423
1424 last_action = Insert;
1425
1426 const TrilinosWrappers::types::int_type *col_index_ptr;
1427 const TrilinosScalar * col_value_ptr;
1428 const TrilinosWrappers::types::int_type trilinos_row = row;
1429 int n_columns;
1430
1431 boost::container::small_vector<TrilinosScalar, 200> local_value_array(
1432 n_cols);
1433 boost::container::small_vector<TrilinosWrappers::types::int_type, 200>
1434 local_index_array(n_cols);
1435
1436 // If we don't elide zeros, the pointers are already available... need to
1437 // cast to non-const pointers as that is the format taken by Trilinos (but
1438 // we will not modify const data)
1439 if (elide_zero_values == false)
1440 {
1441 col_index_ptr =
1442 reinterpret_cast<const TrilinosWrappers::types::int_type *>(
1443 col_indices);
1444 col_value_ptr = values;
1445 n_columns = n_cols;
1446 }
1447 else
1448 {
1449 // Otherwise, extract nonzero values in each row and get the
1450 // respective indices.
1451 col_index_ptr = local_index_array.data();
1452 col_value_ptr = local_value_array.data();
1453
1454 n_columns = 0;
1455 for (size_type j = 0; j < n_cols; ++j)
1456 {
1457 const double value = values[j];
1458 AssertIsFinite(value);
1459 if (value != 0)
1460 {
1461 local_index_array[n_columns] = col_indices[j];
1462 local_value_array[n_columns] = value;
1463 n_columns++;
1464 }
1465 }
1466
1467 AssertIndexRange(n_columns, n_cols + 1);
1468 }
1469
1470
1471 // If the calling matrix owns the row to which we want to insert values,
1472 // we can directly call the Epetra_CrsMatrix input function, which is much
1473 // faster than the Epetra_FECrsMatrix function. We distinguish between two
1474 // cases: the first one is when the matrix is not filled (i.e., it is
1475 // possible to add new elements to the sparsity pattern), and the second
1476 // one is when the pattern is already fixed. In the former case, we add
1477 // the possibility to insert new values, and in the second we just replace
1478 // data.
1479 if (matrix->RowMap().MyGID(
1480 static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1481 {
1482 if (matrix->Filled() == false)
1483 {
1484 ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
1485 row, static_cast<int>(n_columns), col_value_ptr, col_index_ptr);
1486
1487 // When inserting elements, we do not want to create exceptions in
1488 // the case when inserting non-local data (since that's what we
1489 // want to do right now).
1490 if (ierr > 0)
1491 ierr = 0;
1492 }
1493 else
1494 ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row,
1495 n_columns,
1496 col_value_ptr,
1497 col_index_ptr);
1498 }
1499 else
1500 {
1501 // When we're at off-processor data, we have to stick with the
1502 // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
1503 // we call it is the fastest one (any other will lead to repeated
1504 // allocation and deallocation of memory in order to call the function
1505 // we already use, which is very inefficient if writing one element at
1506 // a time).
1507 compressed = false;
1508
1509 if (matrix->Filled() == false)
1510 {
1511 ierr = matrix->InsertGlobalValues(1,
1512 &trilinos_row,
1513 n_columns,
1514 col_index_ptr,
1515 &col_value_ptr,
1516 Epetra_FECrsMatrix::ROW_MAJOR);
1517 if (ierr > 0)
1518 ierr = 0;
1519 }
1520 else
1521 ierr = matrix->ReplaceGlobalValues(1,
1522 &trilinos_row,
1523 n_columns,
1524 col_index_ptr,
1525 &col_value_ptr,
1526 Epetra_FECrsMatrix::ROW_MAJOR);
1527 // use the FECrsMatrix facilities for set even in the case when we
1528 // have explicitly set the off-processor rows because that only works
1529 // properly when adding elements, not when setting them (since we want
1530 // to only touch elements that have been set explicitly, and there is
1531 // no way on the receiving processor to identify them otherwise)
1532 }
1533
1534 Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1535 AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
1536 }
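
// Sketch of the effect of elide_zero_values on set() (the row, column
// indices, and values are hypothetical):
//
//   const SparseMatrix::size_type cols[] = {1, 2, 3};
//   const TrilinosScalar vals[] = {0.5, 0.0, 1.5};
//   A.set(row, 3, cols, vals, /*elide_zero_values=*/true);
//   // only 0.5 and 1.5 are written; the zero entry is skipped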
1537
1538
1539
1540 void
1541 SparseMatrix::add(const std::vector<size_type> & indices,
1542 const FullMatrix<TrilinosScalar> & values,
1543 const bool elide_zero_values)
1544 {
1545 Assert(indices.size() == values.m(),
1546 ExcDimensionMismatch(indices.size(), values.m()));
1547 Assert(values.m() == values.n(), ExcNotQuadratic());
1548
1549 for (size_type i = 0; i < indices.size(); ++i)
1550 add(indices[i],
1551 indices.size(),
1552 indices.data(),
1553 &values(i, 0),
1554 elide_zero_values);
1555 }
1556
1557
1558
1559 void
1560 SparseMatrix::add(const std::vector<size_type> & row_indices,
1561 const std::vector<size_type> & col_indices,
1562 const FullMatrix<TrilinosScalar> & values,
1563 const bool elide_zero_values)
1564 {
1565 Assert(row_indices.size() == values.m(),
1566 ExcDimensionMismatch(row_indices.size(), values.m()));
1567 Assert(col_indices.size() == values.n(),
1568 ExcDimensionMismatch(col_indices.size(), values.n()));
1569
1570 for (size_type i = 0; i < row_indices.size(); ++i)
1571 add(row_indices[i],
1572 col_indices.size(),
1573 col_indices.data(),
1574 &values(i, 0),
1575 elide_zero_values);
1576 }
1577
1578
1579
1580 void
1581 SparseMatrix::add(const size_type row,
1582 const std::vector<size_type> & col_indices,
1583 const std::vector<TrilinosScalar> &values,
1584 const bool elide_zero_values)
1585 {
1586 Assert(col_indices.size() == values.size(),
1587 ExcDimensionMismatch(col_indices.size(), values.size()));
1588
1589 add(row,
1590 col_indices.size(),
1591 col_indices.data(),
1592 values.data(),
1593 elide_zero_values);
1594 }
1595
1596
1597
1598 void
1599 SparseMatrix::add(const size_type row,
1600 const size_type n_cols,
1601 const size_type * col_indices,
1602 const TrilinosScalar *values,
1603 const bool elide_zero_values,
1604 const bool /*col_indices_are_sorted*/)
1605 {
1606 AssertIndexRange(row, this->m());
1607 int ierr;
1608 if (last_action == Insert)
1609 {
1610 // TODO: this could lead to a deadlock when only one processor
1611 // calls GlobalAssemble.
1612 ierr =
1613 matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), false);
1614
1615 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1616 }
1617
1618 last_action = Add;
1619
1620 const TrilinosWrappers::types::int_type *col_index_ptr;
1621 const TrilinosScalar * col_value_ptr;
1622 const TrilinosWrappers::types::int_type trilinos_row = row;
1623 int n_columns;
1624
1625 boost::container::small_vector<TrilinosScalar, 100> local_value_array(
1626 n_cols);
1627 boost::container::small_vector<TrilinosWrappers::types::int_type, 100>
1628 local_index_array(n_cols);
1629
1630 // If we don't elide zeros, the pointers are already available... need to
1631 // cast to non-const pointers as that is the format taken by Trilinos (but
1632 // we will not modify const data)
1633 if (elide_zero_values == false)
1634 {
1635 col_index_ptr =
1636 reinterpret_cast<const TrilinosWrappers::types::int_type *>(
1637 col_indices);
1638 col_value_ptr = values;
1639 n_columns = n_cols;
1640# ifdef DEBUG
1641 for (size_type j = 0; j < n_cols; ++j)
1642 AssertIsFinite(values[j]);
1643# endif
1644 }
1645 else
1646 {
1647 // Otherwise, extract nonzero values in each row and the corresponding
1648 // index.
1649 col_index_ptr = local_index_array.data();
1650 col_value_ptr = local_value_array.data();
1651
1652 n_columns = 0;
1653 for (size_type j = 0; j < n_cols; ++j)
1654 {
1655 const double value = values[j];
1656
1657 AssertIsFinite(value);
1658 if (value != 0)
1659 {
1660 local_index_array[n_columns] = col_indices[j];
1661 local_value_array[n_columns] = value;
1662 ++n_columns;
1663 }
1664 }
1665
1666 AssertIndexRange(n_columns, n_cols + 1);
1667 }
1668 // Exit early if there is nothing to do
1669 if (n_columns == 0)
1670 {
1671 return;
1672 }
1673
1674 // If the calling processor owns the row to which we want to add values, we
1675 // can directly call the Epetra_CrsMatrix input function, which is much
1676 // faster than the Epetra_FECrsMatrix function.
1677 if (matrix->RowMap().MyGID(
1678 static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1679 {
1680 ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row,
1681 n_columns,
1682 col_value_ptr,
1683 col_index_ptr);
1684 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1685 }
1686 else if (nonlocal_matrix.get() != nullptr)
1687 {
1688 compressed = false;
1689 // this is the case when we have explicitly set the off-processor rows
1690 // and want to create a separate matrix object for them (to retain
1691 // thread-safety)
1692 Assert(nonlocal_matrix->RowMap().LID(
1693 static_cast<TrilinosWrappers::types::int_type>(row)) != -1,
1694 ExcMessage("Attempted to write into off-processor matrix row "
1695 "that has not be specified as being writable upon "
1696 "initialization"));
1697 ierr = nonlocal_matrix->SumIntoGlobalValues(row,
1698 n_columns,
1699 col_value_ptr,
1700 col_index_ptr);
1701 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1702 }
1703 else
1704 {
1705 // When we're at off-processor data, we have to stick with the
1706 // standard SumIntoGlobalValues function. Nevertheless, the way we
1707 // call it is the fastest one (any other will lead to repeated
1708 // allocation and deallocation of memory in order to call the function
1709 // we already use, which is very inefficient if writing one element at
1710 // a time).
1711 compressed = false;
1712
1713 ierr = matrix->SumIntoGlobalValues(1,
1714 &trilinos_row,
1715 n_columns,
1716 col_index_ptr,
1717 &col_value_ptr,
1718 Epetra_FECrsMatrix::ROW_MAJOR);
1719 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1720 }
1721
1722# ifdef DEBUG
1723 if (ierr > 0)
1724 {
1725 std::cout << "------------------------------------------" << std::endl;
1726 std::cout << "Got error " << ierr << " in row " << row << " of proc "
1727 << matrix->RowMap().Comm().MyPID()
1728 << " when trying to add the columns:" << std::endl;
1729 for (TrilinosWrappers::types::int_type i = 0; i < n_columns; ++i)
1730 std::cout << col_index_ptr[i] << " ";
1731 std::cout << std::endl << std::endl;
1732 std::cout << "Matrix row "
1733 << (matrix->RowMap().MyGID(
1734 static_cast<TrilinosWrappers::types::int_type>(row)) ==
1735 false ?
1736 "(nonlocal part)" :
1737 "")
1738 << " has the following indices:" << std::endl;
1739 std::vector<TrilinosWrappers::types::int_type> indices;
1740 const Epetra_CrsGraph * graph =
1741 (nonlocal_matrix.get() != nullptr &&
1742 matrix->RowMap().MyGID(
1743 static_cast<TrilinosWrappers::types::int_type>(row)) == false) ?
1744 &nonlocal_matrix->Graph() :
1745 &matrix->Graph();
1746
1747 indices.resize(graph->NumGlobalIndices(row));
1748 int n_indices = 0;
1749 graph->ExtractGlobalRowCopy(row,
1750 indices.size(),
1751 n_indices,
1752 indices.data());
1753 AssertDimension(n_indices, indices.size());
1754
1755 for (TrilinosWrappers::types::int_type i = 0; i < n_indices; ++i)
1756 std::cout << indices[i] << " ";
1757 std::cout << std::endl << std::endl;
1758 Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1759 }
1760# endif
1761 Assert(ierr >= 0, ExcTrilinosError(ierr));
1762 }
1763
1764
1765
1766 SparseMatrix &
1767 SparseMatrix::operator=(const double d)
1768 {
1769 Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
1770 compress(
1771 ::VectorOperation::unknown); // TODO: why do we do this? Should we
1772 // not check for is_compressed?
1773
1774 const int ierr = matrix->PutScalar(d);
1775 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1776 if (nonlocal_matrix.get() != nullptr)
1777 nonlocal_matrix->PutScalar(d);
1778
1779 return *this;
1780 }
1781
1782
1783
1784 void
1785 SparseMatrix::add(const TrilinosScalar factor, const SparseMatrix &rhs)
1786 {
1787 AssertDimension(rhs.m(), m());
1788 AssertDimension(rhs.n(), n());
1791 Assert(matrix->RowMap().SameAs(rhs.matrix->RowMap()),
1792 ExcMessage("Can only add matrices with same distribution of rows"));
1793 Assert(matrix->Filled() && rhs.matrix->Filled(),
1794 ExcMessage("Addition of matrices only allowed if matrices are "
1795 "filled, i.e., compress() has been called"));
1796
1797 const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());
1798
1799 for (const auto row : locally_owned_range_indices())
1800 {
1801 const int row_local = matrix->RowMap().LID(
1802 static_cast<TrilinosWrappers::types::int_type>(row));
1803 Assert((row_local >= 0), ExcAccessToNonlocalRow(row));
1804
1805 // First get a view to the matrix columns of both matrices. Note that
1806 // the data is in local index spaces so we need to be careful not only
1807 // to compare column indices in case they are derived from the same
1808 // map.
1809 int n_entries, rhs_n_entries;
1810 TrilinosScalar *value_ptr, *rhs_value_ptr;
1811 int * index_ptr, *rhs_index_ptr;
1812 int ierr = rhs.matrix->ExtractMyRowView(row_local,
1813 rhs_n_entries,
1814 rhs_value_ptr,
1815 rhs_index_ptr);
1816 (void)ierr;
1817 Assert(ierr == 0, ExcTrilinosError(ierr));
1818
1819 ierr =
1820 matrix->ExtractMyRowView(row_local, n_entries, value_ptr, index_ptr);
1821 Assert(ierr == 0, ExcTrilinosError(ierr));
1822 bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
1823 if (!expensive_checks)
1824 {
1825 // check if the column indices are the same. If yes, can simply
1826 // copy over the data.
1827 expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
1828 static_cast<void *>(rhs_index_ptr),
1829 sizeof(int) * n_entries) != 0;
1830 if (!expensive_checks)
1831 for (int i = 0; i < n_entries; ++i)
1832 value_ptr[i] += rhs_value_ptr[i] * factor;
1833 }
1834 // Now to the expensive case where we need to check all column indices
1835 // against each other (transformed into global index space) and where
1836 // we need to make sure that all entries we are about to add into the
1837 // lhs matrix actually exist
1838 if (expensive_checks)
1839 {
1840 for (int i = 0; i < rhs_n_entries; ++i)
1841 {
1842 if (rhs_value_ptr[i] == 0.)
1843 continue;
1844 const TrilinosWrappers::types::int_type rhs_global_col =
1845 global_column_index(*rhs.matrix, rhs_index_ptr[i]);
1846 int local_col = matrix->ColMap().LID(rhs_global_col);
1847 int *local_index = Utilities::lower_bound(index_ptr,
1848 index_ptr + n_entries,
1849 local_col);
1850 Assert(local_index != index_ptr + n_entries &&
1851 *local_index == local_col,
1852 ExcMessage(
1853 "Adding the entries from the other matrix "
1854 "failed, because the sparsity pattern "
1855 "of that matrix includes more elements than the "
1856 "calling matrix, which is not allowed."));
1857 value_ptr[local_index - index_ptr] += factor * rhs_value_ptr[i];
1858 }
1859 }
1860 }
1861 }
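
// Sketch: a scaled matrix sum A += 2 B; as asserted above, both matrices
// must be compressed and B's sparsity pattern may not exceed A's:
//
//   A.add(2.0, B);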
1862
1863
1864
1865 void
1866 SparseMatrix::transpose()
1867 {
1868 // This only flips a flag that tells
1869 // Trilinos that any vmult operation
1870 // should be done with the
1871 // transpose. However, the matrix
1872 // structure is not reset.
1873 int ierr;
1874
1875 if (!matrix->UseTranspose())
1876 {
1877 ierr = matrix->SetUseTranspose(true);
1878 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1879 }
1880 else
1881 {
1882 ierr = matrix->SetUseTranspose(false);
1883 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1884 }
1885 }
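
// Sketch: after transpose(), vmult() applies the transposed operator; the
// stored entries are unchanged, only the flag is flipped:
//
//   A.transpose();
//   A.vmult(dst, src); // computes dst = A^T src
//   A.transpose();     // flip back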
1886
1887
1888
1889 SparseMatrix &
1890 SparseMatrix::operator*=(const TrilinosScalar a)
1891 {
1892 const int ierr = matrix->Scale(a);
1893 Assert(ierr == 0, ExcTrilinosError(ierr));
1894 (void)ierr; // removes -Wunused-variable in optimized mode
1895
1896 return *this;
1897 }
1898
1899
1900
1901 SparseMatrix &
1902 SparseMatrix::operator/=(const TrilinosScalar a)
1903 {
1904 Assert(a != 0, ExcDivideByZero());
1905
1906 const TrilinosScalar factor = 1. / a;
1907
1908 const int ierr = matrix->Scale(factor);
1909 Assert(ierr == 0, ExcTrilinosError(ierr));
1910 (void)ierr; // removes -Wunused-variable in optimized mode
1911
1912 return *this;
1913 }
1914
1915
1916
1917 TrilinosScalar
1918 SparseMatrix::l1_norm() const
1919 {
1920 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1921 return matrix->NormOne();
1922 }
1923
1924
1925
1926 TrilinosScalar
1927 SparseMatrix::linfty_norm() const
1928 {
1929 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1930 return matrix->NormInf();
1931 }
1932
1933
1934
1935 TrilinosScalar
1936 SparseMatrix::frobenius_norm() const
1937 {
1938 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1939 return matrix->NormFrobenius();
1940 }
1941
1942
1943
1944 namespace internal
1945 {
1946 namespace SparseMatrixImplementation
1947 {
1948 template <typename VectorType>
1949 inline void
1950 check_vector_map_equality(const Epetra_CrsMatrix &,
1951 const VectorType &,
1952 const VectorType &)
1953 {}
1954
1955 inline void
1956 check_vector_map_equality(const Epetra_CrsMatrix & m,
1957 const TrilinosWrappers::MPI::Vector &in,
1958 const TrilinosWrappers::MPI::Vector &out)
1959 {
1960 Assert(in.trilinos_partitioner().SameAs(m.DomainMap()) == true,
1961 ExcMessage(
1962 "Column map of matrix does not fit with vector map!"));
1963 Assert(out.trilinos_partitioner().SameAs(m.RangeMap()) == true,
1964 ExcMessage("Row map of matrix does not fit with vector map!"));
1965 (void)m;
1966 (void)in;
1967 (void)out;
1968 }
1969 } // namespace SparseMatrixImplementation
1970 } // namespace internal
1971
1972
1973 template <typename VectorType>
1974 typename std::enable_if<
1975 std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
1976 SparseMatrix::vmult(VectorType &dst, const VectorType &src) const
1977 {
1978 Assert(&src != &dst, ExcSourceEqualsDestination());
1979 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1980 (void)src;
1981 (void)dst;
1982
1983 internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
1984 src,
1985 dst);
1986 const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
1987 AssertDimension(dst_local_size, matrix->RangeMap().NumMyPoints());
1988 const size_type src_local_size = internal::end(src) - internal::begin(src);
1989 AssertDimension(src_local_size, matrix->DomainMap().NumMyPoints());
1990
1991 Epetra_MultiVector tril_dst(
1992 View, matrix->RangeMap(), internal::begin(dst), dst_local_size, 1);
1993 Epetra_MultiVector tril_src(View,
1994 matrix->DomainMap(),
1995 const_cast<TrilinosScalar *>(
1996 internal::begin(src)),
1997 src_local_size,
1998 1);
1999
2000 const int ierr = matrix->Multiply(false, tril_src, tril_dst);
2001 Assert(ierr == 0, ExcTrilinosError(ierr));
2002 (void)ierr; // removes -Wunused-variable in optimized mode
2003 }
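 // A minimal usage sketch (not part of this file; A and mpi_communicator
 // are hypothetical): the vectors must match the matrix's domain and
 // range partitioning, as the assertions above enforce.
 //
 //   TrilinosWrappers::MPI::Vector src, dst;
 //   src.reinit(A.locally_owned_domain_indices(), mpi_communicator);
 //   dst.reinit(A.locally_owned_range_indices(), mpi_communicator);
 //   A.vmult(dst, src); // dst = A * src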
2004
2005
2006
2007 template <typename VectorType>
2008 typename std::enable_if<
2009 !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2010 SparseMatrix::vmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2011 {
2012 AssertThrow(false, ExcNotImplemented());
2013 }
2014
2015
2016
2017 template <typename VectorType>
2018 typename std::enable_if<
2019 std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2020 SparseMatrix::Tvmult(VectorType &dst, const VectorType &src) const
2021 {
2022 Assert(&src != &dst, ExcSourceEqualsDestination());
2023 Assert(matrix->Filled(), ExcMatrixNotCompressed());
2024
2025 internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
2026 dst,
2027 src);
2028 const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
2029 AssertDimension(dst_local_size, matrix->DomainMap().NumMyPoints());
2030 const size_type src_local_size = internal::end(src) - internal::begin(src);
2031 AssertDimension(src_local_size, matrix->RangeMap().NumMyPoints());
2032
2033 Epetra_MultiVector tril_dst(
2034 View, matrix->DomainMap(), internal::begin(dst), dst_local_size, 1);
2035 Epetra_MultiVector tril_src(View,
2036 matrix->RangeMap(),
2037 const_cast<double *>(internal::begin(src)),
2038 src_local_size,
2039 1);
2040
2041 const int ierr = matrix->Multiply(true, tril_src, tril_dst);
2042 Assert(ierr == 0, ExcTrilinosError(ierr));
2043 (void)ierr; // removes -Wunused-variable in optimized mode
2044 }
2045
2046
2047
2048 template <typename VectorType>
2049 typename std::enable_if<
2050 !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2051 SparseMatrix::Tvmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2052 {
2053 AssertThrow(false, ExcNotImplemented());
2054 }
2055
2056
2057
2058 template <typename VectorType>
2059 void
2060 SparseMatrix::vmult_add(VectorType &dst, const VectorType &src) const
2061 {
2062 Assert(&src != &dst, ExcSourceEqualsDestination());
2063
2064 // Reinit a temporary vector with fast argument set, which does not
2065 // overwrite the content (to save time).
2066 VectorType tmp_vector;
2067 tmp_vector.reinit(dst, true);
2068 vmult(tmp_vector, src);
2069 dst += tmp_vector;
2070 }
2071
2072
2073
2074 template <typename VectorType>
2075 void
2076 SparseMatrix::Tvmult_add(VectorType &dst, const VectorType &src) const
2077 {
2078 Assert(&src != &dst, ExcSourceEqualsDestination());
2079
2080 // Reinit a temporary vector with fast argument set, which does not
2081 // overwrite the content (to save time).
2082 VectorType tmp_vector;
2083 tmp_vector.reinit(dst, true);
2084 Tvmult(tmp_vector, src);
2085 dst += tmp_vector;
2086 }
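 // Both *_add variants compute the plain product into a temporary and
 // then accumulate, so semantically (with hypothetical names):
 //
 //   A.vmult_add(dst, src);  // behaves like: tmp = A * src;   dst += tmp;
 //   A.Tvmult_add(dst, src); // behaves like: tmp = A^T * src; dst += tmp;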
2087
2088
2089
2090 TrilinosScalar
2091 SparseMatrix::matrix_norm_square(const MPI::Vector &v) const
2092 {
2093 Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2094
2095 MPI::Vector temp_vector;
2096 temp_vector.reinit(v, true);
2097
2098 vmult(temp_vector, v);
2099 return temp_vector * v;
2100 }
2101
2102
2103
2104 TrilinosScalar
2105 SparseMatrix::matrix_scalar_product(const MPI::Vector &u,
2106 const MPI::Vector &v) const
2107 {
2108 Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2109
2110 MPI::Vector temp_vector;
2111 temp_vector.reinit(v, true);
2112
2113 vmult(temp_vector, v);
2114 return u * temp_vector;
2115 }
2116
2117
2118
2119 TrilinosScalar
2120 SparseMatrix::residual(MPI::Vector &      dst,
2121 const MPI::Vector &x,
2122 const MPI::Vector &b) const
2123 {
2124 vmult(dst, x);
2125 dst -= b;
2126 dst *= -1.;
2127
2128 return dst.l2_norm();
2129 }
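 // In other words, residual() computes dst = b - A*x (as -(A*x - b)) and
 // returns its l2 norm, i.e. ||b - A x||_2.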
2130
2131
2132
2133 namespace internals
2134 {
2135 using size_type = SparseMatrix::size_type;
2136
2137 void
2138 perform_mmult(const SparseMatrix &inputleft,
2139 const SparseMatrix &inputright,
2140 SparseMatrix & result,
2141 const MPI::Vector & V,
2142 const bool transpose_left)
2143 {
2144 const bool use_vector = (V.size() == inputright.m() ? true : false);
2145 if (transpose_left == false)
2146 {
2147 Assert(inputleft.n() == inputright.m(),
2148 ExcDimensionMismatch(inputleft.n(), inputright.m()));
2149 Assert(inputleft.trilinos_matrix().DomainMap().SameAs(
2150 inputright.trilinos_matrix().RangeMap()),
2151 ExcMessage("Parallel partitioning of A and B does not fit."));
2152 }
2153 else
2154 {
2155 Assert(inputleft.m() == inputright.m(),
2156 ExcDimensionMismatch(inputleft.m(), inputright.m()));
2157 Assert(inputleft.trilinos_matrix().RangeMap().SameAs(
2158 inputright.trilinos_matrix().RangeMap()),
2159 ExcMessage("Parallel partitioning of A and B does not fit."));
2160 }
2161
2162 result.clear();
2163
2164 // create a suitable operator B: in case
2165 // we do not use a vector, all we need to
2166 // do is to set the pointer. Otherwise,
2167 // we insert the data from B, but
2168 // multiply each row with the respective
2169 // vector element.
2170 Teuchos::RCP<Epetra_CrsMatrix> mod_B;
2171 if (use_vector == false)
2172 {
2173 mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>(
2174 &inputright.trilinos_matrix()),
2175 false);
2176 }
2177 else
2178 {
2179 mod_B = Teuchos::rcp(
2180 new Epetra_CrsMatrix(Copy, inputright.trilinos_sparsity_pattern()),
2181 true);
2182 mod_B->FillComplete(inputright.trilinos_matrix().DomainMap(),
2183 inputright.trilinos_matrix().RangeMap());
2184 Assert(inputright.local_range() == V.local_range(),
2185 ExcMessage("Parallel distribution of matrix B and vector V "
2186 "does not match."));
2187
2188 const int local_N = inputright.local_size();
2189 for (int i = 0; i < local_N; ++i)
2190 {
2191 int N_entries = -1;
2192 double *new_data, *B_data;
2193 mod_B->ExtractMyRowView(i, N_entries, new_data);
2194 inputright.trilinos_matrix().ExtractMyRowView(i,
2195 N_entries,
2196 B_data);
2197 double value = V.trilinos_vector()[0][i];
2198 for (TrilinosWrappers::types::int_type j = 0; j < N_entries; ++j)
2199 new_data[j] = value * B_data[j];
2200 }
2201 }
2202
2203
2204 SparseMatrix tmp_result(transpose_left ?
2205 inputleft.locally_owned_domain_indices() :
2206 inputleft.locally_owned_range_indices(),
2207 inputright.locally_owned_domain_indices(),
2208 inputleft.get_mpi_communicator());
2209
2210# ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
2211 EpetraExt::MatrixMatrix::Multiply(inputleft.trilinos_matrix(),
2212 transpose_left,
2213 *mod_B,
2214 false,
2215 const_cast<Epetra_CrsMatrix &>(
2216 tmp_result.trilinos_matrix()));
2217# else
2218 Assert(false,
2219 ExcMessage("This function requires that the Trilinos "
2220 "installation found while running the deal.II "
2221 "CMake scripts contains the optional Trilinos "
2222 "package 'EpetraExt'. However, this optional "
2223 "part of Trilinos was not found."));
2224# endif
2225 result.reinit(tmp_result.trilinos_matrix());
2226 }
2227 } // namespace internals
2228
2229
2230 void
2231 SparseMatrix::mmult(SparseMatrix &      C,
2232 const SparseMatrix &B,
2233 const MPI::Vector & V) const
2234 {
2235 internals::perform_mmult(*this, B, C, V, false);
2236 }
2237
2238
2239
2240 void
2241 SparseMatrix::Tmmult(SparseMatrix &      C,
2242 const SparseMatrix &B,
2243 const MPI::Vector & V) const
2244 {
2245 internals::perform_mmult(*this, B, C, V, true);
2246 }
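 // A minimal usage sketch (not part of this file; A, B, C and V are
 // hypothetical). When a vector V is given, perform_mmult() above scales
 // the rows of B by it, so effectively C = A * diag(V) * B:
 //
 //   TrilinosWrappers::SparseMatrix C;
 //   A.mmult(C, B);  // C = A * B
 //   A.Tmmult(C, B); // C = A^T * B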
2247
2248
2249
2250 void
2251 SparseMatrix::write_ascii()
2252 {
2253 Assert(false, ExcNotImplemented());
2254 }
2255
2256
2257
2258 // As of now, no particularly neat
2259 // output is generated in case of
2260 // multiple processors.
2261 void
2262 SparseMatrix::print(std::ostream &out,
2263 const bool print_detailed_trilinos_information) const
2264 {
2265 if (print_detailed_trilinos_information == true)
2266 out << *matrix;
2267 else
2268 {
2269 double *values;
2270 int * indices;
2271 int num_entries;
2272
2273 for (int i = 0; i < matrix->NumMyRows(); ++i)
2274 {
2275 const int ierr =
2276 matrix->ExtractMyRowView(i, num_entries, values, indices);
2277 (void)ierr;
2278 Assert(ierr == 0, ExcTrilinosError(ierr));
2279
2280 for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
2282 << ","
2284 << ") " << values[j] << std::endl;
2285 }
2286 }
2287
2288 AssertThrow(out, ExcIO());
2289 }
2290
2291
2292
2293 SparseMatrix::size_type
2294 SparseMatrix::memory_consumption() const
2295 {
2296 size_type static_memory =
2297 sizeof(*this) + sizeof(*matrix) + sizeof(*matrix->Graph().DataPtr());
2298 return (
2300 matrix->NumMyNonzeros() +
2301 sizeof(int) * local_size() + static_memory);
2302 }
2303
2304
2305
2306 MPI_Comm
2307 SparseMatrix::get_mpi_communicator() const
2308 {
2309# ifdef DEAL_II_WITH_MPI
2310
2311 const Epetra_MpiComm *mpi_comm =
2312 dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
2313 Assert(mpi_comm != nullptr, ExcInternalError());
2314 return mpi_comm->Comm();
2315# else
2316
2317 return MPI_COMM_SELF;
2318
2319# endif
2320 }
2321} // namespace TrilinosWrappers
2322
2323
2324namespace TrilinosWrappers
2325{
2326 namespace internal
2327 {
2328 namespace
2329 {
2330# ifndef DEAL_II_WITH_MPI
2331 Epetra_Map
2332 make_serial_Epetra_map(const IndexSet &serial_partitioning)
2333 {
2334 // See IndexSet::make_trilinos_map
2335 return Epetra_Map(
2336 TrilinosWrappers::types::int_type(serial_partitioning.size()),
2337 TrilinosWrappers::types::int_type(serial_partitioning.n_elements()),
2338 0,
2339 Epetra_SerialComm());
2340 }
2341# endif
2342 } // namespace
2343
2344 namespace LinearOperatorImplementation
2345 {
2346 TrilinosPayload::TrilinosPayload()
2347 : use_transpose(false)
2348 ,
2349# ifdef DEAL_II_WITH_MPI
2350 communicator(MPI_COMM_SELF)
2351 , domain_map(IndexSet().make_trilinos_map(communicator.Comm()))
2352 , range_map(IndexSet().make_trilinos_map(communicator.Comm()))
2353# else
2354 domain_map(internal::make_serial_Epetra_map(IndexSet()))
2355 , range_map(internal::make_serial_Epetra_map(IndexSet()))
2356# endif
2357 {
2358 vmult = [](Range &, const Domain &) {
2359 Assert(false,
2360 ExcMessage("Uninitialized TrilinosPayload::vmult called "
2361 "(Default constructor)"));
2362 };
2363
2364 Tvmult = [](Domain &, const Range &) {
2365 Assert(false,
2366 ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
2367 "(Default constructor)"));
2368 };
2369
2370 inv_vmult = [](Domain &, const Range &) {
2371 Assert(false,
2372 ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2373 "(Default constructor)"));
2374 };
2375
2376 inv_Tvmult = [](Range &, const Domain &) {
2377 Assert(false,
2378 ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2379 "(Default constructor)"));
2380 };
2381 }
2382
2383
2384
2385 TrilinosPayload::TrilinosPayload(
2386 const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2387 const TrilinosWrappers::SparseMatrix &matrix)
2388 : use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
2389 ,
2390# ifdef DEAL_II_WITH_MPI
2391 communicator(matrix_exemplar.get_mpi_communicator())
2392 , domain_map(
2393 matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
2394 communicator.Comm()))
2395 , range_map(
2396 matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
2397 communicator.Comm()))
2398# else
2399 domain_map(internal::make_serial_Epetra_map(
2400 matrix_exemplar.locally_owned_domain_indices()))
2401 , range_map(internal::make_serial_Epetra_map(
2402 matrix_exemplar.locally_owned_range_indices()))
2403# endif
2404 {
2405 vmult = [&matrix_exemplar, &matrix](Range & tril_dst,
2406 const Domain &tril_src) {
2407 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2408 Assert(&tril_src != &tril_dst,
2409 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2410 Assert(matrix.trilinos_matrix().Filled(),
2411 TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2412 internal::check_vector_map_equality(
2413 matrix_exemplar.trilinos_matrix(),
2414 tril_src,
2415 tril_dst,
2416 matrix_exemplar.trilinos_matrix().UseTranspose());
2417 internal::check_vector_map_equality(
2418 matrix.trilinos_matrix(),
2419 tril_src,
2420 tril_dst,
2421 matrix.trilinos_matrix().UseTranspose());
2422
2423 const int ierr = matrix.trilinos_matrix().Apply(tril_src, tril_dst);
2424 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2425 };
2426
2427 Tvmult = [&matrix_exemplar, &matrix](Domain & tril_dst,
2428 const Range &tril_src) {
2429 // Duplicated from TrilinosWrappers::SparseMatrix::Tvmult
2430 Assert(&tril_src != &tril_dst,
2431 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2432 Assert(matrix.trilinos_matrix().Filled(),
2433 TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2434 internal::check_vector_map_equality(
2435 matrix_exemplar.trilinos_matrix(),
2436 tril_src,
2437 tril_dst,
2438 !matrix_exemplar.trilinos_matrix().UseTranspose());
2439 internal::check_vector_map_equality(
2440 matrix.trilinos_matrix(),
2441 tril_src,
2442 tril_dst,
2443 !matrix.trilinos_matrix().UseTranspose());
2444
2445 Epetra_CrsMatrix &tril_mtrx_non_const =
2446 const_cast<Epetra_CrsMatrix &>(matrix.trilinos_matrix());
2447 tril_mtrx_non_const.SetUseTranspose(
2448 !matrix.trilinos_matrix().UseTranspose());
2449 const int ierr = matrix.trilinos_matrix().Apply(tril_src, tril_dst);
2450 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2451 tril_mtrx_non_const.SetUseTranspose(
2452 !matrix.trilinos_matrix().UseTranspose());
2453 };
2454
2455 inv_vmult = [](Domain &, const Range &) {
2456 Assert(false,
2457 ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2458 "(Matrix constructor with matrix exemplar)"));
2459 };
2460
2461 inv_Tvmult = [](Range &, const Domain &) {
2462 Assert(false,
2463 ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2464 "(Matrix constructor with matrix exemplar)"));
2465 };
2466 }
2467
2468
2469
2470 TrilinosPayload::TrilinosPayload(
2471 const TrilinosWrappers::SparseMatrix &   matrix_exemplar,
2472 const TrilinosWrappers::PreconditionBase &preconditioner)
2473 : use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
2474 ,
2475# ifdef DEAL_II_WITH_MPI
2476 communicator(matrix_exemplar.get_mpi_communicator())
2477 , domain_map(
2478 matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
2479 communicator.Comm()))
2480 , range_map(
2481 matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
2482 communicator.Comm()))
2483# else
2484 domain_map(internal::make_serial_Epetra_map(
2485 matrix_exemplar.locally_owned_domain_indices()))
2486 , range_map(internal::make_serial_Epetra_map(
2487 matrix_exemplar.locally_owned_range_indices()))
2488# endif
2489 {
2490 vmult = [&matrix_exemplar, &preconditioner](Range & tril_dst,
2491 const Domain &tril_src) {
2492 // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2493 // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2494 Assert(&tril_src != &tril_dst,
2495 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2496 internal::check_vector_map_equality(
2497 matrix_exemplar.trilinos_matrix(),
2498 tril_src,
2499 tril_dst,
2500 matrix_exemplar.trilinos_matrix().UseTranspose());
2501 internal::check_vector_map_equality(
2502 preconditioner.trilinos_operator(),
2503 tril_src,
2504 tril_dst,
2505 preconditioner.trilinos_operator().UseTranspose());
2506
2507 const int ierr =
2508 preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2509 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2510 };
2511
2512 Tvmult = [&matrix_exemplar, &preconditioner](Domain & tril_dst,
2513 const Range &tril_src) {
2514 // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2515 // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2516 Assert(&tril_src != &tril_dst,
2517 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2518 internal::check_vector_map_equality(
2519 matrix_exemplar.trilinos_matrix(),
2520 tril_src,
2521 tril_dst,
2522 !matrix_exemplar.trilinos_matrix().UseTranspose());
2523 internal::check_vector_map_equality(
2524 preconditioner.trilinos_operator(),
2525 tril_src,
2526 tril_dst,
2527 !preconditioner.trilinos_operator().UseTranspose());
2528
2529 preconditioner.trilinos_operator().SetUseTranspose(
2530 !preconditioner.trilinos_operator().UseTranspose());
2531 const int ierr =
2532 preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2533 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2534 preconditioner.trilinos_operator().SetUseTranspose(
2535 !preconditioner.trilinos_operator().UseTranspose());
2536 };
2537
2538 inv_vmult = [&matrix_exemplar, &preconditioner](Domain & tril_dst,
2539 const Range &tril_src) {
2540 // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2541 // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2542 Assert(&tril_src != &tril_dst,
2543 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2544 internal::check_vector_map_equality(
2545 matrix_exemplar.trilinos_matrix(),
2546 tril_src,
2547 tril_dst,
2548 !matrix_exemplar.trilinos_matrix().UseTranspose());
2549 internal::check_vector_map_equality(
2550 preconditioner.trilinos_operator(),
2551 tril_src,
2552 tril_dst,
2553 !preconditioner.trilinos_operator().UseTranspose());
2554
2555 const int ierr =
2556 preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2557 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2558 };
2559
2560 inv_Tvmult = [&matrix_exemplar,
2561 &preconditioner](Range & tril_dst,
2562 const Domain &tril_src) {
2563 // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2564 // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2565 Assert(&tril_src != &tril_dst,
2566 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2567 internal::check_vector_map_equality(
2568 matrix_exemplar.trilinos_matrix(),
2569 tril_src,
2570 tril_dst,
2571 matrix_exemplar.trilinos_matrix().UseTranspose());
2572 internal::check_vector_map_equality(
2573 preconditioner.trilinos_operator(),
2574 tril_src,
2575 tril_dst,
2576 preconditioner.trilinos_operator().UseTranspose());
2577
2578 preconditioner.trilinos_operator().SetUseTranspose(
2579 !preconditioner.trilinos_operator().UseTranspose());
2580 const int ierr =
2581 preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2582 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2583 preconditioner.trilinos_operator().SetUseTranspose(
2584 !preconditioner.trilinos_operator().UseTranspose());
2585 };
2586 }
2587
2588
2589
2590 TrilinosPayload::TrilinosPayload(
2591 const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
2592 const TrilinosWrappers::PreconditionBase &preconditioner)
2593 : use_transpose(
2594 preconditioner_exemplar.trilinos_operator().UseTranspose())
2595 ,
2596# ifdef DEAL_II_WITH_MPI
2597 communicator(preconditioner_exemplar.get_mpi_communicator())
2598 , domain_map(preconditioner_exemplar.locally_owned_domain_indices()
2599 .make_trilinos_map(communicator.Comm()))
2600 , range_map(preconditioner_exemplar.locally_owned_range_indices()
2601 .make_trilinos_map(communicator.Comm()))
2602# else
2603 domain_map(internal::make_serial_Epetra_map(
2604 preconditioner_exemplar.locally_owned_domain_indices()))
2605 , range_map(internal::make_serial_Epetra_map(
2606 preconditioner_exemplar.locally_owned_range_indices()))
2607# endif
2608 {
2609 vmult = [&preconditioner_exemplar,
2610 &preconditioner](Range &tril_dst, const Domain &tril_src) {
2611 // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2612 // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2613 Assert(&tril_src != &tril_dst,
2614 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2615 internal::check_vector_map_equality(
2616 preconditioner_exemplar.trilinos_operator(),
2617 tril_src,
2618 tril_dst,
2619 preconditioner_exemplar.trilinos_operator().UseTranspose());
2620 internal::check_vector_map_equality(
2621 preconditioner.trilinos_operator(),
2622 tril_src,
2623 tril_dst,
2624 preconditioner.trilinos_operator().UseTranspose());
2625
2626 const int ierr =
2627 preconditioner.trilinos_operator().Apply(tril_src, tril_dst);
2628 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2629 };
2630
2631 Tvmult = [&preconditioner_exemplar,
2632 &preconditioner](Domain &tril_dst, const Range &tril_src) {
2633 // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2634 // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2635 Assert(&tril_src != &tril_dst,
2636 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2637 internal::check_vector_map_equality(
2638 preconditioner_exemplar.trilinos_operator(),
2639 tril_src,
2640 tril_dst,
2641 !preconditioner_exemplar.trilinos_operator().UseTranspose());
2642 internal::check_vector_map_equality(
2643 preconditioner.trilinos_operator(),
2644 tril_src,
2645 tril_dst,
2646 !preconditioner.trilinos_operator().UseTranspose());
2647
2648 preconditioner.trilinos_operator().SetUseTranspose(
2649 !preconditioner.trilinos_operator().UseTranspose());
2650 const int ierr =
2651 preconditioner.trilinos_operator().Apply(tril_src, tril_dst);
2652 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2653 preconditioner.trilinos_operator().SetUseTranspose(
2654 !preconditioner.trilinos_operator().UseTranspose());
2655 };
2656
2657 inv_vmult = [&preconditioner_exemplar,
2658 &preconditioner](Domain &tril_dst, const Range &tril_src) {
2659 // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2660 // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2661 Assert(&tril_src != &tril_dst,
2662 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2663 internal::check_vector_map_equality(
2664 preconditioner_exemplar.trilinos_operator(),
2665 tril_src,
2666 tril_dst,
2667 !preconditioner_exemplar.trilinos_operator().UseTranspose());
2668 internal::check_vector_map_equality(
2669 preconditioner.trilinos_operator(),
2670 tril_src,
2671 tril_dst,
2672 !preconditioner.trilinos_operator().UseTranspose());
2673
2674 const int ierr =
2675 preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2676 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2677 };
2678
2679 inv_Tvmult = [&preconditioner_exemplar,
2680 &preconditioner](Range & tril_dst,
2681 const Domain &tril_src) {
2682 // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2683 // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2684 Assert(&tril_src != &tril_dst,
2685 TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2686 internal::check_vector_map_equality(
2687 preconditioner_exemplar.trilinos_operator(),
2688 tril_src,
2689 tril_dst,
2690 preconditioner_exemplar.trilinos_operator().UseTranspose());
2691 internal::check_vector_map_equality(
2692 preconditioner.trilinos_operator(),
2693 tril_src,
2694 tril_dst,
2695 preconditioner.trilinos_operator().UseTranspose());
2696
2697 preconditioner.trilinos_operator().SetUseTranspose(
2698 !preconditioner.trilinos_operator().UseTranspose());
2699 const int ierr =
2700 preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2701 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2702 preconditioner.trilinos_operator().SetUseTranspose(
2703 !preconditioner.trilinos_operator().UseTranspose());
2704 };
2705 }
2706
2707
2708
2709 TrilinosPayload::TrilinosPayload(const TrilinosPayload &payload)
2710 : vmult(payload.vmult)
2711 , Tvmult(payload.Tvmult)
2712 , inv_vmult(payload.inv_vmult)
2713 , inv_Tvmult(payload.inv_Tvmult)
2714 , use_transpose(payload.use_transpose)
2715 , communicator(payload.communicator)
2716 , domain_map(payload.domain_map)
2717 , range_map(payload.range_map)
2718 {}
2719
2720
2721
2722 // Composite copy constructor
2723 // This is required for PackagedOperations
2724 TrilinosPayload::TrilinosPayload(const TrilinosPayload &first_op,
2725 const TrilinosPayload &second_op)
2726 : use_transpose(false)
2727 , // The combination of operators provides the exact
2728 // definition of the operation
2729 communicator(first_op.communicator)
2730 , domain_map(second_op.domain_map)
2731 , range_map(first_op.range_map)
2732 {}
2733
2734
2735
2736 TrilinosPayload
2737 TrilinosPayload::identity_payload() const
2738 {
2739 TrilinosPayload return_op(*this);
2740
2741 return_op.vmult = [](Range &tril_dst, const Range &tril_src) {
2742 tril_dst = tril_src;
2743 };
2744
2745 return_op.Tvmult = [](Range &tril_dst, const Range &tril_src) {
2746 tril_dst = tril_src;
2747 };
2748
2749 return_op.inv_vmult = [](Range &tril_dst, const Range &tril_src) {
2750 tril_dst = tril_src;
2751 };
2752
2753 return_op.inv_Tvmult = [](Range &tril_dst, const Range &tril_src) {
2754 tril_dst = tril_src;
2755 };
2756
2757 return return_op;
2758 }
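      // A sketch of the guarantee (not part of this file): for the payload
      // returned above, each of vmult, Tvmult, inv_vmult and inv_Tvmult
      // simply copies its source into its destination, so op(v) == v for
      // any compatible vector v.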
2759
2760
2761
2762 TrilinosPayload
2763 TrilinosPayload::null_payload() const
2764 {
2765 TrilinosPayload return_op(*this);
2766
2767 return_op.vmult = [](Range &tril_dst, const Domain &) {
2768 const int ierr = tril_dst.PutScalar(0.0);
2769
2770 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2771 };
2772
2773 return_op.Tvmult = [](Domain &tril_dst, const Range &) {
2774 const int ierr = tril_dst.PutScalar(0.0);
2775
2776 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2777 };
2778
2779 return_op.inv_vmult = [](Domain &tril_dst, const Range &) {
2780 AssertThrow(false,
2781 ExcMessage("Cannot compute inverse of null operator"));
2782
2783 const int ierr = tril_dst.PutScalar(0.0);
2784
2785 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2786 };
2787
2788 return_op.inv_Tvmult = [](Range &tril_dst, const Domain &) {
2789 AssertThrow(false,
2790 ExcMessage("Cannot compute inverse of null operator"));
2791
2792 const int ierr = tril_dst.PutScalar(0.0);
2793
2794 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2795 };
2796
2797 return return_op;
2798 }
2799
2800
2801
2802 TrilinosPayload
2803 TrilinosPayload::transpose_payload() const
2804 {
2805 TrilinosPayload return_op(*this);
2806 return_op.transpose();
2807 return return_op;
2808 }
2809
2810
2811
2812 IndexSet
2813 TrilinosPayload::locally_owned_domain_indices() const
2814 {
2815 return IndexSet(domain_map);
2816 }
2817
2818
2819
2820 IndexSet
2821 TrilinosPayload::locally_owned_range_indices() const
2822 {
2823 return IndexSet(range_map);
2824 }
2825
2826
2827
2828 MPI_Comm
2829 TrilinosPayload::get_mpi_communicator() const
2830 {
2831# ifdef DEAL_II_WITH_MPI
2832 return communicator.Comm();
2833# else
2834 return MPI_COMM_SELF;
2835# endif
2836 }
2837
2838
2839
2840 void
2841 TrilinosPayload::transpose()
2842 {
2843 SetUseTranspose(!use_transpose);
2844 }
2845
2846
2847
2848 bool
2849 TrilinosPayload::UseTranspose() const
2850 {
2851 return use_transpose;
2852 }
2853
2854
2855
2856 int
2857 TrilinosPayload::SetUseTranspose(bool UseTranspose)
2858 {
2859 if (use_transpose != UseTranspose)
2860 {
2861 use_transpose = UseTranspose;
2862 std::swap(domain_map, range_map);
2863 std::swap(vmult, Tvmult);
2864 std::swap(inv_vmult, inv_Tvmult);
2865 }
2866 return 0;
2867 }
2868
2869
2870
2871 int
2872 TrilinosPayload::Apply(const VectorType &X, VectorType &Y) const
2873 {
2874 // The transposedness of the operations is taken care of
2875 // when we hit the transpose flag.
2876 vmult(Y, X);
2877 return 0;
2878 }
2879
2880
2881
2882 int
2883 TrilinosPayload::ApplyInverse(const VectorType &Y, VectorType &X) const
2884 {
2885 // The transposedness of the operations is taken care of
2886 // when we hit the transpose flag.
2887 inv_vmult(X, Y);
2888 return 0;
2889 }
2890
2891
2892
2893 const char *
2894 TrilinosPayload::Label() const
2895 {
2896 return "TrilinosPayload";
2897 }
2898
2899
2900
2901 const Epetra_Comm &
2902 TrilinosPayload::Comm() const
2903 {
2904 return communicator;
2905 }
2906
2907
2908
2909 const Epetra_Map &
2910 TrilinosPayload::OperatorDomainMap() const
2911 {
2912 return domain_map;
2913 }
2914
2915
2916
2917 const Epetra_Map &
2918 TrilinosPayload::OperatorRangeMap() const
2919 {
2920 return range_map;
2921 }
2922
2923
2924
2925 bool
2926 TrilinosPayload::HasNormInf() const
2927 {
2928 return false;
2929 }
2930
2931
2932
2933 double
2934 TrilinosPayload::NormInf() const
2935 {
2936 AssertThrow(false, ExcNotImplemented());
2937 return 0.0;
2938 }
2939
2940
2941
2942 TrilinosPayload
2943 operator+(const TrilinosPayload &first_op,
2944 const TrilinosPayload &second_op)
2945 {
2946 using Domain = typename TrilinosPayload::Domain;
2947 using Range = typename TrilinosPayload::Range;
2948 using Intermediate = typename TrilinosPayload::VectorType;
2949 using GVMVectorType = TrilinosWrappers::MPI::Vector;
2950
2951 Assert(first_op.locally_owned_domain_indices() ==
2952 second_op.locally_owned_domain_indices(),
2953 ExcMessage(
2954 "Operators are set to work on incompatible IndexSets."));
2955 Assert(first_op.locally_owned_range_indices() ==
2956 second_op.locally_owned_range_indices(),
2957 ExcMessage(
2958 "Operators are set to work on incompatible IndexSets."));
2959
2960 TrilinosPayload return_op(first_op, second_op);
2961
2962 // Capture by copy so the payloads are always valid
2963 return_op.vmult = [first_op, second_op](Range & tril_dst,
2964 const Domain &tril_src) {
2965 // Duplicated from LinearOperator::operator*
2966 // TODO: Template the constructor on GrowingVectorMemory vector type?
2968 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2969
2970 // Initialize intermediate vector
2971 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2972 i->reinit(IndexSet(first_op_init_map),
2973 first_op.get_mpi_communicator(),
2974 /*bool omit_zeroing_entries =*/true);
2975
2976 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2977 const size_type i_local_size = i->end() - i->begin();
2978 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2979 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2980 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2981 (void)second_op_init_map;
2982 Intermediate tril_int(View,
2983 first_op_init_map,
2984 const_cast<TrilinosScalar *>(i->begin()),
2985 i_local_size,
2986 1);
2987
2988 // These operators may themselves be transposed or not, so we let them
2989 // decide what the intended outcome is
2990 second_op.Apply(tril_src, tril_int);
2991 first_op.Apply(tril_src, tril_dst);
2992 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2993 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2994 };
2995
2996 return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
2997 const Range &tril_src) {
2998 // Duplicated from LinearOperator::operator*
2999 // TODO: Template the constructor on GrowingVectorMemory vector type?
3001 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3002
3003 // These operators may themselves be transposed or not, so we let them
3004 // decide what the intended outcome is
3005 // We must first transpose the operators to get the right IndexSets
3006 // for the input, intermediate and result vectors
3007 const_cast<TrilinosPayload &>(first_op).transpose();
3008 const_cast<TrilinosPayload &>(second_op).transpose();
3009
3010 // Initialize intermediate vector
3011 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3012 i->reinit(IndexSet(first_op_init_map),
3013 first_op.get_mpi_communicator(),
3014 /*bool omit_zeroing_entries =*/true);
3015
3016 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3017 const size_type i_local_size = i->end() - i->begin();
3018 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3019 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3020 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3021 (void)second_op_init_map;
3022 Intermediate tril_int(View,
3023 first_op_init_map,
3024 const_cast<TrilinosScalar *>(i->begin()),
3025 i_local_size,
3026 1);
3027
3028 // These operators may themselves be transposed or not, so we let them
3029 // decide what the intended outcome is
3030 second_op.Apply(tril_src, tril_int);
3031 first_op.Apply(tril_src, tril_dst);
3032 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3033 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3034
3035 // Reset transpose flag
3036 const_cast<TrilinosPayload &>(first_op).transpose();
3037 const_cast<TrilinosPayload &>(second_op).transpose();
3038 };
3039
3040 return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
3041 const Range &tril_src) {
3042 // Duplicated from LinearOperator::operator*
3043 // TODO: Template the constructor on GrowingVectorMemory vector type?
3045 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3046
3047 // Initialize intermediate vector
3048 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3049 i->reinit(IndexSet(first_op_init_map),
3050 first_op.get_mpi_communicator(),
3051 /*bool omit_zeroing_entries =*/true);
3052
3053 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3054 const size_type i_local_size = i->end() - i->begin();
3055 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3056 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3057 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3058 (void)second_op_init_map;
3059 Intermediate tril_int(View,
3060 first_op_init_map,
3061 const_cast<TrilinosScalar *>(i->begin()),
3062 i_local_size,
3063 1);
3064
3065 // These operators may themselves be transposed or not, so we let them
3066 // decide what the intended outcome is
3067 second_op.ApplyInverse(tril_src, tril_int);
3068 first_op.ApplyInverse(tril_src, tril_dst);
3069 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3070 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3071 };
3072
3073 return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
3074 const Domain &tril_src) {
3075 // Duplicated from LinearOperator::operator*
3076 // TODO: Template the constructor on GrowingVectorMemory vector type?
3078 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3079
3080 // These operators may themselves be transposed or not, so we let them
3081 // decide what the intended outcome is
3082 // We must first transpose the operators to get the right IndexSets
3083 // for the input, intermediate and result vectors
3084 const_cast<TrilinosPayload &>(first_op).transpose();
3085 const_cast<TrilinosPayload &>(second_op).transpose();
3086
3087 // Initialize intermediate vector
3088 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3089 i->reinit(IndexSet(first_op_init_map),
3090 first_op.get_mpi_communicator(),
3091 /*bool omit_zeroing_entries =*/true);
3092
3093 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3094 const size_type i_local_size = i->end() - i->begin();
3095 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3096 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3097 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3098 (void)second_op_init_map;
3099 Intermediate tril_int(View,
3100 first_op_init_map,
3101 const_cast<TrilinosScalar *>(i->begin()),
3102 i_local_size,
3103 1);
3104
3105 // These operators may themselves be transposed or not, so we let them
3106 // decide what the intended outcome is
3107 second_op.ApplyInverse(tril_src, tril_int);
3108 first_op.ApplyInverse(tril_src, tril_dst);
3109 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3110 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3111
3112 // Reset transpose flag
3113 const_cast<TrilinosPayload &>(first_op).transpose();
3114 const_cast<TrilinosPayload &>(second_op).transpose();
3115 };
3116
3117 return return_op;
3118 }
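      // A hedged sketch (not part of this file) of how this operator+ is
      // typically reached, assuming Trilinos sparse matrices A and B with
      // matching index sets wrapped as LinearOperators:
      //
      //   const auto op_a = TrilinosWrappers::linear_operator<
      //     TrilinosWrappers::MPI::Vector>(A);
      //   const auto op_b = TrilinosWrappers::linear_operator<
      //     TrilinosWrappers::MPI::Vector>(B);
      //   const auto op_sum = op_a + op_b; // payloads combined as above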
3119
3120
3121 TrilinosPayload
3122 operator*(const TrilinosPayload &first_op,
3123 const TrilinosPayload &second_op)
3124 {
3125 using Domain = typename TrilinosPayload::Domain;
3126 using Range = typename TrilinosPayload::Range;
3127 using Intermediate = typename TrilinosPayload::VectorType;
3128 using GVMVectorType = TrilinosWrappers::MPI::Vector;
3129
3130 Assert(first_op.locally_owned_domain_indices() ==
3131 second_op.locally_owned_range_indices(),
3132 ExcMessage(
3133 "Operators are set to work on incompatible IndexSets."));
3134
3135 TrilinosPayload return_op(first_op, second_op);
3136
3137 // Capture by copy so the payloads are always valid
3138 return_op.vmult = [first_op, second_op](Range & tril_dst,
3139 const Domain &tril_src) {
3140 // Duplicated from LinearOperator::operator*
3141 // TODO: Template the constructor on GrowingVectorMemory vector type?
3143 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3144
3145 // Initialize intermediate vector
3146 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3147 i->reinit(IndexSet(first_op_init_map),
3148 first_op.get_mpi_communicator(),
3149 /*bool omit_zeroing_entries =*/true);
3150
3151 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3152 const size_type i_local_size = i->end() - i->begin();
3153 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3154 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3155 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3156 (void)second_op_init_map;
3157 Intermediate tril_int(View,
3158 first_op_init_map,
3159 const_cast<TrilinosScalar *>(i->begin()),
3160 i_local_size,
3161 1);
3162
3163 // These operators may themselves be transposed or not, so we let them
3164 // decide what the intended outcome is
3165 second_op.Apply(tril_src, tril_int);
3166 first_op.Apply(tril_int, tril_dst);
3167 };
3168
3169 return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
3170 const Range &tril_src) {
3171 // Duplicated from LinearOperator::operator*
3172 // TODO: Template the constructor on GrowingVectorMemory vector type?
3174 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3175
3176 // These operators may themselves be transposed or not, so we let them
3177 // decide what the intended outcome is
3178 // We must first transpose the operators to get the right IndexSets
3179 // for the input, intermediate and result vectors
3180 const_cast<TrilinosPayload &>(first_op).transpose();
3181 const_cast<TrilinosPayload &>(second_op).transpose();
3182
3183 // Initialize intermediate vector
3184 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3185 i->reinit(IndexSet(first_op_init_map),
3186 first_op.get_mpi_communicator(),
3187 /*bool omit_zeroing_entries =*/true);
3188
3189 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3190 const size_type i_local_size = i->end() - i->begin();
3191 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3192 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3193 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3194 (void)second_op_init_map;
3195 Intermediate tril_int(View,
3196 first_op_init_map,
3197 const_cast<TrilinosScalar *>(i->begin()),
3198 i_local_size,
3199 1);
3200
3201 // Apply the operators in the reverse order to vmult
3202 first_op.Apply(tril_src, tril_int);
3203 second_op.Apply(tril_int, tril_dst);
3204
3205 // Reset transpose flag
3206 const_cast<TrilinosPayload &>(first_op).transpose();
3207 const_cast<TrilinosPayload &>(second_op).transpose();
3208 };
3209
3210 return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
3211 const Range &tril_src) {
3212 // Duplicated from LinearOperator::operator*
3213 // TODO: Template the constructor on GrowingVectorMemory vector type?
3215 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3216
3217 // Initialize intermediate vector
3218 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3219 i->reinit(IndexSet(first_op_init_map),
3220 first_op.get_mpi_communicator(),
3221 /*bool omit_zeroing_entries =*/true);
3222
3223 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3224 const size_type i_local_size = i->end() - i->begin();
3225 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3226 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3227 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3228 (void)second_op_init_map;
3229 Intermediate tril_int(View,
3230 first_op_init_map,
3231 const_cast<TrilinosScalar *>(i->begin()),
3232 i_local_size,
3233 1);
3234
3235 // Apply the operators in the reverse order to vmult
3236 // and the same order as Tvmult
3237 first_op.ApplyInverse(tril_src, tril_int);
3238 second_op.ApplyInverse(tril_int, tril_dst);
3239 };
3240
3241 return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
3242 const Domain &tril_src) {
3243 // Duplicated from LinearOperator::operator*
3244 // TODO: Template the constructor on GrowingVectorMemory vector type?
3246 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3247
3248 // These operators may themselves be transposed or not, so we let them
3249 // decide what the intended outcome is
3250 // We must first transpose the operators to get the right IndexSets
3251 // for the input, intermediate and result vectors
3252 const_cast<TrilinosPayload &>(first_op).transpose();
3253 const_cast<TrilinosPayload &>(second_op).transpose();
3254
3255 // Initialize intermediate vector
3256 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3257 i->reinit(IndexSet(first_op_init_map),
3258 first_op.get_mpi_communicator(),
3259 /*bool omit_zeroing_entries =*/true);
3260
3261 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3262 const size_type i_local_size = i->end() - i->begin();
3263 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3264 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3265 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3266 (void)second_op_init_map;
3267 Intermediate tril_int(View,
3268 first_op_init_map,
3269 const_cast<TrilinosScalar *>(i->begin()),
3270 i_local_size,
3271 1);
3272
3273 // These operators may themselves be transposed or not, so we let them
3274 // decide what the intended outcome is
3275 // Apply the operators in the reverse order to Tvmult
3276 // and the same order as vmult
3277 second_op.ApplyInverse(tril_src, tril_int);
3278 first_op.ApplyInverse(tril_int, tril_dst);
3279
3280 // Reset transpose flag
3281 const_cast<TrilinosPayload &>(first_op).transpose();
3282 const_cast<TrilinosPayload &>(second_op).transpose();
3283 };
3284
3285 return return_op;
3286 }
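      // The corresponding sketch for composition (same hypothetical
      // operators as for operator+ above): op_a * op_b produces a payload
      // whose vmult applies B first and then A, i.e. dst = A * (B * src),
      // matching the lambdas defined above.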
3287
3288 } // namespace LinearOperatorImplementation
3289 } /* namespace internal */
3290} /* namespace TrilinosWrappers */
3291
3292
3293
3294// explicit instantiations
3295# include "trilinos_sparse_matrix.inst"
3296
3297# ifndef DOXYGEN
3298// TODO: put these instantiations into generic file
3299namespace TrilinosWrappers
3300{
3301 template void
3302 SparseMatrix::reinit(const ::SparsityPattern &);
3303
3304 template void
3305 SparseMatrix::reinit(const DynamicSparsityPattern &);
3306
3307 template void
3308 SparseMatrix::reinit(const IndexSet &,
3309 const IndexSet &,
3310 const ::SparsityPattern &,
3311 const MPI_Comm &,
3312 const bool);
3313
3314 template void
3315 SparseMatrix::reinit(const IndexSet &,
3316 const IndexSet &,
3317 const DynamicSparsityPattern &,
3318 const MPI_Comm &,
3319 const bool);
3320
3321 template void
3322 SparseMatrix::vmult(MPI::Vector &, const MPI::Vector &) const;
3323
3324 template void
3326 const ::Vector<double> &) const;
3327
3328 template void
3331 const ::LinearAlgebra::distributed::Vector<double> &) const;
3332
3333# ifdef DEAL_II_WITH_MPI
3334# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3335 template void
3338 const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3339
3340 template void
3343 const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3344# endif
3345
3346 template void
3347 SparseMatrix::vmult(
3348 ::LinearAlgebra::EpetraWrappers::Vector &,
3349 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3350# endif
3351
3352 template void
3353 SparseMatrix::Tvmult(MPI::Vector &, const MPI::Vector &) const;
3354
3355 template void
3357 const ::Vector<double> &) const;
3358
3359 template void
3362 const ::LinearAlgebra::distributed::Vector<double> &) const;
3363
3364# ifdef DEAL_II_WITH_MPI
3365# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3366 template void
3369 const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3370
3371 template void
3374 const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3375# endif
3376
3377 template void
3378 SparseMatrix::Tvmult(
3379 ::LinearAlgebra::EpetraWrappers::Vector &,
3380 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3381# endif
3382
3383 template void
3384 SparseMatrix::vmult_add(MPI::Vector &, const MPI::Vector &) const;
3385
3386 template void
3388 const ::Vector<double> &) const;
3389
3390 template void
3393 const ::LinearAlgebra::distributed::Vector<double> &) const;
3394
3395# ifdef DEAL_II_WITH_MPI
3396# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3397 template void
3400 const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3401
3402 template void
3405 const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3406# endif
3407
3408 template void
3409 SparseMatrix::vmult_add(
3410 ::LinearAlgebra::EpetraWrappers::Vector &,
3411 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3412# endif
3413
3414 template void
3415 SparseMatrix::Tvmult_add(MPI::Vector &, const MPI::Vector &) const;
3416
3417 template void
3419 const ::Vector<double> &) const;
3420
3421 template void
3424 const ::LinearAlgebra::distributed::Vector<double> &) const;
3425
3426# ifdef DEAL_II_WITH_MPI
3427# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3428 template void
3431 const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3432
3433 template void
3436 const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3437# endif
3438
3439 template void
3440 SparseMatrix::Tvmult_add(
3441 ::LinearAlgebra::EpetraWrappers::Vector &,
3442 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3443# endif
3444} // namespace TrilinosWrappers
3445# endif // DOXYGEN
3446
3447DEAL_II_NAMESPACE_CLOSE
3448
3449#endif // DEAL_II_WITH_TRILINOS