trilinos_sparse_matrix.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2008 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
17
18#ifdef DEAL_II_WITH_TRILINOS
19
22
29
30# include <boost/container/small_vector.hpp>
31
32# ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
33# include <EpetraExt_MatrixMatrix.h>
34# endif
35# include <Epetra_Export.h>
36# include <Teuchos_RCP.hpp>
37# include <ml_epetra_utils.h>
38# include <ml_struct.h>
39
40# include <memory>
41
42DEAL_II_NAMESPACE_OPEN
43
44namespace TrilinosWrappers
45{
46 namespace internal
47 {
48 template <typename VectorType>
49 typename VectorType::value_type *
50 begin(VectorType &V)
51 {
52 return V.begin();
53 }
54
55 template <typename VectorType>
56 const typename VectorType::value_type *
57 begin(const VectorType &V)
58 {
59 return V.begin();
60 }
61
62 template <typename VectorType>
63 typename VectorType::value_type *
64 end(VectorType &V)
65 {
66 return V.end();
67 }
68
69 template <typename VectorType>
70 const typename VectorType::value_type *
71 end(const VectorType &V)
72 {
73 return V.end();
74 }
75
76 template <>
77 double *
78 begin(LinearAlgebra::EpetraWrappers::Vector &V)
79 {
80 return V.trilinos_vector()[0];
81 }
82
83 template <>
84 const double *
85 begin(const LinearAlgebra::EpetraWrappers::Vector &V)
86 {
87 return V.trilinos_vector()[0];
88 }
89
90 template <>
91 double *
92 end(LinearAlgebra::EpetraWrappers::Vector &V)
93 {
94 return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
95 }
96
97 template <>
98 const double *
99 end(const LinearAlgebra::EpetraWrappers::Vector &V)
100 {
101 return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
102 }
103
104# ifdef DEAL_II_TRILINOS_WITH_TPETRA
105 template <typename Number, typename MemorySpace>
106 Number *
107 begin(LinearAlgebra::TpetraWrappers::Vector<Number, MemorySpace> &V)
108 {
109 return V.trilinos_vector().getDataNonConst().get();
110 }
111
112 template <typename Number, typename MemorySpace>
113 const Number *
114 begin(const LinearAlgebra::TpetraWrappers::Vector<Number, MemorySpace> &V)
115 {
116 return V.trilinos_vector().getData().get();
117 }
118
119 template <typename Number, typename MemorySpace>
120 Number *
121 end(LinearAlgebra::TpetraWrappers::Vector<Number, MemorySpace> &V)
122 {
123 return V.trilinos_vector().getDataNonConst().get() +
124 V.trilinos_vector().getLocalLength();
125 }
126
127 template <typename Number, typename MemorySpace>
128 const Number *
129 end(const LinearAlgebra::TpetraWrappers::Vector<Number, MemorySpace> &V)
130 {
131 return V.trilinos_vector().getData().get() +
132 V.trilinos_vector().getLocalLength();
133 }
134# endif
135 } // namespace internal
136
137
138 namespace SparseMatrixIterators
139 {
140 void
141 AccessorBase::visit_present_row()
142 {
143 // if we are asked to visit the past-the-end line, then simply
144 // release all our caches and go on with life.
145 //
146 // do the same if the row we're supposed to visit is not locally
147 // owned. this is simply going to make non-locally owned rows
148 // look like they're empty
149 if ((this->a_row == matrix->m()) ||
150 (matrix->in_local_range(this->a_row) == false))
151 {
152 colnum_cache.reset();
153 value_cache.reset();
154
155 return;
156 }
157
158 // get a representation of the present row
159 int ncols;
160 const TrilinosWrappers::types::int_type colnums =
161 matrix->row_length(this->a_row);
162 if (value_cache.get() == nullptr)
163 {
164 value_cache = std::make_shared<std::vector<TrilinosScalar>>(colnums);
165 colnum_cache = std::make_shared<std::vector<size_type>>(colnums);
166 }
167 else
168 {
169 value_cache->resize(colnums);
170 colnum_cache->resize(colnums);
171 }
172
173 const int ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy(
174 this->a_row,
175 colnums,
176 ncols,
177 value_cache->data(),
178 reinterpret_cast<TrilinosWrappers::types::int_type *>(
179 colnum_cache->data()));
180 AssertDimension(ncols, colnums);
181 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
182
183 // copy it into our caches if the
184 // line isn't empty. if it is, then
185 // we've done something wrong, since
186 // we shouldn't have initialized an
187 // iterator for an empty line (what
188 // would it point to?)
189 }
190 } // namespace SparseMatrixIterators
191
192
193 // The constructor is actually the
194 // only point where we have to check
195 // whether we build a serial or a
196 // parallel Trilinos matrix.
197 // Actually, it does not even matter
198 // how many threads there are, but
199 // only whether we use an MPI compiler or
200 // a standard compiler. So, even one
201 // thread on a configuration with
202 // MPI will still get a parallel
203 // interface.
204 SparseMatrix::SparseMatrix()
205 : column_space_map(new Epetra_Map(0, 0, Utilities::Trilinos::comm_self()))
206 , matrix(
207 new Epetra_FECrsMatrix(View, *column_space_map, *column_space_map, 0))
208 , last_action(Zero)
209 , compressed(true)
210 {
211 matrix->FillComplete();
212 }
213
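// As an illustrative usage sketch (editorial addition, not code from this
// file): the (m, n, n_max_entries_per_row) constructor below builds a serial
// matrix, while the IndexSet-based constructors further down set up the
// parallel layout. Here 'locally_owned_dofs' is an assumed IndexSet that
// describes the row partition:
//
//   TrilinosWrappers::SparseMatrix serial_matrix(10, 10, 3);
//   TrilinosWrappers::SparseMatrix parallel_matrix(locally_owned_dofs,
//                                                  MPI_COMM_WORLD,
//                                                  8);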
214
215
216 SparseMatrix::SparseMatrix(const size_type m,
217 const size_type n,
218 const unsigned int n_max_entries_per_row)
219 : column_space_map(
220 new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
221 0,
222 Utilities::Trilinos::comm_self()))
223 ,
224
225 // on one processor only, we know how the
226 // columns of the matrix will be
227 // distributed (everything on one
228 // processor), so we can hand in this
229 // information to the constructor. we
230 // can't do so in parallel, where the
231 // information from columns is only
232 // available when entries have been added
233 matrix(new Epetra_FECrsMatrix(
234 Copy,
235 Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
236 0,
237 Utilities::Trilinos::comm_self()),
238 *column_space_map,
239 n_max_entries_per_row,
240 false))
241 , last_action(Zero)
242 , compressed(false)
243 {}
244
245
246
247 SparseMatrix::SparseMatrix(const size_type m,
248 const size_type n,
249 const std::vector<unsigned int> &n_entries_per_row)
250 : column_space_map(
251 new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
252 0,
253 Utilities::Trilinos::comm_self()))
254 , matrix(new Epetra_FECrsMatrix(
255 Copy,
256 Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
257 0,
258 Utilities::Trilinos::comm_self()),
259 *column_space_map,
260 reinterpret_cast<int *>(
261 const_cast<unsigned int *>(n_entries_per_row.data())),
262 false))
263 , last_action(Zero)
264 , compressed(false)
265 {}
266
267
268
269 SparseMatrix::SparseMatrix(const IndexSet &parallel_partitioning,
270 const MPI_Comm communicator,
271 const unsigned int n_max_entries_per_row)
272 : column_space_map(new Epetra_Map(
273 parallel_partitioning.make_trilinos_map(communicator, false)))
274 , matrix(new Epetra_FECrsMatrix(Copy,
275 *column_space_map,
276 n_max_entries_per_row,
277 false))
278 , last_action(Zero)
279 , compressed(false)
280 {}
281
282
283
284 SparseMatrix::SparseMatrix(const IndexSet &parallel_partitioning,
285 const MPI_Comm communicator,
286 const std::vector<unsigned int> &n_entries_per_row)
287 : column_space_map(new Epetra_Map(
288 parallel_partitioning.make_trilinos_map(communicator, false)))
289 , matrix(new Epetra_FECrsMatrix(Copy,
290 *column_space_map,
291 reinterpret_cast<int *>(
292 const_cast<unsigned int *>(
293 n_entries_per_row.data())),
294 false))
295 , last_action(Zero)
296 , compressed(false)
297 {}
298
299
300
301 SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
302 const IndexSet &col_parallel_partitioning,
303 const MPI_Comm communicator,
304 const size_type n_max_entries_per_row)
305 : column_space_map(new Epetra_Map(
306 col_parallel_partitioning.make_trilinos_map(communicator, false)))
307 , matrix(new Epetra_FECrsMatrix(
308 Copy,
309 row_parallel_partitioning.make_trilinos_map(communicator, false),
310 n_max_entries_per_row,
311 false))
312 , last_action(Zero)
313 , compressed(false)
314 {}
315
316
317
318 SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
319 const IndexSet &col_parallel_partitioning,
320 const MPI_Comm communicator,
321 const std::vector<unsigned int> &n_entries_per_row)
322 : column_space_map(new Epetra_Map(
323 col_parallel_partitioning.make_trilinos_map(communicator, false)))
324 , matrix(new Epetra_FECrsMatrix(
325 Copy,
326 row_parallel_partitioning.make_trilinos_map(communicator, false),
327 reinterpret_cast<int *>(
328 const_cast<unsigned int *>(n_entries_per_row.data())),
329 false))
330 , last_action(Zero)
331 , compressed(false)
332 {}
333
334
335
336 SparseMatrix::SparseMatrix(const SparsityPattern &sparsity_pattern)
337 : column_space_map(new Epetra_Map(sparsity_pattern.domain_partitioner()))
338 , matrix(
339 new Epetra_FECrsMatrix(Copy,
340 sparsity_pattern.trilinos_sparsity_pattern(),
341 false))
342 , last_action(Zero)
343 , compressed(true)
344 {
345 Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true,
347 "The Trilinos sparsity pattern has not been compressed."));
349 }
350
351
352
353 SparseMatrix::SparseMatrix(SparseMatrix &&other) noexcept
354 : column_space_map(std::move(other.column_space_map))
355 , matrix(std::move(other.matrix))
356 , nonlocal_matrix(std::move(other.nonlocal_matrix))
357 , nonlocal_matrix_exporter(std::move(other.nonlocal_matrix_exporter))
358 , last_action(other.last_action)
359 , compressed(other.compressed)
360 {
361 other.last_action = Zero;
362 other.compressed = false;
363 }
364
365
366
367 void
368 SparseMatrix::copy_from(const SparseMatrix &rhs)
369 {
370 if (this == &rhs)
371 return;
372
373 nonlocal_matrix.reset();
374 nonlocal_matrix_exporter.reset();
375
376 // check whether we need to update the whole matrix layout (we have
377 // different maps or if we detect a row where the columns of the two
378 // matrices do not match)
379 bool needs_deep_copy =
380 !matrix->RowMap().SameAs(rhs.matrix->RowMap()) ||
381 !matrix->ColMap().SameAs(rhs.matrix->ColMap()) ||
382 !matrix->DomainMap().SameAs(rhs.matrix->DomainMap()) ||
383 n_nonzero_elements() != rhs.n_nonzero_elements();
384 if (!needs_deep_copy)
385 {
386 // Try to copy all the rows of the matrix one by one. In case of error
387 // (i.e., the column indices are different), we need to abort and blow
388 // away the matrix.
389 for (const auto row : locally_owned_range_indices())
390 {
391 const int row_local = matrix->RowMap().LID(
392 static_cast<TrilinosWrappers::types::int_type>(row));
393 Assert((row_local >= 0), ExcAccessToNonlocalRow(row));
394
395 int n_entries, rhs_n_entries;
396 TrilinosScalar *value_ptr, *rhs_value_ptr;
397 int *index_ptr, *rhs_index_ptr;
398 int ierr = rhs.matrix->ExtractMyRowView(row_local,
399 rhs_n_entries,
400 rhs_value_ptr,
401 rhs_index_ptr);
402 (void)ierr;
403 Assert(ierr == 0, ExcTrilinosError(ierr));
404
405 ierr = matrix->ExtractMyRowView(row_local,
406 n_entries,
407 value_ptr,
408 index_ptr);
409 Assert(ierr == 0, ExcTrilinosError(ierr));
410
411 if (n_entries != rhs_n_entries ||
412 std::memcmp(static_cast<void *>(index_ptr),
413 static_cast<void *>(rhs_index_ptr),
414 sizeof(int) * n_entries) != 0)
415 {
416 needs_deep_copy = true;
417 break;
418 }
419
420 for (int i = 0; i < n_entries; ++i)
421 value_ptr[i] = rhs_value_ptr[i];
422 }
423 }
424
425 if (needs_deep_copy)
426 {
427 column_space_map =
428 std::make_unique<Epetra_Map>(rhs.trilinos_matrix().DomainMap());
429
430 // release memory before reallocation
431 matrix = std::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);
432
433 matrix->FillComplete(*column_space_map, matrix->RowMap());
434 }
435
436 if (rhs.nonlocal_matrix.get() != nullptr)
437 nonlocal_matrix =
438 std::make_unique<Epetra_CrsMatrix>(Copy, rhs.nonlocal_matrix->Graph());
439 }
440
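// Usage sketch (illustrative, not part of this file): copy_from() reuses the
// existing layout when the maps and column indices of both matrices match,
// and only falls back to the deep copy above when they differ:
//
//   TrilinosWrappers::SparseMatrix B;
//   B.copy_from(A); // cheap if B already has A's maps and sparsity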
441
442
443 namespace
444 {
445 template <typename SparsityPatternType>
446 void
447 reinit_matrix(const IndexSet &row_parallel_partitioning,
448 const IndexSet &column_parallel_partitioning,
449 const SparsityPatternType &sparsity_pattern,
450 const bool exchange_data,
451 const MPI_Comm communicator,
452 std::unique_ptr<Epetra_Map> &column_space_map,
453 std::unique_ptr<Epetra_FECrsMatrix> &matrix,
454 std::unique_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
455 std::unique_ptr<Epetra_Export> &nonlocal_matrix_exporter)
456 {
457 // release memory before reallocation
458 matrix.reset();
459 nonlocal_matrix.reset();
460 nonlocal_matrix_exporter.reset();
461
462 column_space_map = std::make_unique<Epetra_Map>(
463 column_parallel_partitioning.make_trilinos_map(communicator, false));
464
465 if (column_space_map->Comm().MyPID() == 0)
466 {
467 AssertDimension(sparsity_pattern.n_rows(),
468 row_parallel_partitioning.size());
469 AssertDimension(sparsity_pattern.n_cols(),
470 column_parallel_partitioning.size());
471 }
472
473 Epetra_Map row_space_map =
474 row_parallel_partitioning.make_trilinos_map(communicator, false);
475
476 // if we want to exchange data, build a usual Trilinos sparsity pattern
477 // and let that handle the exchange. otherwise, manually create a
478 // CrsGraph, which consumes considerably less memory because it can set
479 // correct number of indices right from the start
480 if (exchange_data)
481 {
482 SparsityPattern trilinos_sparsity;
483 trilinos_sparsity.reinit(row_parallel_partitioning,
484 column_parallel_partitioning,
485 sparsity_pattern,
486 communicator,
487 exchange_data);
488 matrix = std::make_unique<Epetra_FECrsMatrix>(
489 Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);
490
491 return;
492 }
493
494 const SparseMatrix::size_type first_row = TrilinosWrappers::min_my_gid(
495 row_space_map),
496 last_row = TrilinosWrappers::max_my_gid(
497 row_space_map) +
498 1;
499 std::vector<int> n_entries_per_row(last_row - first_row);
500
501 for (SparseMatrix::size_type row = first_row; row < last_row; ++row)
502 n_entries_per_row[row - first_row] = sparsity_pattern.row_length(row);
503
504 // The deal.II notation of a Sparsity pattern corresponds to the Epetra
505 // concept of a Graph. Hence, we generate a graph by copying the
506 // sparsity pattern into it, and then build up the matrix from the
507 // graph. This is considerably faster than directly filling elements
508 // into the matrix. Moreover, it consumes less memory, since the
509 // internal reordering is done on ints only, and we can leave the
510 // doubles aside.
511
512 // for more than one processor, need to specify only row map first and
513 // let the matrix entries decide about the column map (which says which
514 // columns are present in the matrix, not to be confused with the
515 // col_map that tells how the domain dofs of the matrix will be
516 // distributed). for only one processor, we can directly assign the
517 // columns as well. Compare this with bug # 4123 in the Sandia Bugzilla.
518 std::unique_ptr<Epetra_CrsGraph> graph;
519 if (row_space_map.Comm().NumProc() > 1)
520 graph = std::make_unique<Epetra_CrsGraph>(Copy,
521 row_space_map,
522 n_entries_per_row.data(),
523 true);
524 else
525 graph = std::make_unique<Epetra_CrsGraph>(Copy,
526 row_space_map,
527 *column_space_map,
528 n_entries_per_row.data(),
529 true);
530
531 // This function assumes that the sparsity pattern sits on all
532 // processors (completely). The parallel version uses an Epetra graph
533 // that is already distributed.
534
535 // now insert the indices
536 std::vector<TrilinosWrappers::types::int_type> row_indices;
537
538 for (SparseMatrix::size_type row = first_row; row < last_row; ++row)
539 {
540 const int row_length = sparsity_pattern.row_length(row);
541 if (row_length == 0)
542 continue;
543
544 row_indices.resize(row_length, -1);
545 {
546 typename SparsityPatternType::iterator p =
547 sparsity_pattern.begin(row);
548 for (SparseMatrix::size_type col = 0;
549 p != sparsity_pattern.end(row);
550 ++p, ++col)
551 row_indices[col] = p->column();
552 }
553 graph->Epetra_CrsGraph::InsertGlobalIndices(row,
554 row_length,
555 row_indices.data());
556 }
557
558 // Finally, optimize the graph structure (sort indices, make memory
559 // contiguous, etc.). Note that the documentation of the function indeed
560 // states that we first need to provide the column (domain) map and then
561 // the row (range) map
562 graph->FillComplete(*column_space_map, row_space_map);
563 graph->OptimizeStorage();
564
565 // check whether we got the number of columns right.
566 AssertDimension(sparsity_pattern.n_cols(),
567 TrilinosWrappers::n_global_cols(*graph));
568 (void)n_global_cols;
569
570 // And now finally generate the matrix.
571 matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
572 }
573
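// The graph-then-matrix idiom used above, reduced to its essentials as an
// illustrative sketch ('row_map', 'domain_map', 'range_map', 'row', 'n_cols'
// and 'cols' are placeholders, not names from this file):
//
//   Epetra_CrsGraph graph(Copy, row_map, n_entries_per_row.data(), true);
//   graph.InsertGlobalIndices(row, n_cols, cols); // ints only, no doubles yet
//   graph.FillComplete(domain_map, range_map);    // column map first
//   graph.OptimizeStorage();
//   Epetra_FECrsMatrix matrix(Copy, graph, false); // values filled later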
574
575
576 // for the non-local graph, we need to circumvent the problem that some
577 // processors will not add into the non-local graph at all: We do not want
578 // to insert dummy elements on >5000 processors because that gets very
579 // slow. Thus, we set a flag in Epetra_CrsGraph that sets the correct
580 // flag. Since it is protected, we need to expose this information by
581 // deriving a class from Epetra_CrsGraph for the purpose of creating the
582 // data structure
583 class Epetra_CrsGraphMod : public Epetra_CrsGraph
584 {
585 public:
586 Epetra_CrsGraphMod(const Epetra_Map &row_map,
587 const int *n_entries_per_row)
588 : Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
589 {}
590
591 void
592 SetIndicesAreGlobal()
593 {
594 this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
595 }
596 };
597
598
599
600 // specialization for DynamicSparsityPattern which can provide us with
601 // more information about the non-locally owned rows
602 template <>
603 void
604 reinit_matrix(const IndexSet &row_parallel_partitioning,
605 const IndexSet &column_parallel_partitioning,
606 const DynamicSparsityPattern &sparsity_pattern,
607 const bool exchange_data,
608 const MPI_Comm communicator,
609 std::unique_ptr<Epetra_Map> &column_space_map,
610 std::unique_ptr<Epetra_FECrsMatrix> &matrix,
611 std::unique_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
612 std::unique_ptr<Epetra_Export> &nonlocal_matrix_exporter)
613 {
614 matrix.reset();
615 nonlocal_matrix.reset();
616 nonlocal_matrix_exporter.reset();
617
618 column_space_map = std::make_unique<Epetra_Map>(
619 column_parallel_partitioning.make_trilinos_map(communicator, false));
620
621 AssertDimension(sparsity_pattern.n_rows(),
622 row_parallel_partitioning.size());
623 AssertDimension(sparsity_pattern.n_cols(),
624 column_parallel_partitioning.size());
625
626 Epetra_Map row_space_map =
627 row_parallel_partitioning.make_trilinos_map(communicator, false);
628
629 IndexSet relevant_rows(sparsity_pattern.row_index_set());
630 // serial case
631 if (relevant_rows.size() == 0)
632 {
633 relevant_rows.set_size(
634 TrilinosWrappers::n_global_elements(row_space_map));
635 relevant_rows.add_range(
636 0, TrilinosWrappers::n_global_elements(row_space_map));
637 }
638 relevant_rows.compress();
639 Assert(relevant_rows.n_elements() >=
640 static_cast<unsigned int>(row_space_map.NumMyElements()),
642 "Locally relevant rows of sparsity pattern must contain "
643 "all locally owned rows"));
644
645 // check whether the relevant rows correspond to exactly the same map as
646 // the owned rows. In that case, do not create the nonlocal graph and
647 // fill the columns by demand
648 const bool have_ghost_rows = [&]() {
649 const std::vector<::dealii::types::global_dof_index> indices =
650 relevant_rows.get_index_vector();
651 Epetra_Map relevant_map(
652 TrilinosWrappers::types::int_type(-1),
653 TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
654 (indices.empty() ?
655 nullptr :
656 reinterpret_cast<const TrilinosWrappers::types::int_type *>(
657 indices.data())),
658 0,
659 row_space_map.Comm());
660 return !relevant_map.SameAs(row_space_map);
661 }();
662
663 std::vector<TrilinosWrappers::types::int_type> ghost_rows;
664 std::vector<int> n_entries_per_row(row_space_map.NumMyElements());
665 std::vector<int> n_entries_per_ghost_row;
666 {
667 SparseMatrix::size_type own = 0;
668 for (const auto global_row : relevant_rows)
669 {
670 if (row_space_map.MyGID(
671 static_cast<TrilinosWrappers::types::int_type>(global_row)))
672 n_entries_per_row[own++] =
673 sparsity_pattern.row_length(global_row);
674 else if (sparsity_pattern.row_length(global_row) > 0)
675 {
676 ghost_rows.push_back(global_row);
677 n_entries_per_ghost_row.push_back(
678 sparsity_pattern.row_length(global_row));
679 }
680 }
681 }
682
683 Epetra_Map off_processor_map(-1,
684 ghost_rows.size(),
685 (ghost_rows.size() > 0) ?
686 (ghost_rows.data()) :
687 nullptr,
688 0,
689 row_space_map.Comm());
690
691 std::unique_ptr<Epetra_CrsGraph> graph;
692 std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
693 if (row_space_map.Comm().NumProc() > 1)
694 {
695 graph =
696 std::make_unique<Epetra_CrsGraph>(Copy,
697 row_space_map,
698 (n_entries_per_row.size() > 0) ?
699 (n_entries_per_row.data()) :
700 nullptr,
701 exchange_data ? false : true);
702 if (have_ghost_rows == true)
703 nonlocal_graph = std::make_unique<Epetra_CrsGraphMod>(
704 off_processor_map, n_entries_per_ghost_row.data());
705 }
706 else
707 graph =
708 std::make_unique<Epetra_CrsGraph>(Copy,
709 row_space_map,
710 *column_space_map,
711 (n_entries_per_row.size() > 0) ?
712 (n_entries_per_row.data()) :
713 nullptr,
714 true);
715
716 // now insert the indices, select between the right matrix
717 std::vector<TrilinosWrappers::types::int_type> row_indices;
718
719 for (const auto global_row : relevant_rows)
720 {
721 const int row_length = sparsity_pattern.row_length(global_row);
722 if (row_length == 0)
723 continue;
724
725 row_indices.resize(row_length, -1);
726 for (int col = 0; col < row_length; ++col)
727 row_indices[col] = sparsity_pattern.column_number(global_row, col);
728
729 if (row_space_map.MyGID(
730 static_cast<TrilinosWrappers::types::int_type>(global_row)))
731 graph->InsertGlobalIndices(global_row,
732 row_length,
733 row_indices.data());
734 else
735 {
736 Assert(nonlocal_graph.get() != nullptr, ExcInternalError());
737 nonlocal_graph->InsertGlobalIndices(global_row,
738 row_length,
739 row_indices.data());
740 }
741 }
742
743 // finalize nonlocal graph and create nonlocal matrix
744 if (nonlocal_graph.get() != nullptr)
745 {
746 // must make sure the IndicesAreGlobal flag is set on all processors
747 // because some processors might not call InsertGlobalIndices (and
748 // we do not want to insert dummy indices on all processors for
749 // large-scale simulations due to the bad impact on performance)
750 nonlocal_graph->SetIndicesAreGlobal();
751 Assert(nonlocal_graph->IndicesAreGlobal() == true,
752 ExcInternalError());
753 nonlocal_graph->FillComplete(*column_space_map, row_space_map);
754 nonlocal_graph->OptimizeStorage();
755
756 // insert data from nonlocal graph into the final sparsity pattern
757 if (exchange_data)
758 {
759 Epetra_Export exporter(nonlocal_graph->RowMap(), row_space_map);
760 int ierr = graph->Export(*nonlocal_graph, exporter, Add);
761 (void)ierr;
762 Assert(ierr == 0, ExcTrilinosError(ierr));
763 }
764
765 nonlocal_matrix =
766 std::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);
767 }
768
769 graph->FillComplete(*column_space_map, row_space_map);
770 graph->OptimizeStorage();
771
772 AssertDimension(sparsity_pattern.n_cols(),
773 TrilinosWrappers::n_global_cols(*graph));
774
775 matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
776 }
777 } // namespace
778
779
780
781 template <typename SparsityPatternType>
782 void
783 SparseMatrix::reinit(const SparsityPatternType &sparsity_pattern)
784 {
785 reinit_matrix(complete_index_set(sparsity_pattern.n_rows()),
786 complete_index_set(sparsity_pattern.n_cols()),
787 sparsity_pattern,
788 false,
789 MPI_COMM_SELF,
790 column_space_map,
791 matrix,
792 nonlocal_matrix,
793 nonlocal_matrix_exporter);
794 }
795
796
797
798 template <typename SparsityPatternType>
799 inline std::enable_if_t<
800 !std::is_same_v<SparsityPatternType, ::dealii::SparseMatrix<double>>>
801 SparseMatrix::reinit(const IndexSet &row_parallel_partitioning,
802 const IndexSet &col_parallel_partitioning,
803 const SparsityPatternType &sparsity_pattern,
804 const MPI_Comm communicator,
805 const bool exchange_data)
806 {
807 reinit_matrix(row_parallel_partitioning,
808 col_parallel_partitioning,
809 sparsity_pattern,
810 exchange_data,
811 communicator,
812 column_space_map,
813 matrix,
814 nonlocal_matrix,
815 nonlocal_matrix_exporter);
816
817 // In the end, the matrix needs to be compressed in order to be really
818 // ready.
819 last_action = Zero;
820 compress(VectorOperation::insert);
821 }
822
823
824
825 void
826 SparseMatrix::reinit(const SparsityPattern &sparsity_pattern)
827 {
828 matrix.reset();
829 nonlocal_matrix_exporter.reset();
830
831 // reinit with a (parallel) Trilinos sparsity pattern.
832 column_space_map =
833 std::make_unique<Epetra_Map>(sparsity_pattern.domain_partitioner());
834 matrix = std::make_unique<Epetra_FECrsMatrix>(
835 Copy, sparsity_pattern.trilinos_sparsity_pattern(), false);
836
837 if (sparsity_pattern.nonlocal_graph.get() != nullptr)
838 nonlocal_matrix =
839 std::make_unique<Epetra_CrsMatrix>(Copy,
840 *sparsity_pattern.nonlocal_graph);
841 else
842 nonlocal_matrix.reset();
843
844 last_action = Zero;
845 compress(VectorOperation::insert);
846 }
847
848
849
850 void
851 SparseMatrix::reinit(const SparseMatrix &sparse_matrix)
852 {
853 if (this == &sparse_matrix)
854 return;
855
856 column_space_map =
857 std::make_unique<Epetra_Map>(sparse_matrix.trilinos_matrix().DomainMap());
858 matrix.reset();
859 nonlocal_matrix_exporter.reset();
860 matrix = std::make_unique<Epetra_FECrsMatrix>(
861 Copy, sparse_matrix.trilinos_sparsity_pattern(), false);
862
863 if (sparse_matrix.nonlocal_matrix != nullptr)
864 nonlocal_matrix = std::make_unique<Epetra_CrsMatrix>(
865 Copy, sparse_matrix.nonlocal_matrix->Graph());
866 else
867 nonlocal_matrix.reset();
868
869 last_action = Zero;
870 compress(VectorOperation::insert);
871 }
872
873
874
875 template <typename number>
876 inline void
877 SparseMatrix::reinit(
878 const IndexSet &row_parallel_partitioning,
879 const IndexSet &col_parallel_partitioning,
880 const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
881 const MPI_Comm communicator,
882 const double drop_tolerance,
883 const bool copy_values,
884 const ::dealii::SparsityPattern *use_this_sparsity)
885 {
886 if (copy_values == false)
887 {
888 // in case we do not copy values, just
889 // call the other function.
890 if (use_this_sparsity == nullptr)
891 reinit(row_parallel_partitioning,
892 col_parallel_partitioning,
893 dealii_sparse_matrix.get_sparsity_pattern(),
894 communicator,
895 false);
896 else
897 reinit(row_parallel_partitioning,
898 col_parallel_partitioning,
899 *use_this_sparsity,
900 communicator,
901 false);
902 return;
903 }
904
905 const size_type n_rows = dealii_sparse_matrix.m();
906
907 AssertDimension(row_parallel_partitioning.size(), n_rows);
908 AssertDimension(col_parallel_partitioning.size(), dealii_sparse_matrix.n());
909
910 const ::dealii::SparsityPattern &sparsity_pattern =
911 (use_this_sparsity != nullptr) ?
912 *use_this_sparsity :
913 dealii_sparse_matrix.get_sparsity_pattern();
914
915 if (matrix.get() == nullptr || m() != n_rows ||
916 n_nonzero_elements() != sparsity_pattern.n_nonzero_elements())
917 {
918 reinit(row_parallel_partitioning,
919 col_parallel_partitioning,
920 sparsity_pattern,
921 communicator,
922 false);
923 }
924
925 // fill the values. the same as above: go through all rows of the
926 // matrix, and then all columns. since the sparsity patterns of the
927 // input matrix and the specified sparsity pattern might be different,
928 // need to go through the row for both these sparsity structures
929 // simultaneously in order to really set the correct values.
930 size_type maximum_row_length = matrix->MaxNumEntries();
931 std::vector<size_type> row_indices(maximum_row_length);
932 std::vector<TrilinosScalar> values(maximum_row_length);
933
934 for (size_type row = 0; row < n_rows; ++row)
935 // see if the row is locally stored on this processor
936 if (row_parallel_partitioning.is_element(row) == true)
937 {
938 ::dealii::SparsityPattern::iterator select_index =
939 sparsity_pattern.begin(row);
940 typename ::dealii::SparseMatrix<number>::const_iterator it =
941 dealii_sparse_matrix.begin(row);
942 size_type col = 0;
943 if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
944 {
945 // optimized diagonal
946 AssertDimension(it->column(), row);
947 if (std::fabs(it->value()) > drop_tolerance)
948 {
949 values[col] = it->value();
950 row_indices[col++] = it->column();
951 }
952 ++select_index;
953 ++it;
954 }
955
956 while (it != dealii_sparse_matrix.end(row) &&
957 select_index != sparsity_pattern.end(row))
958 {
959 while (select_index->column() < it->column() &&
960 select_index != sparsity_pattern.end(row))
961 ++select_index;
962 while (it->column() < select_index->column() &&
963 it != dealii_sparse_matrix.end(row))
964 ++it;
965
966 if (it == dealii_sparse_matrix.end(row))
967 break;
968 if (std::fabs(it->value()) > drop_tolerance)
969 {
970 values[col] = it->value();
971 row_indices[col++] = it->column();
972 }
973 ++select_index;
974 ++it;
975 }
976 set(row,
977 col,
978 reinterpret_cast<size_type *>(row_indices.data()),
979 values.data(),
980 false);
981 }
982 compress(VectorOperation::insert);
983 }
984
985
986
987 template <typename number>
988 void
989 SparseMatrix::reinit(
990 const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
991 const double drop_tolerance,
992 const bool copy_values,
993 const ::dealii::SparsityPattern *use_this_sparsity)
994 {
995 reinit(complete_index_set(dealii_sparse_matrix.m()),
996 complete_index_set(dealii_sparse_matrix.n()),
997 dealii_sparse_matrix,
998 MPI_COMM_SELF,
999 drop_tolerance,
1000 copy_values,
1001 use_this_sparsity);
1002 }
1003
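// Usage sketch (illustrative, not part of this file): converting an
// assembled deal.II matrix into a Trilinos matrix with the overloads above,
// dropping entries below a tolerance:
//
//   dealii::SparseMatrix<double> local_matrix; // assembled elsewhere
//   TrilinosWrappers::SparseMatrix trilinos_matrix;
//   trilinos_matrix.reinit(local_matrix, /*drop_tolerance=*/1e-13);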
1004
1005
1006 void
1007 SparseMatrix::reinit(const Epetra_CrsMatrix &input_matrix,
1008 const bool copy_values)
1009 {
1010 Assert(input_matrix.Filled() == true,
1011 ExcMessage("Input CrsMatrix has not called FillComplete()!"));
1012
1013 column_space_map = std::make_unique<Epetra_Map>(input_matrix.DomainMap());
1014
1015 const Epetra_CrsGraph *graph = &input_matrix.Graph();
1016
1017 nonlocal_matrix.reset();
1018 nonlocal_matrix_exporter.reset();
1019 matrix.reset();
1020 matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
1021
1022 matrix->FillComplete(*column_space_map, input_matrix.RangeMap(), true);
1023
1024 if (copy_values == true)
1025 {
1026 // point to the first data entry in the two
1027 // matrices and copy the content
1028 const TrilinosScalar *in_values = input_matrix[0];
1029 TrilinosScalar *values = (*matrix)[0];
1030 const size_type my_nonzeros = input_matrix.NumMyNonzeros();
1031 std::memcpy(values, in_values, my_nonzeros * sizeof(TrilinosScalar));
1032 }
1033
1034 last_action = Zero;
1035 compress(VectorOperation::insert);
1036 }
1037
1038
1039
1040 void
1041 SparseMatrix::compress(VectorOperation::values operation)
1042 {
1043 Epetra_CombineMode mode = last_action;
1044 if (last_action == Zero)
1045 {
1046 if ((operation == VectorOperation::add) ||
1047 (operation == VectorOperation::unknown))
1048 mode = Add;
1049 else if (operation == VectorOperation::insert)
1050 mode = Insert;
1051 else
1052 Assert(
1053 false,
1054 ExcMessage(
1055 "compress() can only be called with VectorOperation add, insert, or unknown"));
1056 }
1057 else
1058 {
1059 Assert(
1060 ((last_action == Add) && (operation != VectorOperation::insert)) ||
1061 ((last_action == Insert) && (operation != VectorOperation::add)),
1062 ExcMessage("Operation and argument to compress() do not match"));
1063 }
1064
1065 // flush buffers
1066 int ierr;
1067 if (nonlocal_matrix.get() != nullptr && mode == Add)
1068 {
1069 // do only export in case of an add() operation, otherwise the owning
1070 // processor must have set the correct entry
1071 nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
1072 if (nonlocal_matrix_exporter.get() == nullptr)
1073 nonlocal_matrix_exporter =
1074 std::make_unique<Epetra_Export>(nonlocal_matrix->RowMap(),
1075 matrix->RowMap());
1076 ierr =
1077 matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
1078 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1079 ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
1080 nonlocal_matrix->PutScalar(0);
1081 }
1082 else
1083 ierr =
1084 matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true, mode);
1085
1086 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1087
1088 ierr = matrix->OptimizeStorage();
1089 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1090
1091 last_action = Zero;
1092
1093 compressed = true;
1094 }
1095
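// Typical assembly pattern around compress() (an illustrative sketch, not
// code from this file): every sequence of set()/add() calls must be closed
// by a matching compress() before the matrix can be used:
//
//   matrix.add(i, j, local_contribution);  // possibly into off-processor rows
//   // ... more set()/add() calls ...
//   matrix.compress(VectorOperation::add); // exchanges buffers, finalizes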
1096
1097
1098 void
1099 SparseMatrix::clear()
1100 {
1101 // When we clear the matrix, reset
1102 // the pointer and generate an
1103 // empty matrix.
1104 column_space_map =
1105 std::make_unique<Epetra_Map>(0, 0, Utilities::Trilinos::comm_self());
1106 matrix = std::make_unique<Epetra_FECrsMatrix>(View, *column_space_map, 0);
1107 nonlocal_matrix.reset();
1108 nonlocal_matrix_exporter.reset();
1109
1110 matrix->FillComplete();
1111
1112 compressed = true;
1113 }
1114
1115
1116
1117 void
1118 SparseMatrix::clear_row(const size_type row,
1119 const TrilinosScalar new_diag_value)
1120 {
1121 Assert(matrix->Filled() == true, ExcMatrixNotCompressed());
1122
1123 // Only do this on the rows owned
1124 // locally on this processor.
1125 int local_row =
1126 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1127 if (local_row >= 0)
1128 {
1129 TrilinosScalar *values;
1130 int *col_indices;
1131 int num_entries;
1132 const int ierr =
1133 matrix->ExtractMyRowView(local_row, num_entries, values, col_indices);
1134 (void)ierr;
1135
1136 Assert(ierr == 0, ExcTrilinosError(ierr));
1137
1138 const std::ptrdiff_t diag_index =
1139 std::find(col_indices, col_indices + num_entries, local_row) -
1140 col_indices;
1141
1142 for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
1143 if (diag_index != j || new_diag_value == 0)
1144 values[j] = 0.;
1145
1146 if (diag_index != num_entries)
1147 values[diag_index] = new_diag_value;
1148 }
1149 }
1150
1151
1152
1153 void
1154 SparseMatrix::clear_rows(const ArrayView<const size_type> &rows,
1155 const TrilinosScalar new_diag_value)
1156 {
1157 for (const auto row : rows)
1158 clear_row(row, new_diag_value);
1159 }
1160
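// Illustrative sketch (not part of this file): clear_row() is typically used
// to impose Dirichlet-type constraints after assembly, zeroing a row while
// keeping a value on the diagonal so the matrix stays invertible:
//
//   matrix.clear_row(constrained_dof, /*new_diag_value=*/1.0);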
1161
1162
1163 TrilinosScalar
1164 SparseMatrix::operator()(const size_type i, const size_type j) const
1165 {
1166 // Extract local indices in
1167 // the matrix.
1168 int trilinos_i =
1169 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1170 trilinos_j =
1171 matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1172 TrilinosScalar value = 0.;
1173
1174 // If the data is not on the
1175 // present processor, we throw
1176 // an exception. This is one of
1177 // the two tiny differences to
1178 // the el(i,j) call, which does
1179 // not throw any assertions.
1180 if (trilinos_i == -1)
1181 {
1182 Assert(false,
1183 ExcAccessToNonLocalElement(
1184 i, j, local_range().first, local_range().second - 1));
1185 }
1186 else
1187 {
1188 // Check whether the matrix has
1189 // already been transformed to local
1190 // indices.
1191 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1192
1193 // Prepare pointers for extraction
1194 // of a view of the row.
1195 int nnz_present = matrix->NumMyEntries(trilinos_i);
1196 int nnz_extracted;
1197 int *col_indices;
1198 TrilinosScalar *values;
1199
1200 // Generate the view and make
1201 // sure that we have not generated
1202 // an error.
1203 // TODO Check that col_indices are int and not long long
1204 int ierr = matrix->ExtractMyRowView(trilinos_i,
1205 nnz_extracted,
1206 values,
1207 col_indices);
1208 (void)ierr;
1209 Assert(ierr == 0, ExcTrilinosError(ierr));
1210
1211 Assert(nnz_present == nnz_extracted,
1212 ExcDimensionMismatch(nnz_present, nnz_extracted));
1213
1214 // Search the index where we
1215 // look for the value, and then
1216 // finally get it.
1217 const std::ptrdiff_t local_col_index =
1218 std::find(col_indices, col_indices + nnz_present, trilinos_j) -
1219 col_indices;
1220
1221 // This is actually the only
1222 // difference to the el(i,j)
1223 // function, which means that
1224 // we throw an exception in
1225 // this case instead of just
1226 // returning zero for an
1227 // element that is not present
1228 // in the sparsity pattern.
1229 if (local_col_index == nnz_present)
1230 {
1231 Assert(false, ExcInvalidIndex(i, j));
1232 }
1233 else
1234 value = values[local_col_index];
1235 }
1236
1237 return value;
1238 }
1239
1240
1241
1242 TrilinosScalar
1243 SparseMatrix::el(const size_type i, const size_type j) const
1244 {
1245 // Extract local indices in
1246 // the matrix.
1247 int trilinos_i =
1248 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1249 trilinos_j =
1250 matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1251 TrilinosScalar value = 0.;
1252
1253 // If the data is not on the
1254 // present processor, we can't
1255 // continue. Just print out zero
1256 // as discussed in the
1257 // documentation of this
1258 // function. if you want error
1259 // checking, use operator().
1260 if ((trilinos_i == -1) || (trilinos_j == -1))
1261 return 0.;
1262 else
1263 {
1264 // Check whether the matrix
1265 // already is transformed to
1266 // local indices.
1267 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1268
1269 // Prepare pointers for extraction
1270 // of a view of the row.
1271 int nnz_present = matrix->NumMyEntries(trilinos_i);
1272 int nnz_extracted;
1273 int *col_indices;
1274 TrilinosScalar *values;
1275
1276 // Generate the view and make
1277 // sure that we have not generated
1278 // an error.
1279 int ierr = matrix->ExtractMyRowView(trilinos_i,
1280 nnz_extracted,
1281 values,
1282 col_indices);
1283 (void)ierr;
1284 Assert(ierr == 0, ExcTrilinosError(ierr));
1285
1286 Assert(nnz_present == nnz_extracted,
1287 ExcDimensionMismatch(nnz_present, nnz_extracted));
1288
1289 // Search the index where we
1290 // look for the value, and then
1291 // finally get it.
1292 const std::ptrdiff_t local_col_index =
1293 std::find(col_indices, col_indices + nnz_present, trilinos_j) -
1294 col_indices;
1295
1296 // This is actually the only
1297 // difference to the () function
1298 // querying (i,j), where we throw an
1299 // exception instead of just
1300 // returning zero for an element
1301 // that is not present in the
1302 // sparsity pattern.
1303 if (local_col_index == nnz_present)
1304 value = 0;
1305 else
1306 value = values[local_col_index];
1307 }
1308
1309 return value;
1310 }
1311
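// The difference between the two access functions above, as an illustrative
// sketch (not code from this file):
//
//   const TrilinosScalar a = matrix(i, j);    // asserts (i,j) is stored and
//                                             // locally owned
//   const TrilinosScalar b = matrix.el(i, j); // returns 0. instead of
//                                             // failing in those cases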
1312
1313
1314 TrilinosScalar
1315 SparseMatrix::diag_element(const size_type i) const
1316 {
1317 Assert(m() == n(), ExcNotQuadratic());
1318
1319 if constexpr (running_in_debug_mode())
1320 {
1321 // use operator() in debug mode because
1322 // it checks if this is a valid element
1323 // (in parallel)
1324 return operator()(i, i);
1325 }
1326 else
1327 {
1328 // Trilinos doesn't seem to have a
1329 // more efficient way to access the
1330 // diagonal than by just using the
1331 // standard el(i,j) function.
1332 return el(i, i);
1333 }
1334 }
1335
1336
1337
1338 unsigned int
1339 SparseMatrix::row_length(const size_type row) const
1340 {
1341 Assert(row < m(), ExcInternalError());
1342
1343 // get a representation of the
1344 // present row
1345 int ncols = -1;
1346 int local_row =
1347 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1348 Assert((local_row >= 0), ExcAccessToNonlocalRow(row));
1349
1350 // on the processor who owns this
1351 // row, we'll have a non-negative
1352 // value.
1353 if (local_row >= 0)
1354 {
1355 int ierr = matrix->NumMyRowEntries(local_row, ncols);
1356 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1357 }
1358
1359 return static_cast<unsigned int>(ncols);
1360 }
1361
1362
1363
1364 void
1365 SparseMatrix::set(const std::vector<size_type> &row_indices,
1366 const std::vector<size_type> &col_indices,
1367 const FullMatrix<TrilinosScalar> &values,
1368 const bool elide_zero_values)
1369 {
1370 Assert(row_indices.size() == values.m(),
1371 ExcDimensionMismatch(row_indices.size(), values.m()));
1372 Assert(col_indices.size() == values.n(),
1373 ExcDimensionMismatch(col_indices.size(), values.n()));
1374
1375 for (size_type i = 0; i < row_indices.size(); ++i)
1376 set(row_indices[i],
1377 col_indices.size(),
1378 col_indices.data(),
1379 &values(i, 0),
1380 elide_zero_values);
1381 }
1382
1383
1384
1385 void
1386 SparseMatrix::set(const size_type row,
1387 const std::vector<size_type> &col_indices,
1388 const std::vector<TrilinosScalar> &values,
1389 const bool elide_zero_values)
1390 {
1391 Assert(col_indices.size() == values.size(),
1392 ExcDimensionMismatch(col_indices.size(), values.size()));
1393
1394 set(row,
1395 col_indices.size(),
1396 col_indices.data(),
1397 values.data(),
1398 elide_zero_values);
1399 }
1400
1401
1402
1403 template <>
1404 void
1405 SparseMatrix::set<TrilinosScalar>(const size_type row,
1406 const size_type n_cols,
1407 const size_type *col_indices,
1408 const TrilinosScalar *values,
1409 const bool elide_zero_values)
1410 {
1411 AssertIndexRange(row, this->m());
1412
1413 int ierr;
1414 if (last_action == Add)
1415 {
1416 ierr =
1417 matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true);
1418
1419 Assert(ierr == 0, ExcTrilinosError(ierr));
1420 }
1421
1422 last_action = Insert;
1423
1424 const TrilinosWrappers::types::int_type *col_index_ptr;
1425 const TrilinosScalar *col_value_ptr;
1426 const TrilinosWrappers::types::int_type trilinos_row = row;
1427 TrilinosWrappers::types::int_type n_columns;
1428
1429 boost::container::small_vector<TrilinosScalar, 200> local_value_array(
1430 elide_zero_values ? n_cols : 0);
1431 boost::container::small_vector<TrilinosWrappers::types::int_type, 200>
1432 local_index_array(elide_zero_values ? n_cols : 0);
1433
1434 // If we don't elide zeros, the pointers are already available... need to
1435 // cast to non-const pointers as that is the format taken by Trilinos (but
1436 // we will not modify const data)
1437 if (elide_zero_values == false)
1438 {
1439 col_index_ptr =
1440 reinterpret_cast<const TrilinosWrappers::types::int_type *>(
1441 col_indices);
1442 col_value_ptr = values;
1443 n_columns = n_cols;
1444 }
1445 else
1446 {
1447 // Otherwise, extract nonzero values in each row and get the
1448 // respective indices.
1449 col_index_ptr = local_index_array.data();
1450 col_value_ptr = local_value_array.data();
1451
1452 n_columns = 0;
1453 for (size_type j = 0; j < n_cols; ++j)
1454 {
1455 const double value = values[j];
1456 AssertIsFinite(value);
1457 if (value != 0)
1458 {
1459 local_index_array[n_columns] = col_indices[j];
1460 local_value_array[n_columns] = value;
1461 ++n_columns;
1462 }
1463 }
1464
1465 AssertIndexRange(n_columns, n_cols + 1);
1466 }
1467
1468
1469 // If the calling matrix owns the row to which we want to insert values,
1470 // we can directly call the Epetra_CrsMatrix input function, which is much
1471 // faster than the Epetra_FECrsMatrix function. We distinguish between two
1472 // cases: the first one is when the matrix is not filled (i.e., it is
1473 // possible to add new elements to the sparsity pattern), and the second
1474 // one is when the pattern is already fixed. In the former case, we add
1475 // the possibility to insert new values, and in the second we just replace
1476 // data.
1477 if (matrix->RowMap().MyGID(
1478 static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1479 {
1480 if (matrix->Filled() == false)
1481 {
1482 ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
1483 row, static_cast<int>(n_columns), col_value_ptr, col_index_ptr);
1484
1485 // When inserting elements, we do not want to create exceptions in
1486 // the case when inserting non-local data (since that's what we
1487 // want to do right now).
1488 if (ierr > 0)
1489 ierr = 0;
1490 }
1491 else
1492 ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row,
1493 n_columns,
1494 col_value_ptr,
1495 col_index_ptr);
1496 }
1497 else
1498 {
1499 // When we're at off-processor data, we have to stick with the
1500 // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
1501 // we call it is the fastest one (any other will lead to repeated
1502 // allocation and deallocation of memory in order to call the function
1503 // we already use, which is very inefficient if writing one element at
1504 // a time).
1505 compressed = false;
1506
1507 if (matrix->Filled() == false)
1508 {
1509 ierr = matrix->InsertGlobalValues(1,
1510 &trilinos_row,
1511 n_columns,
1512 col_index_ptr,
1513 &col_value_ptr,
1514 Epetra_FECrsMatrix::ROW_MAJOR);
1515 if (ierr > 0)
1516 ierr = 0;
1517 }
1518 else
1519 ierr = matrix->ReplaceGlobalValues(1,
1520 &trilinos_row,
1521 n_columns,
1522 col_index_ptr,
1523 &col_value_ptr,
1524 Epetra_FECrsMatrix::ROW_MAJOR);
1525 // use the FECrsMatrix facilities for set even in the case when we
1526 // have explicitly set the off-processor rows because that only works
1527 // properly when adding elements, not when setting them (since we want
1528 // to only touch elements that have been set explicitly, and there is
1529 // no way on the receiving processor to identify them otherwise)
1530 }
1531
1532 Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1533 AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
1534 }
1535
1536
1537
1538 void
1539 SparseMatrix::add(const std::vector<size_type> &indices,
1540 const FullMatrix<TrilinosScalar> &values,
1541 const bool elide_zero_values)
1542 {
1543 Assert(indices.size() == values.m(),
1544 ExcDimensionMismatch(indices.size(), values.m()));
1545 Assert(values.m() == values.n(), ExcNotQuadratic());
1546
1547 for (size_type i = 0; i < indices.size(); ++i)
1548 add(indices[i],
1549 indices.size(),
1550 indices.data(),
1551 &values(i, 0),
1552 elide_zero_values);
1553 }
1554
1555
1556
1557 void
1558 SparseMatrix::add(const std::vector<size_type> &row_indices,
1559 const std::vector<size_type> &col_indices,
1560 const FullMatrix<TrilinosScalar> &values,
1561 const bool elide_zero_values)
1562 {
1563 Assert(row_indices.size() == values.m(),
1564 ExcDimensionMismatch(row_indices.size(), values.m()));
1565 Assert(col_indices.size() == values.n(),
1566 ExcDimensionMismatch(col_indices.size(), values.n()));
1567
1568 for (size_type i = 0; i < row_indices.size(); ++i)
1569 add(row_indices[i],
1570 col_indices.size(),
1571 col_indices.data(),
1572 &values(i, 0),
1573 elide_zero_values);
1574 }
1575
1576
1577
1578 void
1579 SparseMatrix::add(const size_type row,
1580 const std::vector<size_type> &col_indices,
1581 const std::vector<TrilinosScalar> &values,
1582 const bool elide_zero_values)
1583 {
1584 Assert(col_indices.size() == values.size(),
1585 ExcDimensionMismatch(col_indices.size(), values.size()));
1586
1587 add(row,
1588 col_indices.size(),
1589 col_indices.data(),
1590 values.data(),
1591 elide_zero_values);
1592 }
1593
1594
1595
1596 void
1597 SparseMatrix::add(const size_type row,
1598 const size_type n_cols,
1599 const size_type *col_indices,
1600 const TrilinosScalar *values,
1601 const bool elide_zero_values,
1602 const bool /*col_indices_are_sorted*/)
1603 {
1604 AssertIndexRange(row, this->m());
1605 for (size_type n = 0; n < n_cols; ++n)
1606 AssertIndexRange(col_indices[n], this->n());
1607
1608 int ierr;
1609 if (last_action == Insert)
1610 {
1611 // TODO: this could lead to a deadlock when only one processor
1612 // calls GlobalAssemble.
1613 ierr =
1614 matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), false);
1615
1616 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1617 }
1618
1619 last_action = Add;
1620
1621 const TrilinosWrappers::types::int_type *col_index_ptr;
1622 const TrilinosScalar *col_value_ptr;
1623 const TrilinosWrappers::types::int_type trilinos_row = row;
1624 TrilinosWrappers::types::int_type n_columns;
1625
1626 boost::container::small_vector<TrilinosScalar, 100> local_value_array(
1627 n_cols);
1628 boost::container::small_vector<TrilinosWrappers::types::int_type, 100>
1629 local_index_array(n_cols);
1630
1631 // If we don't elide zeros, the pointers are already available... need to
1632 // cast to non-const pointers as that is the format taken by Trilinos (but
1633 // we will not modify const data)
1634 if (elide_zero_values == false)
1635 {
1636 col_index_ptr =
1637 reinterpret_cast<const TrilinosWrappers::types::int_type *>(
1638 col_indices);
1639 col_value_ptr = values;
1640 n_columns = n_cols;
1641 if constexpr (running_in_debug_mode())
1642 {
1643 for (size_type j = 0; j < n_cols; ++j)
1644 AssertIsFinite(values[j]);
1645 }
1646 }
1647 else
1648 {
1649 // Otherwise, extract nonzero values in each row and the corresponding
1650 // index.
1651 col_index_ptr = local_index_array.data();
1652 col_value_ptr = local_value_array.data();
1653
1654 n_columns = 0;
1655 for (size_type j = 0; j < n_cols; ++j)
1656 {
1657 const double value = values[j];
1658
1659 AssertIsFinite(value);
1660 if (value != 0)
1661 {
1662 local_index_array[n_columns] = col_indices[j];
1663 local_value_array[n_columns] = value;
1664 ++n_columns;
1665 }
1666 }
1667
1668 AssertIndexRange(n_columns, n_cols + 1);
1669 }
1670 // Exit early if there is nothing to do
1671 if (n_columns == 0)
1672 {
1673 return;
1674 }
1675
1676 // If the calling processor owns the row to which we want to add values, we
1677 // can directly call the Epetra_CrsMatrix input function, which is much
1678 // faster than the Epetra_FECrsMatrix function.
1679 if (matrix->RowMap().MyGID(
1680 static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1681 {
1682 ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row,
1683 n_columns,
1684 col_value_ptr,
1685 col_index_ptr);
1686 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1687 }
1688 else if (nonlocal_matrix.get() != nullptr)
1689 {
1690 compressed = false;
1691 // this is the case when we have explicitly set the off-processor rows
1692 // and want to create a separate matrix object for them (to retain
1693 // thread-safety)
1694 Assert(nonlocal_matrix->RowMap().LID(
1695 static_cast<TrilinosWrappers::types::int_type>(row)) != -1,
1696 ExcMessage("Attempted to write into off-processor matrix row "
1697 "that has not be specified as being writable upon "
1698 "initialization"));
1699 ierr = nonlocal_matrix->SumIntoGlobalValues(row,
1700 n_columns,
1701 col_value_ptr,
1702 col_index_ptr);
1703 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1704 }
1705 else
1706 {
1707 // When we're at off-processor data, we have to stick with the
1708 // standard SumIntoGlobalValues function. Nevertheless, the way we
1709 // call it is the fastest one (any other will lead to repeated
1710 // allocation and deallocation of memory in order to call the function
1711 // we already use, which is very inefficient if writing one element at
1712 // a time).
1713 compressed = false;
1714
1715 ierr = matrix->SumIntoGlobalValues(1,
1716 &trilinos_row,
1717 n_columns,
1718 col_index_ptr,
1719 &col_value_ptr,
1720 Epetra_FECrsMatrix::ROW_MAJOR);
1721 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1722 }
1723
1724 if constexpr (running_in_debug_mode())
1725 {
1726 if (ierr > 0)
1727 {
1728 std::cout << "------------------------------------------"
1729 << std::endl;
1730 std::cout << "Got error " << ierr << " in row " << row
1731 << " of proc " << matrix->RowMap().Comm().MyPID()
1732 << " when trying to add the columns:" << std::endl;
1733 for (TrilinosWrappers::types::int_type i = 0; i < n_columns; ++i)
1734 std::cout << col_index_ptr[i] << " ";
1735 std::cout << std::endl << std::endl;
1736 std::cout << "Matrix row "
1737 << (matrix->RowMap().MyGID(
1738 static_cast<TrilinosWrappers::types::int_type>(
1739 row)) == false ?
1740 "(nonlocal part)" :
1741 "")
1742 << " has the following indices:" << std::endl;
1743 std::vector<TrilinosWrappers::types::int_type> indices;
1744 const Epetra_CrsGraph *graph =
1745 (nonlocal_matrix.get() != nullptr &&
1746 matrix->RowMap().MyGID(
1747 static_cast<TrilinosWrappers::types::int_type>(row)) ==
1748 false) ?
1749 &nonlocal_matrix->Graph() :
1750 &matrix->Graph();
1751
1752 indices.resize(graph->NumGlobalIndices(row));
1753 int n_indices = 0;
1754 graph->ExtractGlobalRowCopy(row,
1755 indices.size(),
1756 n_indices,
1757 indices.data());
1758 AssertDimension(n_indices, indices.size());
1759
1760 for (TrilinosWrappers::types::int_type i = 0; i < n_indices; ++i)
1761 std::cout << indices[i] << " ";
1762 std::cout << std::endl << std::endl;
1763 Assert(ierr <= 0,
1764 ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1765 }
1766 }
1767 Assert(ierr >= 0, ExcTrilinosError(ierr));
1768 }
1769
1770
1771
1772 SparseMatrix &
1773 SparseMatrix::operator=(const double d)
1774 {
1775 (void)d;
1776 Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
1777 compress(VectorOperation::unknown); // TODO: why do we do this? Should we
1778 // not check for is_compressed?
1779
1780 // As checked above, we are only allowed to use d==0.0, so pass
1781 // a constant zero (instead of a run-time value 'd' that *happens* to
1782 // have a zero value) to the underlying class in hopes that the compiler
1783 // can optimize this somehow.
1784 const int ierr = matrix->PutScalar(/*d=*/0.0);
1785 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1786
1787 if (nonlocal_matrix.get() != nullptr)
1788 {
1789 const int ierr = nonlocal_matrix->PutScalar(/*d=*/0.0);
1790 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1791 }
1792
1793 return *this;
1794 }
1795
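// Usage sketch (illustrative, not part of this file): assigning zero resets
// all stored values but keeps the sparsity pattern, which is the cheap way
// to reuse a matrix between time steps or Newton iterations:
//
//   matrix = 0.; // only d == 0. is allowed here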
1796
1797
1798 void
1799 SparseMatrix::add(const TrilinosScalar factor, const SparseMatrix &rhs)
1800 {
1801 AssertDimension(rhs.m(), m());
1802 AssertDimension(rhs.n(), n());
1803 AssertDimension(local_range().first, rhs.local_range().first);
1804 AssertDimension(local_range().second, rhs.local_range().second);
1805 Assert(matrix->RowMap().SameAs(rhs.matrix->RowMap()),
1806 ExcMessage("Can only add matrices with same distribution of rows"));
1807 Assert(matrix->Filled() && rhs.matrix->Filled(),
1808 ExcMessage("Addition of matrices only allowed if matrices are "
1809 "filled, i.e., compress() has been called"));
1810
1811 const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());
1812
1813 for (const auto row : locally_owned_range_indices())
1814 {
1815 const int row_local = matrix->RowMap().LID(
1816 static_cast<TrilinosWrappers::types::int_type>(row));
1817 Assert((row_local >= 0), ExcAccessToNonlocalRow(row));
1818
1819 // First get a view to the matrix columns of both matrices. Note that
1820 // the data is in local index spaces so we need to be careful not only
1821 // to compare column indices in case they are derived from the same
1822 // map.
1823 int n_entries, rhs_n_entries;
1824 TrilinosScalar *value_ptr, *rhs_value_ptr;
1825 int *index_ptr, *rhs_index_ptr;
1826 int ierr = rhs.matrix->ExtractMyRowView(row_local,
1827 rhs_n_entries,
1828 rhs_value_ptr,
1829 rhs_index_ptr);
1830 (void)ierr;
1831 Assert(ierr == 0, ExcTrilinosError(ierr));
1832
1833 ierr =
1834 matrix->ExtractMyRowView(row_local, n_entries, value_ptr, index_ptr);
1835 Assert(ierr == 0, ExcTrilinosError(ierr));
1836 bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
1837 if (!expensive_checks)
1838 {
1839 // check if the column indices are the same. If yes, can simply
1840 // copy over the data.
1841 expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
1842 static_cast<void *>(rhs_index_ptr),
1843 sizeof(int) * n_entries) != 0;
1844 if (!expensive_checks)
1845 for (int i = 0; i < n_entries; ++i)
1846 value_ptr[i] += rhs_value_ptr[i] * factor;
1847 }
1848 // Now to the expensive case where we need to check all column indices
1849 // against each other (transformed into global index space) and where
1850 // we need to make sure that all entries we are about to add into the
1851 // lhs matrix actually exist
1852 if (expensive_checks)
1853 {
1854 for (int i = 0; i < rhs_n_entries; ++i)
1855 {
1856 if (rhs_value_ptr[i] == 0.)
1857 continue;
1858 const TrilinosWrappers::types::int_type rhs_global_col =
1859 global_column_index(*rhs.matrix, rhs_index_ptr[i]);
1860 int local_col = matrix->ColMap().LID(rhs_global_col);
1861 int *local_index = Utilities::lower_bound(index_ptr,
1862 index_ptr + n_entries,
1863 local_col);
1864 Assert(local_index != index_ptr + n_entries &&
1865 *local_index == local_col,
1866 ExcMessage(
1867 "Adding the entries from the other matrix "
1868 "failed, because the sparsity pattern "
1869 "of that matrix includes more elements than the "
1870 "calling matrix, which is not allowed."));
1871 value_ptr[local_index - index_ptr] += factor * rhs_value_ptr[i];
1872 }
1873 }
1874 }
1875 }
1876
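// Usage sketch (illustrative, not part of this file): as asserted above, the
// sparsity pattern of the right-hand side matrix may not contain entries
// outside the pattern of the calling matrix:
//
//   A.add(2.0, B); // A += 2 * B; pattern(B) must be a subset of pattern(A)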
1877
1878
1879 void
1880 SparseMatrix::transpose()
1881 {
1882 // This only flips a flag that tells
1883 // Trilinos that any vmult operation
1884 // should be done with the
1885 // transpose. However, the matrix
1886 // structure is not reset.
1887 int ierr;
1888
1889 if (!matrix->UseTranspose())
1890 {
1891 ierr = matrix->SetUseTranspose(true);
1892 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1893 }
1894 else
1895 {
1896 ierr = matrix->SetUseTranspose(false);
1897 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1898 }
1899 }
1900
1901
1902
1903 SparseMatrix &
1904 SparseMatrix::operator*=(const TrilinosScalar a)
1905 {
1906 const int ierr = matrix->Scale(a);
1907 Assert(ierr == 0, ExcTrilinosError(ierr));
1908 (void)ierr; // removes -Wunused-variable in optimized mode
1909
1910 return *this;
1911 }
1912
1913
1914
1915 SparseMatrix &
1916 SparseMatrix::operator/=(const TrilinosScalar a)
1917 {
1918 Assert(a != 0, ExcDivideByZero());
1919
1920 const TrilinosScalar factor = 1. / a;
1921
1922 const int ierr = matrix->Scale(factor);
1923 Assert(ierr == 0, ExcTrilinosError(ierr));
1924 (void)ierr; // removes -Wunused-variable in optimized mode
1925
1926 return *this;
1927 }
1928
1929
1930
1931 TrilinosScalar
1932 SparseMatrix::l1_norm() const
1933 {
1934 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1935 return matrix->NormOne();
1936 }
1937
1938
1939
1940 TrilinosScalar
1941 SparseMatrix::linfty_norm() const
1942 {
1943 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1944 return matrix->NormInf();
1945 }
1946
1947
1948
1949 TrilinosScalar
1950 SparseMatrix::frobenius_norm() const
1951 {
1952 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1953 return matrix->NormFrobenius();
1954 }
1955
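// Illustrative sketch (not part of this file): the three norms above require
// a compressed matrix, since they call directly into Epetra:
//
//   const TrilinosScalar n1 = matrix.l1_norm();        // max column sum
//   const TrilinosScalar ni = matrix.linfty_norm();    // max row sum
//   const TrilinosScalar nf = matrix.frobenius_norm(); // sqrt(sum of a_ij^2)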
1956
1957
1958 namespace internal
1959 {
1960 namespace SparseMatrixImplementation
1961 {
1962 template <typename VectorType>
1963 inline void
1964 check_vector_map_equality(const Epetra_CrsMatrix &,
1965 const VectorType &,
1966 const VectorType &)
1967 {}
1968
1969 inline void
1970 check_vector_map_equality(const Epetra_CrsMatrix &m,
1971 const TrilinosWrappers::MPI::Vector &in,
1972 const TrilinosWrappers::MPI::Vector &out)
1973 {
1974 Assert(in.trilinos_partitioner().SameAs(m.DomainMap()) == true,
1975 ExcMessage("The column partitioning of a matrix does not match "
1976 "the partitioning of a vector you are trying to "
1977 "multiply it with. Are you multiplying the "
1978 "matrix with a vector that has ghost elements?"));
1979 Assert(out.trilinos_partitioner().SameAs(m.RangeMap()) == true,
1980 ExcMessage("The row partitioning of a matrix does not match "
1981 "the partitioning of a vector you are trying to "
1982 "put the result of a matrix-vector product in. "
1983 "Are you trying to put the product of the "
1984 "matrix with a vector into a vector that has "
1985 "ghost elements?"));
1986 (void)m;
1987 (void)in;
1988 (void)out;
1989 }
1990 } // namespace SparseMatrixImplementation
1991 } // namespace internal
1992
1993
1994 template <typename VectorType>
1995 std::enable_if_t<
1996 std::is_same_v<typename VectorType::value_type, TrilinosScalar>>
1997 SparseMatrix::vmult(VectorType &dst, const VectorType &src) const
1998 {
1999 Assert(&src != &dst, ExcSourceEqualsDestination());
2000 Assert(matrix->Filled(), ExcMatrixNotCompressed());
2001 (void)src;
2002 (void)dst;
2003
2004 internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
2005 src,
2006 dst);
2007 const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
2008 AssertDimension(dst_local_size, matrix->RangeMap().NumMyPoints());
2009 const size_type src_local_size = internal::end(src) - internal::begin(src);
2010 AssertDimension(src_local_size, matrix->DomainMap().NumMyPoints());
2011
2012 Epetra_MultiVector tril_dst(
2013 View, matrix->RangeMap(), internal::begin(dst), dst_local_size, 1);
2014 Epetra_MultiVector tril_src(View,
2015 matrix->DomainMap(),
2016 const_cast<TrilinosScalar *>(
2017 internal::begin(src)),
2018 src_local_size,
2019 1);
2020
2021 const int ierr = matrix->Multiply(false, tril_src, tril_dst);
2022 Assert(ierr == 0, ExcTrilinosError(ierr));
2023 (void)ierr; // removes -Wunused-variable in optimized mode
2024 }
2025
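// Usage sketch (illustration only), assuming `A` is an initialized
// SparseMatrix and `comm` is the communicator it was built on:
//
//   TrilinosWrappers::MPI::Vector x(A.locally_owned_domain_indices(), comm);
//   TrilinosWrappers::MPI::Vector y(A.locally_owned_range_indices(), comm);
//   A.vmult(y, x); // y = A x; vector maps must match the Domain/Range maps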
2026
2027
2028 template <typename VectorType>
2029 std::enable_if_t<
2030 !std::is_same_v<typename VectorType::value_type, TrilinosScalar>>
2031 SparseMatrix::vmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2032 {
2033 DEAL_II_NOT_IMPLEMENTED();
2034 }
2035
2036
2037
2038 template <typename VectorType>
2039 std::enable_if_t<
2040 std::is_same_v<typename VectorType::value_type, TrilinosScalar>>
2041 SparseMatrix::Tvmult(VectorType &dst, const VectorType &src) const
2042 {
2043 Assert(&src != &dst, ExcSourceEqualsDestination());
2044 Assert(matrix->Filled(), ExcMatrixNotCompressed());
2045
2046 internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
2047 dst,
2048 src);
2049 const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
2050 AssertDimension(dst_local_size, matrix->DomainMap().NumMyPoints());
2051 const size_type src_local_size = internal::end(src) - internal::begin(src);
2052 AssertDimension(src_local_size, matrix->RangeMap().NumMyPoints());
2053
2054 Epetra_MultiVector tril_dst(
2055 View, matrix->DomainMap(), internal::begin(dst), dst_local_size, 1);
2056 Epetra_MultiVector tril_src(View,
2057 matrix->RangeMap(),
2058 const_cast<double *>(internal::begin(src)),
2059 src_local_size,
2060 1);
2061
2062 const int ierr = matrix->Multiply(true, tril_src, tril_dst);
2063 Assert(ierr == 0, ExcTrilinosError(ierr));
2064 (void)ierr; // removes -Wunused-variable in optimized mode
2065 }
2066
2067
2068
2069 template <typename VectorType>
2070 std::enable_if_t<
2071 !std::is_same_v<typename VectorType::value_type, TrilinosScalar>>
2072 SparseMatrix::Tvmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2073 {
2074 DEAL_II_NOT_IMPLEMENTED();
2075 }
2076
2077
2078
2079 template <typename VectorType>
2080 void
2081 SparseMatrix::vmult_add(VectorType &dst, const VectorType &src) const
2082 {
2083 Assert(&src != &dst, ExcSourceEqualsDestination());
2084
2085 // Reinit a temporary vector with fast argument set, which does not
2086 // overwrite the content (to save time).
2087 VectorType tmp_vector;
2088 tmp_vector.reinit(dst, true);
2089 vmult(tmp_vector, src);
2090 dst += tmp_vector;
2091 }
2092
2093
2094
2095 template <typename VectorType>
2096 void
2097 SparseMatrix::Tvmult_add(VectorType &dst, const VectorType &src) const
2098 {
2099 Assert(&src != &dst, ExcSourceEqualsDestination());
2100
2101 // Reinit a temporary vector with fast argument set, which does not
2102 // overwrite the content (to save time).
2103 VectorType tmp_vector;
2104 tmp_vector.reinit(dst, true);
2105 Tvmult(tmp_vector, src);
2106 dst += tmp_vector;
2107 }
2108
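// Illustration: the two *_add variants above compute dst += A src and
// dst += A^T src, respectively, by applying vmult()/Tvmult() into a
// temporary (reinit'ed with omit_zeroing_entries == true) and adding it to
// dst; src and dst must therefore still be distinct vectors.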
2109
2110
2111 TrilinosScalar
2112 SparseMatrix::matrix_norm_square(const MPI::Vector &v) const
2113 {
2114 AssertDimension(m(), v.size());
2115 Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2116
2117 MPI::Vector temp_vector;
2118 temp_vector.reinit(v, true);
2119
2120 vmult(temp_vector, v);
2121 return temp_vector * v;
2122 }
2123
2124
2125
2126 TrilinosScalar
2127 SparseMatrix::matrix_scalar_product(const MPI::Vector &u,
2128 const MPI::Vector &v) const
2129 {
2130 AssertDimension(m(), u.size());
2131 AssertDimension(m(), v.size());
2132 Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2133
2134 MPI::Vector temp_vector;
2135 temp_vector.reinit(v, true);
2136
2137 vmult(temp_vector, v);
2138 return u * temp_vector;
2139 }
2140
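// In formulas: matrix_norm_square(v) returns v^T A v, and
// matrix_scalar_product(u, v) returns u^T A v. Both require a matrix whose
// row and domain maps coincide, i.e. a (parallel-)square matrix.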
2141
2142
2143 namespace internals
2144 {
2145 void
2146 perform_mmult(const SparseMatrix &inputleft,
2147 const SparseMatrix &inputright,
2148 SparseMatrix &result,
2149 const MPI::Vector &V,
2150 const bool transpose_left)
2151 {
2152 const bool use_vector = (V.size() == inputright.m() ? true : false);
2153 if (transpose_left == false)
2154 {
2155 Assert(inputleft.n() == inputright.m(),
2156 ExcDimensionMismatch(inputleft.n(), inputright.m()));
2157 Assert(inputleft.trilinos_matrix().DomainMap().SameAs(
2158 inputright.trilinos_matrix().RangeMap()),
2159 ExcMessage("Parallel partitioning of A and B does not fit."));
2160 }
2161 else
2162 {
2163 Assert(inputleft.m() == inputright.m(),
2164 ExcDimensionMismatch(inputleft.m(), inputright.m()));
2165 Assert(inputleft.trilinos_matrix().RangeMap().SameAs(
2166 inputright.trilinos_matrix().RangeMap()),
2167 ExcMessage("Parallel partitioning of A and B does not fit."));
2168 }
2169
2170 result.clear();
2171
2172 // create a suitable operator B: in case
2173 // we do not use a vector, all we need to
2174 // do is to set the pointer. Otherwise,
2175 // we insert the data from B, but
2176 // multiply each row with the respective
2177 // vector element.
2178 Teuchos::RCP<Epetra_CrsMatrix> mod_B;
2179 if (use_vector == false)
2180 {
2181 mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>(
2182 &inputright.trilinos_matrix()),
2183 false);
2184 }
2185 else
2186 {
2187 mod_B = Teuchos::rcp(
2188 new Epetra_CrsMatrix(Copy, inputright.trilinos_sparsity_pattern()),
2189 true);
2190 mod_B->FillComplete(inputright.trilinos_matrix().DomainMap(),
2191 inputright.trilinos_matrix().RangeMap());
2192 Assert(inputright.local_range() == V.local_range(),
2193 ExcMessage("Parallel distribution of matrix B and vector V "
2194 "does not match."));
2195
2196 const int local_N = inputright.local_size();
2197 for (int i = 0; i < local_N; ++i)
2198 {
2199 int N_entries = -1;
2200 double *new_data, *B_data;
2201 mod_B->ExtractMyRowView(i, N_entries, new_data);
2202 inputright.trilinos_matrix().ExtractMyRowView(i,
2203 N_entries,
2204 B_data);
2205 double value = V.trilinos_vector()[0][i];
2206 for (TrilinosWrappers::types::int_type j = 0; j < N_entries; ++j)
2207 new_data[j] = value * B_data[j];
2208 }
2209 }
2210
2211
2212 SparseMatrix tmp_result(transpose_left ?
2213 inputleft.locally_owned_domain_indices() :
2214 inputleft.locally_owned_range_indices(),
2215 inputright.locally_owned_domain_indices(),
2216 inputleft.get_mpi_communicator());
2217
2218# ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
2219 EpetraExt::MatrixMatrix::Multiply(inputleft.trilinos_matrix(),
2220 transpose_left,
2221 *mod_B,
2222 false,
2223 const_cast<Epetra_CrsMatrix &>(
2224 tmp_result.trilinos_matrix()));
2225# else
2226 Assert(false,
2227 ExcMessage("This function requires that the Trilinos "
2228 "installation found while running the deal.II "
2229 "CMake scripts contains the optional Trilinos "
2230 "package 'EpetraExt'. However, this optional "
2231 "part of Trilinos was not found."));
2232# endif
2233 result.reinit(tmp_result.trilinos_matrix());
2234 }
2235 } // namespace internals
2236
2237
2238 void
2239 SparseMatrix::mmult(SparseMatrix &C,
2240 const SparseMatrix &B,
2241 const MPI::Vector &V) const
2242 {
2243 internals::perform_mmult(*this, B, C, V, false);
2244 }
2245
2246
2247
2248 void
2249 SparseMatrix::Tmmult(SparseMatrix &C,
2250 const SparseMatrix &B,
2251 const MPI::Vector &V) const
2252 {
2253 internals::perform_mmult(*this, B, C, V, true);
2254 }
2255
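// Illustration: mmult() and Tmmult() above forward to perform_mmult() with
// transpose_left set to false and true, respectively. Without a vector V
// they compute C = A B and C = A^T B; with V, each local row i of B is
// first scaled by V(i), yielding C = A diag(V) B and C = A^T diag(V) B.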
2256
2257
2258 void
2259 SparseMatrix::write_ascii()
2260 {
2261 DEAL_II_NOT_IMPLEMENTED();
2262 }
2263
2264
2265
2266 // As of now, no particularly neat
2267 // output is generated in case of
2268 // multiple processors.
2269 void
2270 SparseMatrix::print(std::ostream &out,
2271 const bool print_detailed_trilinos_information) const
2272 {
2273 if (print_detailed_trilinos_information == true)
2274 out << *matrix;
2275 else
2276 {
2277 double *values;
2278 int *indices;
2279 int num_entries;
2280
2281 for (int i = 0; i < matrix->NumMyRows(); ++i)
2282 {
2283 const int ierr =
2284 matrix->ExtractMyRowView(i, num_entries, values, indices);
2285 (void)ierr;
2286 Assert(ierr == 0, ExcTrilinosError(ierr));
2287
2288 for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
2290 << ","
2292 << ") " << values[j] << std::endl;
2293 }
2294 }
2295
2296 AssertThrow(out.fail() == false, ExcIO());
2297 }
2298
2299
2300
2301 SparseMatrix::size_type
2302 SparseMatrix::memory_consumption() const
2303 {
2304 size_type static_memory =
2305 sizeof(*this) + sizeof(*matrix) + sizeof(*matrix->Graph().DataPtr());
2306 return (
2307 (sizeof(TrilinosScalar) + sizeof(TrilinosWrappers::types::int_type)) *
2308 matrix->NumMyNonzeros() +
2309 sizeof(int) * local_size() + static_memory);
2310 }
2311
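// The estimate above counts one TrilinosScalar plus one column index per
// locally stored nonzero, one int per locally owned row, and the
// fixed-size bookkeeping collected in static_memory.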
2312
2313
2314 MPI_Comm
2315 SparseMatrix::get_mpi_communicator() const
2316 {
2317 const Epetra_MpiComm *mpi_comm =
2318 dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
2319 Assert(mpi_comm != nullptr, ExcInternalError());
2320 return mpi_comm->Comm();
2321 }
2322} // namespace TrilinosWrappers
2323
2324
2325namespace TrilinosWrappers
2326{
2327 namespace internal
2328 {
2329 namespace LinearOperatorImplementation
2330 {
2332
2333 TrilinosPayload::TrilinosPayload()
2334 : use_transpose(false)
2335 , communicator(MPI_COMM_SELF)
2336 , domain_map(IndexSet().make_trilinos_map(communicator.Comm()))
2337 , range_map(IndexSet().make_trilinos_map(communicator.Comm()))
2338 {
2339 vmult = [](Range &, const Domain &) {
2340 Assert(false,
2341 ExcMessage("Uninitialized TrilinosPayload::vmult called "
2342 "(Default constructor)"));
2343 };
2344
2345 Tvmult = [](Domain &, const Range &) {
2346 Assert(false,
2347 ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
2348 "(Default constructor)"));
2349 };
2350
2351 inv_vmult = [](Domain &, const Range &) {
2352 Assert(false,
2353 ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2354 "(Default constructor)"));
2355 };
2356
2357 inv_Tvmult = [](Range &, const Domain &) {
2358 Assert(false,
2359 ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2360 "(Default constructor)"));
2361 };
2362 }
2363
2364
2365
2366 TrilinosPayload::TrilinosPayload(
2367 const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2368 const TrilinosWrappers::SparseMatrix &matrix)
2369 : TrilinosPayload(const_cast<Epetra_CrsMatrix &>(
2370 matrix.trilinos_matrix()),
2371 /*op_supports_inverse_operations = */ false,
2372 matrix_exemplar.trilinos_matrix().UseTranspose(),
2373 matrix_exemplar.get_mpi_communicator(),
2374 matrix_exemplar.locally_owned_domain_indices(),
2375 matrix_exemplar.locally_owned_range_indices())
2376 {}
2377
2378
2379
2380 TrilinosPayload::TrilinosPayload(
2381 const TrilinosPayload &payload_exemplar,
2382 const TrilinosWrappers::SparseMatrix &matrix)
2383
2384 : TrilinosPayload(const_cast<Epetra_CrsMatrix &>(
2385 matrix.trilinos_matrix()),
2386 /*op_supports_inverse_operations = */ false,
2387 payload_exemplar.UseTranspose(),
2388 payload_exemplar.get_mpi_communicator(),
2389 payload_exemplar.locally_owned_domain_indices(),
2390 payload_exemplar.locally_owned_range_indices())
2391 {}
2392
2393
2394
2395 TrilinosPayload::TrilinosPayload(
2396 const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2397 const TrilinosWrappers::PreconditionBase &preconditioner)
2398 : TrilinosPayload(preconditioner.trilinos_operator(),
2399 /*op_supports_inverse_operations = */ true,
2400 matrix_exemplar.trilinos_matrix().UseTranspose(),
2401 matrix_exemplar.get_mpi_communicator(),
2402 matrix_exemplar.locally_owned_domain_indices(),
2403 matrix_exemplar.locally_owned_range_indices())
2404 {}
2405
2406
2407
2408 TrilinosPayload::TrilinosPayload(
2409 const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
2410 const TrilinosWrappers::PreconditionBase &preconditioner)
2411 : TrilinosPayload(
2412 preconditioner.trilinos_operator(),
2413 /*op_supports_inverse_operations = */ true,
2414 preconditioner_exemplar.trilinos_operator().UseTranspose(),
2415 preconditioner_exemplar.get_mpi_communicator(),
2416 preconditioner_exemplar.locally_owned_domain_indices(),
2417 preconditioner_exemplar.locally_owned_range_indices())
2418 {}
2419
2420
2421
2422 TrilinosPayload::TrilinosPayload(
2423 const TrilinosPayload &payload_exemplar,
2424 const TrilinosWrappers::PreconditionBase &preconditioner)
2425 : TrilinosPayload(preconditioner.trilinos_operator(),
2426 /*op_supports_inverse_operations = */ true,
2427 payload_exemplar.UseTranspose(),
2428 payload_exemplar.get_mpi_communicator(),
2429 payload_exemplar.locally_owned_domain_indices(),
2430 payload_exemplar.locally_owned_range_indices())
2431 {}
2432
2433
2434
2435 TrilinosPayload::TrilinosPayload(const TrilinosPayload &payload)
2436 : vmult(payload.vmult)
2437 , Tvmult(payload.Tvmult)
2438 , inv_vmult(payload.inv_vmult)
2439 , inv_Tvmult(payload.inv_Tvmult)
2440 , use_transpose(payload.use_transpose)
2441 , communicator(payload.communicator)
2442 , domain_map(payload.domain_map)
2443 , range_map(payload.range_map)
2444 {}
2445
2446
2447
2448 // Composite copy constructor
2449 // This is required for PackagedOperations
2450 TrilinosPayload::TrilinosPayload(const TrilinosPayload &first_op,
2451 const TrilinosPayload &second_op)
2452 : use_transpose(false)
2453 , // The combination of operators provides the exact
2454 // definition of the operation
2455 communicator(first_op.communicator)
2456 , domain_map(second_op.domain_map)
2457 , range_map(first_op.range_map)
2458 {}
2459
2460
2461
2462 TrilinosPayload
2463 TrilinosPayload::identity_payload() const
2464 {
2465 TrilinosPayload return_op(*this);
2466
2467 return_op.vmult = [](Range &tril_dst, const Range &tril_src) {
2468 tril_dst = tril_src;
2469 };
2470
2471 return_op.Tvmult = [](Range &tril_dst, const Range &tril_src) {
2472 tril_dst = tril_src;
2473 };
2474
2475 return_op.inv_vmult = [](Range &tril_dst, const Range &tril_src) {
2476 tril_dst = tril_src;
2477 };
2478
2479 return_op.inv_Tvmult = [](Range &tril_dst, const Range &tril_src) {
2480 tril_dst = tril_src;
2481 };
2482
2483 return return_op;
2484 }
2485
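// The identity payload copies the source vector into the destination for
// all four operations, so it acts as the identity for the forward and
// transposed products as well as for their inverses.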
2486
2487
2488 TrilinosPayload
2489 TrilinosPayload::null_payload() const
2490 {
2491 TrilinosPayload return_op(*this);
2492
2493 return_op.vmult = [](Range &tril_dst, const Domain &) {
2494 const int ierr = tril_dst.PutScalar(0.0);
2495
2496 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2497 };
2498
2499 return_op.Tvmult = [](Domain &tril_dst, const Range &) {
2500 const int ierr = tril_dst.PutScalar(0.0);
2501
2502 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2503 };
2504
2505 return_op.inv_vmult = [](Domain &tril_dst, const Range &) {
2506 AssertThrow(false,
2507 ExcMessage("Cannot compute inverse of null operator"));
2508
2509 const int ierr = tril_dst.PutScalar(0.0);
2510
2511 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2512 };
2513
2514 return_op.inv_Tvmult = [](Range &tril_dst, const Domain &) {
2515 AssertThrow(false,
2516 ExcMessage("Cannot compute inverse of null operator"));
2517
2518 const int ierr = tril_dst.PutScalar(0.0);
2519
2520 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2521 };
2522
2523 return return_op;
2524 }
2525
2526
2527
2528 TrilinosPayload
2529 TrilinosPayload::transpose_payload() const
2530 {
2531 TrilinosPayload return_op(*this);
2532 return_op.transpose();
2533 return return_op;
2534 }
2535
2536
2537
2538 IndexSet
2539 TrilinosPayload::locally_owned_domain_indices() const
2540 {
2541 return IndexSet(domain_map);
2542 }
2543
2544
2545
2546 IndexSet
2547 TrilinosPayload::locally_owned_range_indices() const
2548 {
2549 return IndexSet(range_map);
2550 }
2551
2552
2553
2554 MPI_Comm
2555 TrilinosPayload::get_mpi_communicator() const
2556 {
2557 return communicator.Comm();
2558 }
2559
2560
2561
2562 void
2563 TrilinosPayload::transpose()
2564 {
2565 SetUseTranspose(!use_transpose);
2566 }
2567
2568
2569
2570 bool
2571 TrilinosPayload::UseTranspose() const
2572 {
2573 return use_transpose;
2574 }
2575
2576
2577
2578 int
2579 TrilinosPayload::SetUseTranspose(bool UseTranspose)
2580 {
2581 if (use_transpose != UseTranspose)
2582 {
2583 use_transpose = UseTranspose;
2584 std::swap(domain_map, range_map);
2585 std::swap(vmult, Tvmult);
2586 std::swap(inv_vmult, inv_Tvmult);
2587 }
2588 return 0;
2589 }
2590
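// Transposition is thus a pure relabeling: when the flag changes, the
// domain/range maps and the forward/transpose (and inverse) function pairs
// are swapped, so Apply() and ApplyInverse() below can always call the
// "forward" variants regardless of the current transpose state.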
2591
2592
2593 int
2594 TrilinosPayload::Apply(const VectorType &X, VectorType &Y) const
2595 {
2596 // The transposedness of the operations is taken care of
2597 // when we hit the transpose flag.
2598 vmult(Y, X);
2599 return 0;
2600 }
2601
2602
2603
2604 int
2605 TrilinosPayload::ApplyInverse(const VectorType &Y, VectorType &X) const
2606 {
2607 // The transposedness of the operations is taken care of
2608 // when we hit the transpose flag.
2609 inv_vmult(X, Y);
2610 return 0;
2611 }
2612
2613
2614
2615 const char *
2616 TrilinosPayload::Label() const
2617 {
2618 return "TrilinosPayload";
2619 }
2620
2621
2622
2623 const Epetra_Comm &
2624 TrilinosPayload::Comm() const
2625 {
2626 return communicator;
2627 }
2628
2629
2630
2631 const Epetra_Map &
2632 TrilinosPayload::OperatorDomainMap() const
2633 {
2634 return domain_map;
2635 }
2636
2637
2638
2639 const Epetra_Map &
2640 TrilinosPayload::OperatorRangeMap() const
2641 {
2642 return range_map;
2643 }
2644
2645
2646
2647 bool
2648 TrilinosPayload::HasNormInf() const
2649 {
2650 return false;
2651 }
2652
2653
2654
2655 double
2656 TrilinosPayload::NormInf() const
2657 {
2658 DEAL_II_NOT_IMPLEMENTED();
2659 return 0.0;
2660 }
2661
2662
2663
2664 TrilinosPayload
2665 operator+(const TrilinosPayload &first_op,
2666 const TrilinosPayload &second_op)
2667 {
2668 using Domain = typename TrilinosPayload::Domain;
2669 using Range = typename TrilinosPayload::Range;
2670 using Intermediate = typename TrilinosPayload::VectorType;
2671 using GVMVectorType = TrilinosWrappers::MPI::Vector;
2672
2673 Assert(first_op.locally_owned_domain_indices() ==
2674 second_op.locally_owned_domain_indices(),
2675 ExcMessage(
2676 "Operators are set to work on incompatible IndexSets."));
2677 Assert(first_op.locally_owned_range_indices() ==
2678 second_op.locally_owned_range_indices(),
2679 ExcMessage(
2680 "Operators are set to work on incompatible IndexSets."));
2681
2682 TrilinosPayload return_op(first_op, second_op);
2683
2684 // Capture by copy so the payloads are always valid
2685 return_op.vmult = [first_op, second_op](Range &tril_dst,
2686 const Domain &tril_src) {
2687 // Duplicated from LinearOperator::operator*
2688 // TODO: Template the constructor on GrowingVectorMemory vector type?
2689 GrowingVectorMemory<GVMVectorType> vector_memory;
2690 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2691
2692 // Initialize intermediate vector
2693 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2694 i->reinit(IndexSet(first_op_init_map),
2695 first_op.get_mpi_communicator(),
2696 /*bool omit_zeroing_entries =*/true);
2697
2698 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2699 const size_type i_local_size = i->end() - i->begin();
2700 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2701 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2702 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2703 (void)second_op_init_map;
2704 Intermediate tril_int(View,
2705 first_op_init_map,
2706 const_cast<TrilinosScalar *>(i->begin()),
2707 i_local_size,
2708 1);
2709
2710 // These operators may themselves be transposed or not, so we let them
2711 // decide what the intended outcome is
2712 second_op.Apply(tril_src, tril_int);
2713 first_op.Apply(tril_src, tril_dst);
2714 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2715 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2716 };
2717
2718 return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
2719 const Range &tril_src) {
2720 // Duplicated from LinearOperator::operator*
2721 // TODO: Template the constructor on GrowingVectorMemory vector type?
2722 GrowingVectorMemory<GVMVectorType> vector_memory;
2723 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2724
2725 // These operators may themselves be transposed or not, so we let them
2726 // decide what the intended outcome is
2727 // We must first transpose the operators to get the right IndexSets
2728 // for the input, intermediate and result vectors
2729 const_cast<TrilinosPayload &>(first_op).transpose();
2730 const_cast<TrilinosPayload &>(second_op).transpose();
2731
2732 // Initialize intermediate vector
2733 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2734 i->reinit(IndexSet(first_op_init_map),
2735 first_op.get_mpi_communicator(),
2736 /*bool omit_zeroing_entries =*/true);
2737
2738 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2739 const size_type i_local_size = i->end() - i->begin();
2740 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2741 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2742 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2743 (void)second_op_init_map;
2744 Intermediate tril_int(View,
2745 first_op_init_map,
2746 const_cast<TrilinosScalar *>(i->begin()),
2747 i_local_size,
2748 1);
2749
2750 // These operators may themselves be transposed or not, so we let them
2751 // decide what the intended outcome is
2752 second_op.Apply(tril_src, tril_int);
2753 first_op.Apply(tril_src, tril_dst);
2754 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2755 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2756
2757 // Reset transpose flag
2758 const_cast<TrilinosPayload &>(first_op).transpose();
2759 const_cast<TrilinosPayload &>(second_op).transpose();
2760 };
2761
2762 return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
2763 const Range &tril_src) {
2764 // Duplicated from LinearOperator::operator*
2765 // TODO: Template the constructor on GrowingVectorMemory vector type?
2766 GrowingVectorMemory<GVMVectorType> vector_memory;
2767 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2768
2769 // Initialize intermediate vector
2770 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2771 i->reinit(IndexSet(first_op_init_map),
2772 first_op.get_mpi_communicator(),
2773 /*bool omit_zeroing_entries =*/true);
2774
2775 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2776 const size_type i_local_size = i->end() - i->begin();
2777 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2778 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2779 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2780 (void)second_op_init_map;
2781 Intermediate tril_int(View,
2782 first_op_init_map,
2783 const_cast<TrilinosScalar *>(i->begin()),
2784 i_local_size,
2785 1);
2786
2787 // These operators may themselves be transposed or not, so we let them
2788 // decide what the intended outcome is
2789 second_op.ApplyInverse(tril_src, tril_int);
2790 first_op.ApplyInverse(tril_src, tril_dst);
2791 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2792 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2793 };
2794
2795 return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
2796 const Domain &tril_src) {
2797 // Duplicated from LinearOperator::operator*
2798 // TODO: Template the constructor on GrowingVectorMemory vector type?
2799 GrowingVectorMemory<GVMVectorType> vector_memory;
2800 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2801
2802 // These operators may themselves be transposed or not, so we let them
2803 // decide what the intended outcome is
2804 // We must first transpose the operators to get the right IndexSets
2805 // for the input, intermediate and result vectors
2806 const_cast<TrilinosPayload &>(first_op).transpose();
2807 const_cast<TrilinosPayload &>(second_op).transpose();
2808
2809 // Initialize intermediate vector
2810 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2811 i->reinit(IndexSet(first_op_init_map),
2812 first_op.get_mpi_communicator(),
2813 /*bool omit_zeroing_entries =*/true);
2814
2815 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2816 const size_type i_local_size = i->end() - i->begin();
2817 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2818 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2819 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2820 (void)second_op_init_map;
2821 Intermediate tril_int(View,
2822 first_op_init_map,
2823 const_cast<TrilinosScalar *>(i->begin()),
2824 i_local_size,
2825 1);
2826
2827 // These operators may themselves be transposed or not, so we let them
2828 // decide what the intended outcome is
2829 second_op.ApplyInverse(tril_src, tril_int);
2830 first_op.ApplyInverse(tril_src, tril_dst);
2831 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2832 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2833
2834 // Reset transpose flag
2835 const_cast<TrilinosPayload &>(first_op).transpose();
2836 const_cast<TrilinosPayload &>(second_op).transpose();
2837 };
2838
2839 return return_op;
2840 }
2841
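// Sketch of the semantics (illustration): the payload returned by
// operator+ represents first_op + second_op. Its vmult applies both
// operators to the same source vector and accumulates the results via
// tril_dst.Update(1.0, tril_int, 1.0), i.e.
// dst = first_op(src) + second_op(src).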
2842
2843
2844 TrilinosPayload
2845 operator*(const TrilinosPayload &first_op,
2846 const TrilinosPayload &second_op)
2847 {
2848 using Domain = typename TrilinosPayload::Domain;
2849 using Range = typename TrilinosPayload::Range;
2850 using Intermediate = typename TrilinosPayload::VectorType;
2851 using GVMVectorType = TrilinosWrappers::MPI::Vector;
2852
2853 Assert(first_op.locally_owned_domain_indices() ==
2854 second_op.locally_owned_range_indices(),
2855 ExcMessage(
2856 "Operators are set to work on incompatible IndexSets."));
2857
2858 TrilinosPayload return_op(first_op, second_op);
2859
2860 // Capture by copy so the payloads are always valid
2861 return_op.vmult = [first_op, second_op](Range &tril_dst,
2862 const Domain &tril_src) {
2863 // Duplicated from LinearOperator::operator*
2864 // TODO: Template the constructor on GrowingVectorMemory vector type?
2865 GrowingVectorMemory<GVMVectorType> vector_memory;
2866 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2867
2868 // Initialize intermediate vector
2869 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2870 i->reinit(IndexSet(first_op_init_map),
2871 first_op.get_mpi_communicator(),
2872 /*bool omit_zeroing_entries =*/true);
2873
2874 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2875 const size_type i_local_size = i->end() - i->begin();
2876 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2877 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2878 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2879 (void)second_op_init_map;
2880 Intermediate tril_int(View,
2881 first_op_init_map,
2882 const_cast<TrilinosScalar *>(i->begin()),
2883 i_local_size,
2884 1);
2885
2886 // These operators may themselves be transposed or not, so we let them
2887 // decide what the intended outcome is
2888 second_op.Apply(tril_src, tril_int);
2889 first_op.Apply(tril_int, tril_dst);
2890 };
2891
2892 return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
2893 const Range &tril_src) {
2894 // Duplicated from LinearOperator::operator*
2895 // TODO: Template the constructor on GrowingVectorMemory vector type?
2896 GrowingVectorMemory<GVMVectorType> vector_memory;
2897 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2898
2899 // These operators may themselves be transposed or not, so we let them
2900 // decide what the intended outcome is
2901 // We must first transpose the operators to get the right IndexSets
2902 // for the input, intermediate and result vectors
2903 const_cast<TrilinosPayload &>(first_op).transpose();
2904 const_cast<TrilinosPayload &>(second_op).transpose();
2905
2906 // Initialize intermediate vector
2907 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2908 i->reinit(IndexSet(first_op_init_map),
2909 first_op.get_mpi_communicator(),
2910 /*bool omit_zeroing_entries =*/true);
2911
2912 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2913 const size_type i_local_size = i->end() - i->begin();
2914 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2915 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2916 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2917 (void)second_op_init_map;
2918 Intermediate tril_int(View,
2919 first_op_init_map,
2920 const_cast<TrilinosScalar *>(i->begin()),
2921 i_local_size,
2922 1);
2923
2924 // Apply the operators in the reverse order to vmult
2925 first_op.Apply(tril_src, tril_int);
2926 second_op.Apply(tril_int, tril_dst);
2927
2928 // Reset transpose flag
2929 const_cast<TrilinosPayload &>(first_op).transpose();
2930 const_cast<TrilinosPayload &>(second_op).transpose();
2931 };
2932
2933 return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
2934 const Range &tril_src) {
2935 // Duplicated from LinearOperator::operator*
2936 // TODO: Template the constructor on GrowingVectorMemory vector type?
2937 GrowingVectorMemory<GVMVectorType> vector_memory;
2938 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2939
2940 // Initialize intermediate vector
2941 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2942 i->reinit(IndexSet(first_op_init_map),
2943 first_op.get_mpi_communicator(),
2944 /*bool omit_zeroing_entries =*/true);
2945
2946 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2947 const size_type i_local_size = i->end() - i->begin();
2948 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2949 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2950 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2951 (void)second_op_init_map;
2952 Intermediate tril_int(View,
2953 first_op_init_map,
2954 const_cast<TrilinosScalar *>(i->begin()),
2955 i_local_size,
2956 1);
2957
2958 // Apply the operators in the reverse order to vmult
2959 // and the same order as Tvmult
2960 first_op.ApplyInverse(tril_src, tril_int);
2961 second_op.ApplyInverse(tril_int, tril_dst);
2962 };
2963
2964 return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
2965 const Domain &tril_src) {
2966 // Duplicated from LinearOperator::operator*
2967 // TODO: Template the constructor on GrowingVectorMemory vector type?
2968 GrowingVectorMemory<GVMVectorType> vector_memory;
2969 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2970
2971 // These operators may themselves be transposed or not, so we let them
2972 // decide what the intended outcome is
2973 // We must first transpose the operators to get the right IndexSets
2974 // for the input, intermediate and result vectors
2975 const_cast<TrilinosPayload &>(first_op).transpose();
2976 const_cast<TrilinosPayload &>(second_op).transpose();
2977
2978 // Initialize intermediate vector
2979 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2980 i->reinit(IndexSet(first_op_init_map),
2981 first_op.get_mpi_communicator(),
2982 /*bool omit_zeroing_entries =*/true);
2983
2984 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2985 const size_type i_local_size = i->end() - i->begin();
2986 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2987 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2988 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2989 (void)second_op_init_map;
2990 Intermediate tril_int(View,
2991 first_op_init_map,
2992 const_cast<TrilinosScalar *>(i->begin()),
2993 i_local_size,
2994 1);
2995
2996 // These operators may themselves be transposed or not, so we let them
2997 // decide what the intended outcome is
2998 // Apply the operators in the reverse order to Tvmult
2999 // and the same order as vmult
3000 second_op.ApplyInverse(tril_src, tril_int);
3001 first_op.ApplyInverse(tril_int, tril_dst);
3002
3003 // Reset transpose flag
3004 const_cast<TrilinosPayload &>(first_op).transpose();
3005 const_cast<TrilinosPayload &>(second_op).transpose();
3006 };
3007
3008 return return_op;
3009 }
3010
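// Sketch of the semantics (illustration): the payload returned by
// operator* represents the composition first_op * second_op. Its vmult
// computes dst = first_op(second_op(src)), while the transpose and inverse
// variants reverse the application order after flipping both transpose
// flags.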
3011 } // namespace LinearOperatorImplementation
3012 } /* namespace internal */
3013} /* namespace TrilinosWrappers */
3014
3015
3016
3017// explicit instantiations
3018# include "lac/trilinos_sparse_matrix.inst"
3019
3020# ifndef DOXYGEN
3021// TODO: put these instantiations into generic file
3022namespace TrilinosWrappers
3023{
3024 template void
3025 SparseMatrix::reinit(const dealii::SparsityPattern &);
3026
3027 template void
3028 SparseMatrix::reinit(const DynamicSparsityPattern &);
3029
3030 template void
3031 SparseMatrix::reinit(const IndexSet &,
3032 const IndexSet &,
3033 const dealii::SparsityPattern &,
3034 const MPI_Comm,
3035 const bool);
3036
3037 template void
3038 SparseMatrix::reinit(const IndexSet &,
3039 const IndexSet &,
3040 const DynamicSparsityPattern &,
3041 const MPI_Comm,
3042 const bool);
3043
3044 template void
3045 SparseMatrix::vmult(MPI::Vector &, const MPI::Vector &) const;
3046
3047 template void
3048 SparseMatrix::vmult(dealii::Vector<double> &,
3049 const dealii::Vector<double> &) const;
3050
3051 template void
3052 SparseMatrix::vmult(
3053 dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::Host> &,
3054 const dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
3055 &) const;
3056
3057 template void
3058 SparseMatrix::vmult(
3059 dealii::LinearAlgebra::distributed::Vector<float, MemorySpace::Host> &,
3060 const dealii::LinearAlgebra::distributed::Vector<float, MemorySpace::Host>
3061 &) const;
3062
3063# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3064# if defined(HAVE_TPETRA_INST_DOUBLE)
3065 template void
3066 SparseMatrix::vmult(
3067 dealii::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Host> &,
3068 const dealii::LinearAlgebra::TpetraWrappers::Vector<double,
3069 MemorySpace::Host> &)
3070 const;
3071 template void
3072 SparseMatrix::vmult(
3073 dealii::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Default>
3074 &,
3075 const dealii::LinearAlgebra::TpetraWrappers::Vector<double,
3076 MemorySpace::Default> &)
3077 const;
3078# endif
3079
3080# if defined(HAVE_TPETRA_INST_FLOAT)
3081 template void
3082 SparseMatrix::vmult(
3083 dealii::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Host> &,
3084 const dealii::LinearAlgebra::TpetraWrappers::Vector<float,
3085 MemorySpace::Host> &)
3086 const;
3087 template void
3088 SparseMatrix::vmult(
3089 dealii::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Default>
3090 &,
3091 const dealii::LinearAlgebra::TpetraWrappers::Vector<float,
3092 MemorySpace::Default> &)
3093 const;
3094# endif
3095# endif
3096
3097 template void
3098 SparseMatrix::vmult(
3099 dealii::LinearAlgebra::EpetraWrappers::Vector &,
3100 const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
3101
3102 template void
3103 SparseMatrix::Tvmult(MPI::Vector &, const MPI::Vector &) const;
3104
3105 template void
3106 SparseMatrix::Tvmult(dealii::Vector<double> &,
3107 const dealii::Vector<double> &) const;
3108
3109 template void
3110 SparseMatrix::Tvmult(
3111 dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::Host> &,
3112 const dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
3113 &) const;
3114
3115# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3116# if defined(HAVE_TPETRA_INST_DOUBLE)
3117 template void
3118 SparseMatrix::Tvmult(
3119 dealii::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Host> &,
3120 const dealii::LinearAlgebra::TpetraWrappers::Vector<double,
3121 MemorySpace::Host> &)
3122 const;
3123 template void
3124 SparseMatrix::Tvmult(
3125 dealii::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Default>
3126 &,
3127 const dealii::LinearAlgebra::TpetraWrappers::Vector<double,
3128 MemorySpace::Default> &)
3129 const;
3130# endif
3131
3132# if defined(HAVE_TPETRA_INST_FLOAT)
3133 template void
3134 SparseMatrix::Tvmult(
3135 dealii::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Host> &,
3136 const dealii::LinearAlgebra::TpetraWrappers::Vector<float,
3137 MemorySpace::Host> &)
3138 const;
3139 template void
3140 SparseMatrix::Tvmult(
3141 dealii::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Default>
3142 &,
3143 const dealii::LinearAlgebra::TpetraWrappers::Vector<float,
3144 MemorySpace::Default> &)
3145 const;
3146# endif
3147# endif
3148
3149 template void
3150 SparseMatrix::Tvmult(
3151 dealii::LinearAlgebra::EpetraWrappers::Vector &,
3152 const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
3153
3154 template void
3155 SparseMatrix::vmult_add(MPI::Vector &, const MPI::Vector &) const;
3156
3157 template void
3158 SparseMatrix::vmult_add(dealii::Vector<double> &,
3159 const dealii::Vector<double> &) const;
3160
3161 template void
3162 SparseMatrix::vmult_add(
3163 dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::Host> &,
3164 const dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
3165 &) const;
3166
3167# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3168# if defined(HAVE_TPETRA_INST_DOUBLE)
3169 template void
3170 SparseMatrix::vmult_add(
3171 dealii::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Host> &,
3172 const dealii::LinearAlgebra::TpetraWrappers::Vector<double,
3173 MemorySpace::Host> &)
3174 const;
3175 template void
3176 SparseMatrix::vmult_add(
3177 dealii::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Default>
3178 &,
3179 const dealii::LinearAlgebra::TpetraWrappers::Vector<double,
3180 MemorySpace::Default> &)
3181 const;
3182# endif
3183
3184# if defined(HAVE_TPETRA_INST_FLOAT)
3185 template void
3186 SparseMatrix::vmult_add(
3187 dealii::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Host> &,
3188 const dealii::LinearAlgebra::TpetraWrappers::Vector<float,
3189 MemorySpace::Host> &)
3190 const;
3191 template void
3192 SparseMatrix::vmult_add(
3193 dealii::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Default>
3194 &,
3195 const dealii::LinearAlgebra::TpetraWrappers::Vector<float,
3196 MemorySpace::Default> &)
3197 const;
3198# endif
3199# endif
3200
3201 template void
3202 SparseMatrix::vmult_add(
3203 dealii::LinearAlgebra::EpetraWrappers::Vector &,
3204 const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
3205
3206 template void
3207 SparseMatrix::Tvmult_add(MPI::Vector &, const MPI::Vector &) const;
3208
3209 template void
3210 SparseMatrix::Tvmult_add(dealii::Vector<double> &,
3211 const dealii::Vector<double> &) const;
3212
3213 template void
3214 SparseMatrix::Tvmult_add(
3215 dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::Host> &,
3216 const dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
3217 &) const;
3218
3219 template void
3220 SparseMatrix::Tvmult_add(
3221 dealii::LinearAlgebra::distributed::Vector<float, MemorySpace::Host> &,
3222 const dealii::LinearAlgebra::distributed::Vector<float, MemorySpace::Host>
3223 &) const;
3224
3225
3226# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3227# if defined(HAVE_TPETRA_INST_DOUBLE)
3228 template void
3229 SparseMatrix::Tvmult_add(
3230 dealii::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Host> &,
3231 const dealii::LinearAlgebra::TpetraWrappers::Vector<double,
3232 MemorySpace::Host> &)
3233 const;
3234 template void
3235 SparseMatrix::Tvmult_add(
3236 dealii::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Default>
3237 &,
3238 const dealii::LinearAlgebra::TpetraWrappers::Vector<double,
3239 MemorySpace::Default> &)
3240 const;
3241# endif
3242
3243# if defined(HAVE_TPETRA_INST_FLOAT)
3244 template void
3245 SparseMatrix::Tvmult_add(
3246 dealii::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Host> &,
3247 const dealii::LinearAlgebra::TpetraWrappers::Vector<float,
3248 MemorySpace::Host> &)
3249 const;
3250 template void
3251 SparseMatrix::Tvmult_add(
3252 dealii::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Default>
3253 &,
3254 const dealii::LinearAlgebra::TpetraWrappers::Vector<float,
3255 MemorySpace::Default> &)
3256 const;
3257# endif
3258# endif
3259
3260 template void
3261 SparseMatrix::Tvmult_add(
3262 dealii::LinearAlgebra::EpetraWrappers::Vector &,
3263 const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
3264} // namespace TrilinosWrappers
3265# endif // DOXYGEN
3266
3267DEAL_II_NAMESPACE_CLOSE
3268
3269#endif // DEAL_II_WITH_TRILINOS