trilinos_sparse_matrix.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2008 - 2025 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/lac/trilinos_sparse_matrix.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/base/utilities.h>

#  include <deal.II/lac/dynamic_sparsity_pattern.h>
#  include <deal.II/lac/sparse_matrix.h>
#  include <deal.II/lac/sparsity_pattern.h>
#  include <deal.II/lac/trilinos_epetra_vector.h>
#  include <deal.II/lac/trilinos_index_access.h>
#  include <deal.II/lac/trilinos_sparsity_pattern.h>
#  include <deal.II/lac/trilinos_tpetra_vector.h>

#  include <boost/container/small_vector.hpp>

#  ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
#    include <EpetraExt_MatrixMatrix.h>
#  endif
#  include <Epetra_Export.h>
#  include <Teuchos_RCP.hpp>
#  include <ml_epetra_utils.h>
#  include <ml_struct.h>

#  include <memory>

DEAL_II_NAMESPACE_OPEN
namespace TrilinosWrappers
{
  namespace internal
  {
    template <typename VectorType>
    typename VectorType::value_type *
    begin(VectorType &V)
    {
      return V.begin();
    }

    template <typename VectorType>
    const typename VectorType::value_type *
    begin(const VectorType &V)
    {
      return V.begin();
    }

    template <typename VectorType>
    typename VectorType::value_type *
    end(VectorType &V)
    {
      return V.end();
    }

    template <typename VectorType>
    const typename VectorType::value_type *
    end(const VectorType &V)
    {
      return V.end();
    }

    template <>
    double *
    begin(LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0];
    }

    template <>
    const double *
    begin(const LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0];
    }

    template <>
    double *
    end(LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
    }

    template <>
    const double *
    end(const LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
    }

#  ifdef DEAL_II_TRILINOS_WITH_TPETRA
    template <typename Number, typename MemorySpace>
    Number *
    begin(LinearAlgebra::TpetraWrappers::Vector<Number, MemorySpace> &V)
    {
      return V.trilinos_vector().getDataNonConst().get();
    }

    template <typename Number, typename MemorySpace>
    const Number *
    begin(const LinearAlgebra::TpetraWrappers::Vector<Number, MemorySpace> &V)
    {
      return V.trilinos_vector().getData().get();
    }

    template <typename Number, typename MemorySpace>
    Number *
    end(LinearAlgebra::TpetraWrappers::Vector<Number, MemorySpace> &V)
    {
      return V.trilinos_vector().getDataNonConst().get() +
             V.trilinos_vector().getLocalLength();
    }

    template <typename Number, typename MemorySpace>
    const Number *
    end(const LinearAlgebra::TpetraWrappers::Vector<Number, MemorySpace> &V)
    {
      return V.trilinos_vector().getData().get() +
             V.trilinos_vector().getLocalLength();
    }
#  endif
  } // namespace internal


  namespace SparseMatrixIterators
  {
    void
    AccessorBase::visit_present_row()
    {
      // if we are asked to visit the past-the-end line, then simply
      // release all our caches and go on with life.
      //
      // do the same if the row we're supposed to visit is not locally
      // owned. this is simply going to make non-locally owned rows
      // look like they're empty
      if ((this->a_row == matrix->m()) ||
          (matrix->in_local_range(this->a_row) == false))
        {
          colnum_cache.reset();
          value_cache.reset();

          return;
        }

      // get a representation of the present row
      int             ncols;
      const size_type colnums = matrix->row_length(this->a_row);
      if (value_cache.get() == nullptr)
        {
          value_cache  = std::make_shared<std::vector<TrilinosScalar>>(colnums);
          colnum_cache = std::make_shared<std::vector<size_type>>(colnums);
        }
      else
        {
          value_cache->resize(colnums);
          colnum_cache->resize(colnums);
        }

      const int ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy(
        this->a_row,
        colnums,
        ncols,
        value_cache->data(),
        reinterpret_cast<TrilinosWrappers::types::int_type *>(
          colnum_cache->data()));
      AssertDimension(ncols, colnums);
      AssertThrow(ierr == 0, ExcTrilinosError(ierr));

      // copy it into our caches if the
      // line isn't empty. if it is, then
      // we've done something wrong, since
      // we shouldn't have initialized an
      // iterator for an empty line (what
      // would it point to?)
    }
  } // namespace SparseMatrixIterators


  // The constructor is actually the
  // only point where we have to check
  // whether we build a serial or a
  // parallel Trilinos matrix.
  // Actually, it does not even matter
  // how many threads there are, but
  // only if we use an MPI compiler or
  // a standard compiler. So, even one
  // thread on a configuration with
  // MPI will still get a parallel
  // interface.
  SparseMatrix::SparseMatrix()
    : column_space_map(new Epetra_Map(0, 0, Utilities::Trilinos::comm_self()))
    , matrix(
        new Epetra_FECrsMatrix(View, *column_space_map, *column_space_map, 0))
    , last_action(Zero)
    , compressed(true)
  {
    matrix->FillComplete();
  }
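
  // Illustrative usage sketch (not part of the library source): the same
  // deal.II code works serially and with MPI; with MPI, the IndexSet-based
  // constructors below distribute the rows across processes. The names
  // 'locally_owned_rows', 'my_first_row', 'my_last_row', and
  // 'mpi_communicator' are placeholders.
  //
  //   IndexSet locally_owned_rows(n_global_rows);
  //   locally_owned_rows.add_range(my_first_row, my_last_row);
  //   TrilinosWrappers::SparseMatrix A(locally_owned_rows,
  //                                    mpi_communicator,
  //                                    /*n_max_entries_per_row=*/5);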



  SparseMatrix::SparseMatrix(const size_type     m,
                             const size_type     n,
                             const unsigned int  n_max_entries_per_row)
    : column_space_map(
        new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
                       0,
                       Utilities::Trilinos::comm_self()))
    ,

    // on one processor only, we know how the
    // columns of the matrix will be
    // distributed (everything on one
    // processor), so we can hand in this
    // information to the constructor. we
    // can't do so in parallel, where the
    // information from columns is only
    // available when entries have been added
    matrix(new Epetra_FECrsMatrix(
      Copy,
      Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
                 0,
                 Utilities::Trilinos::comm_self()),
      *column_space_map,
      n_max_entries_per_row,
      false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const size_type                  m,
                             const size_type                  n,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(
        new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
                       0,
                       Utilities::Trilinos::comm_self()))
    , matrix(new Epetra_FECrsMatrix(
        Copy,
        Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
                   0,
                   Utilities::Trilinos::comm_self()),
        *column_space_map,
        reinterpret_cast<int *>(
          const_cast<unsigned int *>(n_entries_per_row.data())),
        false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet    &parallel_partitioning,
                             const MPI_Comm     communicator,
                             const unsigned int n_max_entries_per_row)
    : column_space_map(new Epetra_Map(
        parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(Copy,
                                    *column_space_map,
                                    n_max_entries_per_row,
                                    false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet                  &parallel_partitioning,
                             const MPI_Comm                   communicator,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(new Epetra_Map(
        parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(Copy,
                                    *column_space_map,
                                    reinterpret_cast<int *>(
                                      const_cast<unsigned int *>(
                                        n_entries_per_row.data())),
                                    false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
                             const IndexSet &col_parallel_partitioning,
                             const MPI_Comm  communicator,
                             const size_type n_max_entries_per_row)
    : column_space_map(new Epetra_Map(
        col_parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(
        Copy,
        row_parallel_partitioning.make_trilinos_map(communicator, false),
        n_max_entries_per_row,
        false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet                  &row_parallel_partitioning,
                             const IndexSet                  &col_parallel_partitioning,
                             const MPI_Comm                   communicator,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(new Epetra_Map(
        col_parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(
        Copy,
        row_parallel_partitioning.make_trilinos_map(communicator, false),
        reinterpret_cast<int *>(
          const_cast<unsigned int *>(n_entries_per_row.data())),
        false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const SparsityPattern &sparsity_pattern)
    : column_space_map(new Epetra_Map(sparsity_pattern.domain_partitioner()))
    , matrix(
        new Epetra_FECrsMatrix(Copy,
                               sparsity_pattern.trilinos_sparsity_pattern(),
                               false))
    , last_action(Zero)
    , compressed(true)
  {
    Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true,
           ExcMessage(
             "The Trilinos sparsity pattern has not been compressed."));
    compress(VectorOperation::insert);
  }



  SparseMatrix::SparseMatrix(SparseMatrix &&other) noexcept
    : column_space_map(std::move(other.column_space_map))
    , matrix(std::move(other.matrix))
    , nonlocal_matrix(std::move(other.nonlocal_matrix))
    , nonlocal_matrix_exporter(std::move(other.nonlocal_matrix_exporter))
    , last_action(other.last_action)
    , compressed(other.compressed)
  {
    other.last_action = Zero;
    other.compressed  = false;
  }



  void
  SparseMatrix::copy_from(const SparseMatrix &rhs)
  {
    if (this == &rhs)
      return;

    nonlocal_matrix.reset();
    nonlocal_matrix_exporter.reset();

    // check whether we need to update the whole matrix layout (we have
    // different maps or if we detect a row where the columns of the two
    // matrices do not match)
    bool needs_deep_copy =
      !matrix->RowMap().SameAs(rhs.matrix->RowMap()) ||
      !matrix->ColMap().SameAs(rhs.matrix->ColMap()) ||
      !matrix->DomainMap().SameAs(rhs.matrix->DomainMap()) ||
      n_nonzero_elements() != rhs.n_nonzero_elements();
    if (!needs_deep_copy)
      {
        // Try to copy all the rows of the matrix one by one. In case of error
        // (i.e., the column indices are different), we need to abort and blow
        // away the matrix.
        for (const auto row : locally_owned_range_indices())
          {
            const int row_local = matrix->RowMap().LID(
              static_cast<TrilinosWrappers::types::int_type>(row));
            Assert((row_local >= 0), ExcAccessToNonlocalRow(row));

            int             n_entries, rhs_n_entries;
            TrilinosScalar *value_ptr, *rhs_value_ptr;
            int            *index_ptr, *rhs_index_ptr;
            int             ierr = rhs.matrix->ExtractMyRowView(row_local,
                                                    rhs_n_entries,
                                                    rhs_value_ptr,
                                                    rhs_index_ptr);
            Assert(ierr == 0, ExcTrilinosError(ierr));

            ierr = matrix->ExtractMyRowView(row_local,
                                            n_entries,
                                            value_ptr,
                                            index_ptr);
            Assert(ierr == 0, ExcTrilinosError(ierr));

            if (n_entries != rhs_n_entries ||
                std::memcmp(static_cast<void *>(index_ptr),
                            static_cast<void *>(rhs_index_ptr),
                            sizeof(int) * n_entries) != 0)
              {
                needs_deep_copy = true;
                break;
              }

            for (int i = 0; i < n_entries; ++i)
              value_ptr[i] = rhs_value_ptr[i];
          }
      }

    if (needs_deep_copy)
      {
        column_space_map =
          std::make_unique<Epetra_Map>(rhs.trilinos_matrix().DomainMap());

        // release memory before reallocation
        matrix = std::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);

        matrix->FillComplete(*column_space_map, matrix->RowMap());
      }

    if (rhs.nonlocal_matrix.get() != nullptr)
      nonlocal_matrix =
        std::make_unique<Epetra_CrsMatrix>(Copy, rhs.nonlocal_matrix->Graph());
  }
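
  // Usage sketch (illustrative): copy_from() keeps the existing storage of
  // the destination whenever the maps and column indices of both matrices
  // agree, and only reallocates otherwise.
  //
  //   TrilinosWrappers::SparseMatrix A, B;
  //   // ... initialize B, assemble into it, compress it ...
  //   A.copy_from(B);   // A now holds its own copy of B's entries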



  namespace
  {
    template <typename SparsityPatternType>
    void
    reinit_matrix(const IndexSet                      &row_parallel_partitioning,
                  const IndexSet                      &column_parallel_partitioning,
                  const SparsityPatternType           &sparsity_pattern,
                  const bool                           exchange_data,
                  const MPI_Comm                       communicator,
                  std::unique_ptr<Epetra_Map>         &column_space_map,
                  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
                  std::unique_ptr<Epetra_CrsMatrix>   &nonlocal_matrix,
                  std::unique_ptr<Epetra_Export>      &nonlocal_matrix_exporter)
    {
      // release memory before reallocation
      matrix.reset();
      nonlocal_matrix.reset();
      nonlocal_matrix_exporter.reset();

      column_space_map = std::make_unique<Epetra_Map>(
        column_parallel_partitioning.make_trilinos_map(communicator, false));

      if (column_space_map->Comm().MyPID() == 0)
        {
          AssertDimension(sparsity_pattern.n_rows(),
                          row_parallel_partitioning.size());
          AssertDimension(sparsity_pattern.n_cols(),
                          column_parallel_partitioning.size());
        }

      Epetra_Map row_space_map =
        row_parallel_partitioning.make_trilinos_map(communicator, false);

      // if we want to exchange data, build a usual Trilinos sparsity pattern
      // and let that handle the exchange. otherwise, manually create a
      // CrsGraph, which consumes considerably less memory because it can set
      // the correct number of indices right from the start
      if (exchange_data)
        {
          SparsityPattern trilinos_sparsity;
          trilinos_sparsity.reinit(row_parallel_partitioning,
                                   column_parallel_partitioning,
                                   sparsity_pattern,
                                   communicator,
                                   exchange_data);
          matrix = std::make_unique<Epetra_FECrsMatrix>(
            Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);

          return;
        }

      const SparseMatrix::size_type first_row =
        TrilinosWrappers::min_my_gid(row_space_map);
      const SparseMatrix::size_type last_row =
        TrilinosWrappers::max_my_gid(row_space_map) + 1;
      std::vector<int> n_entries_per_row(last_row - first_row);

      for (SparseMatrix::size_type row = first_row; row < last_row; ++row)
        n_entries_per_row[row - first_row] = sparsity_pattern.row_length(row);

      // The deal.II notation of a sparsity pattern corresponds to the Epetra
      // concept of a graph. Hence, we generate a graph by copying the
      // sparsity pattern into it, and then build up the matrix from the
      // graph. This is considerably faster than directly filling elements
      // into the matrix. Moreover, it consumes less memory, since the
      // internal reordering is done on ints only, and we can leave the
      // doubles aside.

      // for more than one processor, need to specify only row map first and
      // let the matrix entries decide about the column map (which says which
      // columns are present in the matrix, not to be confused with the
      // col_map that tells how the domain dofs of the matrix will be
      // distributed). for only one processor, we can directly assign the
      // columns as well. Compare this with bug #4123 in the Sandia Bugzilla.
      std::unique_ptr<Epetra_CrsGraph> graph;
      if (row_space_map.Comm().NumProc() > 1)
        graph = std::make_unique<Epetra_CrsGraph>(Copy,
                                                  row_space_map,
                                                  n_entries_per_row.data(),
                                                  true);
      else
        graph = std::make_unique<Epetra_CrsGraph>(Copy,
                                                  row_space_map,
                                                  *column_space_map,
                                                  n_entries_per_row.data(),
                                                  true);

      // This function assumes that the sparsity pattern sits on all
      // processors (completely). The parallel version uses an Epetra graph
      // that is already distributed.

      // now insert the indices
      std::vector<TrilinosWrappers::types::int_type> row_indices;

      for (SparseMatrix::size_type row = first_row; row < last_row; ++row)
        {
          const int row_length = sparsity_pattern.row_length(row);
          if (row_length == 0)
            continue;

          row_indices.resize(row_length, -1);
          {
            typename SparsityPatternType::iterator p =
              sparsity_pattern.begin(row);
            for (SparseMatrix::size_type col = 0;
                 p != sparsity_pattern.end(row);
                 ++p, ++col)
              row_indices[col] = p->column();
          }
          graph->Epetra_CrsGraph::InsertGlobalIndices(row,
                                                      row_length,
                                                      row_indices.data());
        }

      // Eventually, optimize the graph structure (sort indices, make memory
      // contiguous, etc). note that the documentation of the function indeed
      // states that we first need to provide the column (domain) map and then
      // the row (range) map
      graph->FillComplete(*column_space_map, row_space_map);
      graph->OptimizeStorage();

      // check whether we got the number of columns right.
      AssertDimension(sparsity_pattern.n_cols(),
                      TrilinosWrappers::n_global_cols(*graph));

      // And now finally generate the matrix.
      matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
    }
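
    // Illustrative sketch of how user code reaches the helper above (the
    // identifiers 'dsp', 'owned_rows', 'n_dofs', and 'mpi_communicator' are
    // placeholders): one builds a deal.II sparsity pattern and lets
    // SparseMatrix::reinit() convert it into an Epetra graph and matrix in
    // one shot.
    //
    //   DynamicSparsityPattern dsp(n_dofs, n_dofs);
    //   // ... fill dsp, e.g. via DoFTools::make_sparsity_pattern ...
    //   TrilinosWrappers::SparseMatrix A;
    //   A.reinit(owned_rows, owned_rows, dsp, mpi_communicator);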



    // for the non-local graph, we need to circumvent the problem that some
    // processors will not add into the non-local graph at all: We do not want
    // to insert dummy elements on >5000 processors because that gets very
    // slow. Thus, we need to set the flag in Epetra_CrsGraph that marks the
    // indices as global. Since the respective member function is protected,
    // we expose this information by deriving a class from Epetra_CrsGraph
    // for the sole purpose of creating the data structure
    class Epetra_CrsGraphMod : public Epetra_CrsGraph
    {
    public:
      Epetra_CrsGraphMod(const Epetra_Map &row_map,
                         const int        *n_entries_per_row)
        : Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
      {}

      void
      SetIndicesAreGlobal()
      {
        this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
      }
    };



    // specialization for DynamicSparsityPattern which can provide us with
    // more information about the non-locally owned rows
    template <>
    void
    reinit_matrix(const IndexSet                      &row_parallel_partitioning,
                  const IndexSet                      &column_parallel_partitioning,
                  const DynamicSparsityPattern        &sparsity_pattern,
                  const bool                           exchange_data,
                  const MPI_Comm                       communicator,
                  std::unique_ptr<Epetra_Map>         &column_space_map,
                  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
                  std::unique_ptr<Epetra_CrsMatrix>   &nonlocal_matrix,
                  std::unique_ptr<Epetra_Export>      &nonlocal_matrix_exporter)
    {
      matrix.reset();
      nonlocal_matrix.reset();
      nonlocal_matrix_exporter.reset();

      column_space_map = std::make_unique<Epetra_Map>(
        column_parallel_partitioning.make_trilinos_map(communicator, false));

      AssertDimension(sparsity_pattern.n_rows(),
                      row_parallel_partitioning.size());
      AssertDimension(sparsity_pattern.n_cols(),
                      column_parallel_partitioning.size());

      Epetra_Map row_space_map =
        row_parallel_partitioning.make_trilinos_map(communicator, false);

      IndexSet relevant_rows(sparsity_pattern.row_index_set());
      // serial case
      if (relevant_rows.size() == 0)
        {
          relevant_rows.set_size(
            TrilinosWrappers::n_global_elements(row_space_map));
          relevant_rows.add_range(
            0, TrilinosWrappers::n_global_elements(row_space_map));
        }
      relevant_rows.compress();
      Assert(relevant_rows.n_elements() >=
               static_cast<unsigned int>(row_space_map.NumMyElements()),
             ExcMessage(
               "Locally relevant rows of sparsity pattern must contain "
               "all locally owned rows"));

      // check whether the relevant rows correspond to exactly the same map as
      // the owned rows. In that case, do not create the nonlocal graph and
      // fill the columns on demand
      const bool have_ghost_rows = [&]() {
        const std::vector<::dealii::types::global_dof_index> indices =
          relevant_rows.get_index_vector();
        Epetra_Map relevant_map(
          TrilinosWrappers::types::int_type(-1),
          TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
          (indices.empty() ?
             nullptr :
             reinterpret_cast<const TrilinosWrappers::types::int_type *>(
               indices.data())),
          0,
          row_space_map.Comm());
        return !relevant_map.SameAs(row_space_map);
      }();

      std::vector<TrilinosWrappers::types::int_type> ghost_rows;
      std::vector<int> n_entries_per_row(row_space_map.NumMyElements());
      std::vector<int> n_entries_per_ghost_row;
      {
        SparseMatrix::size_type own = 0;
        for (const auto global_row : relevant_rows)
          {
            if (row_space_map.MyGID(
                  static_cast<TrilinosWrappers::types::int_type>(global_row)))
              n_entries_per_row[own++] =
                sparsity_pattern.row_length(global_row);
            else if (sparsity_pattern.row_length(global_row) > 0)
              {
                ghost_rows.push_back(global_row);
                n_entries_per_ghost_row.push_back(
                  sparsity_pattern.row_length(global_row));
              }
          }
      }

      Epetra_Map off_processor_map(-1,
                                   ghost_rows.size(),
                                   (ghost_rows.size() > 0) ?
                                     (ghost_rows.data()) :
                                     nullptr,
                                   0,
                                   row_space_map.Comm());

      std::unique_ptr<Epetra_CrsGraph>    graph;
      std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
      if (row_space_map.Comm().NumProc() > 1)
        {
          graph =
            std::make_unique<Epetra_CrsGraph>(Copy,
                                              row_space_map,
                                              (n_entries_per_row.size() > 0) ?
                                                (n_entries_per_row.data()) :
                                                nullptr,
                                              exchange_data ? false : true);
          if (have_ghost_rows == true)
            nonlocal_graph = std::make_unique<Epetra_CrsGraphMod>(
              off_processor_map, n_entries_per_ghost_row.data());
        }
      else
        graph =
          std::make_unique<Epetra_CrsGraph>(Copy,
                                            row_space_map,
                                            *column_space_map,
                                            (n_entries_per_row.size() > 0) ?
                                              (n_entries_per_row.data()) :
                                              nullptr,
                                            true);

      // now insert the indices, selecting the right graph object to insert
      // into
      std::vector<TrilinosWrappers::types::int_type> row_indices;

      for (const auto global_row : relevant_rows)
        {
          const int row_length = sparsity_pattern.row_length(global_row);
          if (row_length == 0)
            continue;

          row_indices.resize(row_length, -1);
          for (int col = 0; col < row_length; ++col)
            row_indices[col] = sparsity_pattern.column_number(global_row, col);

          if (row_space_map.MyGID(
                static_cast<TrilinosWrappers::types::int_type>(global_row)))
            graph->InsertGlobalIndices(global_row,
                                       row_length,
                                       row_indices.data());
          else
            {
              Assert(nonlocal_graph.get() != nullptr, ExcInternalError());
              nonlocal_graph->InsertGlobalIndices(global_row,
                                                  row_length,
                                                  row_indices.data());
            }
        }

      // finalize nonlocal graph and create nonlocal matrix
      if (nonlocal_graph.get() != nullptr)
        {
          // must make sure the IndicesAreGlobal flag is set on all processors
          // because some processors might not call InsertGlobalIndices (and
          // we do not want to insert dummy indices on all processors for
          // large-scale simulations due to the bad impact on performance)
          nonlocal_graph->SetIndicesAreGlobal();
          Assert(nonlocal_graph->IndicesAreGlobal() == true,
                 ExcInternalError());
          nonlocal_graph->FillComplete(*column_space_map, row_space_map);
          nonlocal_graph->OptimizeStorage();

          // insert data from nonlocal graph into the final sparsity pattern
          if (exchange_data)
            {
              Epetra_Export exporter(nonlocal_graph->RowMap(), row_space_map);
              int ierr = graph->Export(*nonlocal_graph, exporter, Add);
              Assert(ierr == 0, ExcTrilinosError(ierr));
            }

          nonlocal_matrix =
            std::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);
        }

      graph->FillComplete(*column_space_map, row_space_map);
      graph->OptimizeStorage();

      AssertDimension(sparsity_pattern.n_cols(),
                      TrilinosWrappers::n_global_cols(*graph));

      matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
    }
  } // namespace
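
  // Illustrative sketch: the DynamicSparsityPattern specialization above can
  // exploit a pattern whose row_index_set() also contains rows owned by other
  // processes; those rows then become writable from this process and their
  // contributions are shipped to the owners during compress(). All names
  // below are placeholders.
  //
  //   DynamicSparsityPattern dsp(n_dofs, n_dofs, locally_relevant_dofs);
  //   // ... fill dsp ...
  //   TrilinosWrappers::SparseMatrix A;
  //   A.reinit(locally_owned_dofs, locally_owned_dofs, dsp, communicator);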



  template <typename SparsityPatternType>
  void
  SparseMatrix::reinit(const SparsityPatternType &sparsity_pattern)
  {
    reinit_matrix(complete_index_set(sparsity_pattern.n_rows()),
                  complete_index_set(sparsity_pattern.n_cols()),
                  sparsity_pattern,
                  false,
                  MPI_COMM_SELF,
                  column_space_map,
                  matrix,
                  nonlocal_matrix,
                  nonlocal_matrix_exporter);
  }



  template <typename SparsityPatternType>
  inline std::enable_if_t<
    !std::is_same_v<SparsityPatternType, ::dealii::SparseMatrix<double>>>
  SparseMatrix::reinit(const IndexSet            &row_parallel_partitioning,
                       const IndexSet            &col_parallel_partitioning,
                       const SparsityPatternType &sparsity_pattern,
                       const MPI_Comm             communicator,
                       const bool                 exchange_data)
  {
    reinit_matrix(row_parallel_partitioning,
                  col_parallel_partitioning,
                  sparsity_pattern,
                  exchange_data,
                  communicator,
                  column_space_map,
                  matrix,
                  nonlocal_matrix,
                  nonlocal_matrix_exporter);

    // In the end, the matrix needs to be compressed in order to be really
    // ready.
    last_action = Zero;
    compressed  = false;
  }



  void
  SparseMatrix::reinit(const SparsityPattern &sparsity_pattern)
  {
    matrix.reset();
    nonlocal_matrix_exporter.reset();

    // reinit with a (parallel) Trilinos sparsity pattern.
    column_space_map =
      std::make_unique<Epetra_Map>(sparsity_pattern.domain_partitioner());
    matrix = std::make_unique<Epetra_FECrsMatrix>(
      Copy, sparsity_pattern.trilinos_sparsity_pattern(), false);

    if (sparsity_pattern.nonlocal_graph.get() != nullptr)
      nonlocal_matrix =
        std::make_unique<Epetra_CrsMatrix>(Copy,
                                           *sparsity_pattern.nonlocal_graph);
    else
      nonlocal_matrix.reset();

    last_action = Zero;
    compressed  = false;
  }



  void
  SparseMatrix::reinit(const SparseMatrix &sparse_matrix)
  {
    if (this == &sparse_matrix)
      return;

    column_space_map =
      std::make_unique<Epetra_Map>(sparse_matrix.trilinos_matrix().DomainMap());
    matrix.reset();
    nonlocal_matrix_exporter.reset();
    matrix = std::make_unique<Epetra_FECrsMatrix>(
      Copy, sparse_matrix.trilinos_sparsity_pattern(), false);

    if (sparse_matrix.nonlocal_matrix != nullptr)
      nonlocal_matrix = std::make_unique<Epetra_CrsMatrix>(
        Copy, sparse_matrix.nonlocal_matrix->Graph());
    else
      nonlocal_matrix.reset();

    last_action = Zero;
    compressed  = false;
  }



  template <typename number>
  inline void
  SparseMatrix::reinit(
    const IndexSet                      &row_parallel_partitioning,
    const IndexSet                      &col_parallel_partitioning,
    const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
    const MPI_Comm                       communicator,
    const double                         drop_tolerance,
    const bool                           copy_values,
    const ::dealii::SparsityPattern     *use_this_sparsity)
  {
    if (copy_values == false)
      {
        // in case we do not copy values, just
        // call the other function.
        if (use_this_sparsity == nullptr)
          reinit(row_parallel_partitioning,
                 col_parallel_partitioning,
                 dealii_sparse_matrix.get_sparsity_pattern(),
                 communicator,
                 false);
        else
          reinit(row_parallel_partitioning,
                 col_parallel_partitioning,
                 *use_this_sparsity,
                 communicator,
                 false);
        return;
      }

    const size_type n_rows = dealii_sparse_matrix.m();

    AssertDimension(row_parallel_partitioning.size(), n_rows);
    AssertDimension(col_parallel_partitioning.size(), dealii_sparse_matrix.n());

    const ::dealii::SparsityPattern &sparsity_pattern =
      (use_this_sparsity != nullptr) ?
        *use_this_sparsity :
        dealii_sparse_matrix.get_sparsity_pattern();

    if (matrix.get() == nullptr || m() != n_rows ||
        n_nonzero_elements() != sparsity_pattern.n_nonzero_elements())
      {
        reinit(row_parallel_partitioning,
               col_parallel_partitioning,
               sparsity_pattern,
               communicator,
               false);
      }

    // fill the values. the same as above: go through all rows of the
    // matrix, and then all columns. since the sparsity patterns of the
    // input matrix and the specified sparsity pattern might be different,
    // need to go through the row for both these sparsity structures
    // simultaneously in order to really set the correct values.
    size_type                   maximum_row_length = matrix->MaxNumEntries();
    std::vector<size_type>      row_indices(maximum_row_length);
    std::vector<TrilinosScalar> values(maximum_row_length);

    for (size_type row = 0; row < n_rows; ++row)
      // see if the row is locally stored on this processor
      if (row_parallel_partitioning.is_element(row) == true)
        {
          ::dealii::SparsityPattern::iterator select_index =
            sparsity_pattern.begin(row);
          typename ::dealii::SparseMatrix<number>::const_iterator it =
            dealii_sparse_matrix.begin(row);
          size_type col = 0;
          if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
            {
              // optimized diagonal
              AssertDimension(it->column(), row);
              if (std::fabs(it->value()) > drop_tolerance)
                {
                  values[col]        = it->value();
                  row_indices[col++] = it->column();
                }
              ++select_index;
              ++it;
            }

          while (it != dealii_sparse_matrix.end(row) &&
                 select_index != sparsity_pattern.end(row))
            {
              while (select_index->column() < it->column() &&
                     select_index != sparsity_pattern.end(row))
                ++select_index;
              while (it->column() < select_index->column() &&
                     it != dealii_sparse_matrix.end(row))
                ++it;

              if (it == dealii_sparse_matrix.end(row))
                break;
              if (std::fabs(it->value()) > drop_tolerance)
                {
                  values[col]        = it->value();
                  row_indices[col++] = it->column();
                }
              ++select_index;
              ++it;
            }
          set(row,
              col,
              reinterpret_cast<size_type *>(row_indices.data()),
              values.data(),
              false);
        }
    compress(VectorOperation::insert);
  }



  template <typename number>
  void
  SparseMatrix::reinit(
    const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
    const double                          drop_tolerance,
    const bool                            copy_values,
    const ::dealii::SparsityPattern      *use_this_sparsity)
  {
    reinit(complete_index_set(dealii_sparse_matrix.m()),
           complete_index_set(dealii_sparse_matrix.n()),
           dealii_sparse_matrix,
           MPI_COMM_SELF,
           drop_tolerance,
           copy_values,
           use_this_sparsity);
  }
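
  // Usage sketch (illustrative): importing an already assembled serial
  // deal.II matrix, dropping entries whose magnitude does not exceed the
  // given tolerance:
  //
  //   dealii::SparseMatrix<double> serial_matrix;   // assembled elsewhere
  //   TrilinosWrappers::SparseMatrix A;
  //   A.reinit(serial_matrix, /*drop_tolerance=*/1e-13);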



  void
  SparseMatrix::reinit(const Epetra_CrsMatrix &input_matrix,
                       const bool              copy_values)
  {
    Assert(input_matrix.Filled() == true,
           ExcMessage("Input CrsMatrix has not called FillComplete()!"));

    column_space_map = std::make_unique<Epetra_Map>(input_matrix.DomainMap());

    const Epetra_CrsGraph *graph = &input_matrix.Graph();

    nonlocal_matrix.reset();
    nonlocal_matrix_exporter.reset();
    matrix.reset();
    matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);

    matrix->FillComplete(*column_space_map, input_matrix.RangeMap(), true);

    if (copy_values == true)
      {
        // point to the first data entry in the two
        // matrices and copy the content
        const TrilinosScalar *in_values   = input_matrix[0];
        TrilinosScalar       *values      = (*matrix)[0];
        const size_type       my_nonzeros = input_matrix.NumMyNonzeros();
        std::memcpy(values, in_values, my_nonzeros * sizeof(TrilinosScalar));
      }

    last_action = Zero;
    compressed  = true;
  }



  void
  SparseMatrix::compress(VectorOperation::values operation)
  {
    Epetra_CombineMode mode = last_action;
    if (last_action == Zero)
      {
        if ((operation == VectorOperation::add) ||
            (operation == VectorOperation::unknown))
          mode = Add;
        else if (operation == VectorOperation::insert)
          mode = Insert;
        else
          Assert(
            false,
            ExcMessage(
              "compress() can only be called with VectorOperation add, insert, or unknown"));
      }
    else
      {
        Assert(
          ((last_action == Add) && (operation != VectorOperation::insert)) ||
            ((last_action == Insert) && (operation != VectorOperation::add)),
          ExcMessage("Operation and argument to compress() do not match"));
      }

    // flush buffers
    int ierr;
    if (nonlocal_matrix.get() != nullptr && mode == Add)
      {
        // do only export in case of an add() operation, otherwise the owning
        // processor must have set the correct entry
        nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
        if (nonlocal_matrix_exporter.get() == nullptr)
          nonlocal_matrix_exporter =
            std::make_unique<Epetra_Export>(nonlocal_matrix->RowMap(),
                                            matrix->RowMap());
        ierr =
          matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
        ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
        nonlocal_matrix->PutScalar(0);
      }
    else
      ierr =
        matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true, mode);

    AssertThrow(ierr == 0, ExcTrilinosError(ierr));

    ierr = matrix->OptimizeStorage();
    AssertThrow(ierr == 0, ExcTrilinosError(ierr));

    last_action = Zero;

    compressed = true;
  }
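
  // Usage sketch (illustrative): every assembly phase ends with a collective
  // compress() call that matches the kind of writes that were performed;
  // set() and add() must not be mixed between two compress() calls.
  //
  //   A.add(i, j, value);                // accumulate local contributions
  //   // ...
  //   A.compress(VectorOperation::add);  // collective over the communicator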



  void
  SparseMatrix::clear()
  {
    // When we clear the matrix, reset
    // the pointer and generate an
    // empty matrix.
    column_space_map =
      std::make_unique<Epetra_Map>(0, 0, Utilities::Trilinos::comm_self());
    matrix = std::make_unique<Epetra_FECrsMatrix>(View, *column_space_map, 0);
    nonlocal_matrix.reset();
    nonlocal_matrix_exporter.reset();

    matrix->FillComplete();

    compressed = true;
  }



  void
  SparseMatrix::clear_row(const size_type      row,
                          const TrilinosScalar new_diag_value)
  {
    Assert(matrix->Filled() == true, ExcMatrixNotCompressed());

    // Only do this on the rows owned
    // locally on this processor.
    int local_row =
      matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
    if (local_row >= 0)
      {
        TrilinosScalar *values;
        int            *col_indices;
        int             num_entries;
        const int       ierr =
          matrix->ExtractMyRowView(local_row, num_entries, values, col_indices);

        Assert(ierr == 0, ExcTrilinosError(ierr));

        const std::ptrdiff_t diag_index =
          std::find(col_indices, col_indices + num_entries, local_row) -
          col_indices;

        for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
          if (diag_index != j || new_diag_value == 0)
            values[j] = 0.;

        if (diag_index != num_entries)
          values[diag_index] = new_diag_value;
      }
  }



  void
  SparseMatrix::clear_rows(const ArrayView<const size_type> &rows,
                           const TrilinosScalar              new_diag_value)
  {
    for (const auto row : rows)
      clear_row(row, new_diag_value);
  }
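
  // Usage sketch (illustrative): a common use of clear_rows() is to zero out
  // rows constrained by Dirichlet boundary conditions while keeping a
  // nonzero diagonal entry so that the matrix stays non-singular:
  //
  //   const std::vector<types::global_dof_index> constrained_rows = ...;
  //   A.clear_rows(constrained_rows, /*new_diag_value=*/1.0);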



  TrilinosScalar
  SparseMatrix::operator()(const size_type i, const size_type j) const
  {
    // Extract local indices in
    // the matrix.
    int trilinos_i =
          matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
        trilinos_j =
          matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
    TrilinosScalar value = 0.;

    // If the data is not on the
    // present processor, we throw
    // an exception. This is one of
    // the two tiny differences to
    // the el(i,j) call, which does
    // not throw any assertions.
    if (trilinos_i == -1)
      {
        Assert(false,
               ExcAccessToNonLocalElement(
                 i, j, local_range().first, local_range().second - 1));
      }
    else
      {
        // Check whether the matrix has
        // already been transformed to local
        // indices.
        Assert(matrix->Filled(), ExcMatrixNotCompressed());

        // Prepare pointers for extraction
        // of a view of the row.
        int             nnz_present = matrix->NumMyEntries(trilinos_i);
        int             nnz_extracted;
        int            *col_indices;
        TrilinosScalar *values;

        // Generate the view and make
        // sure that we have not generated
        // an error.
        // TODO Check that col_indices are int and not long long
        int ierr = matrix->ExtractMyRowView(trilinos_i,
                                            nnz_extracted,
                                            values,
                                            col_indices);
        Assert(ierr == 0, ExcTrilinosError(ierr));

        Assert(nnz_present == nnz_extracted,
               ExcDimensionMismatch(nnz_present, nnz_extracted));

        // Search the index where we
        // look for the value, and then
        // finally get it.
        const std::ptrdiff_t local_col_index =
          std::find(col_indices, col_indices + nnz_present, trilinos_j) -
          col_indices;

        // This is actually the only
        // difference to the el(i,j)
        // function, which means that
        // we throw an exception in
        // this case instead of just
        // returning zero for an
        // element that is not present
        // in the sparsity pattern.
        if (local_col_index == nnz_present)
          {
            Assert(false, ExcInvalidIndex(i, j));
          }
        else
          value = values[local_col_index];
      }

    return value;
  }



  TrilinosScalar
  SparseMatrix::el(const size_type i, const size_type j) const
  {
    // Extract local indices in
    // the matrix.
    int trilinos_i =
          matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
        trilinos_j =
          matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
    TrilinosScalar value = 0.;

    // If the data is not on the
    // present processor, we can't
    // continue. Just print out zero
    // as discussed in the
    // documentation of this
    // function. if you want error
    // checking, use operator().
    if ((trilinos_i == -1) || (trilinos_j == -1))
      return 0.;
    else
      {
        // Check whether the matrix
        // already is transformed to
        // local indices.
        Assert(matrix->Filled(), ExcMatrixNotCompressed());

        // Prepare pointers for extraction
        // of a view of the row.
        int             nnz_present = matrix->NumMyEntries(trilinos_i);
        int             nnz_extracted;
        int            *col_indices;
        TrilinosScalar *values;

        // Generate the view and make
        // sure that we have not generated
        // an error.
        int ierr = matrix->ExtractMyRowView(trilinos_i,
                                            nnz_extracted,
                                            values,
                                            col_indices);
        Assert(ierr == 0, ExcTrilinosError(ierr));

        Assert(nnz_present == nnz_extracted,
               ExcDimensionMismatch(nnz_present, nnz_extracted));

        // Search the index where we
        // look for the value, and then
        // finally get it.
        const std::ptrdiff_t local_col_index =
          std::find(col_indices, col_indices + nnz_present, trilinos_j) -
          col_indices;

        // This is actually the only
        // difference to the () function
        // querying (i,j), where we throw an
        // exception instead of just
        // returning zero for an element
        // that is not present in the
        // sparsity pattern.
        if (local_col_index == nnz_present)
          value = 0;
        else
          value = values[local_col_index];
      }

    return value;
  }



  TrilinosScalar
  SparseMatrix::diag_element(const size_type i) const
  {
    Assert(m() == n(), ExcNotQuadratic());

    if constexpr (running_in_debug_mode())
      {
        // use operator() in debug mode because
        // it checks if this is a valid element
        // (in parallel)
        return operator()(i, i);
      }
    else
      {
        // Trilinos doesn't seem to have a
        // more efficient way to access the
        // diagonal than by just using the
        // standard el(i,j) function.
        return el(i, i);
      }
  }



  unsigned int
  SparseMatrix::row_length(const size_type row) const
  {
    Assert(row < m(), ExcInternalError());

    // get a representation of the
    // present row
    int ncols = -1;
    int local_row =
      matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
    Assert((local_row >= 0), ExcAccessToNonlocalRow(row));

    // on the processor who owns this
    // row, we'll have a non-negative
    // value.
    if (local_row >= 0)
      {
        int ierr = matrix->NumMyRowEntries(local_row, ncols);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

    return static_cast<unsigned int>(ncols);
  }



  void
  SparseMatrix::set(const std::vector<size_type>     &row_indices,
                    const std::vector<size_type>     &col_indices,
                    const FullMatrix<TrilinosScalar> &values,
                    const bool                        elide_zero_values)
  {
    Assert(row_indices.size() == values.m(),
           ExcDimensionMismatch(row_indices.size(), values.m()));
    Assert(col_indices.size() == values.n(),
           ExcDimensionMismatch(col_indices.size(), values.n()));

    for (size_type i = 0; i < row_indices.size(); ++i)
      set(row_indices[i],
          col_indices.size(),
          col_indices.data(),
          &values(i, 0),
          elide_zero_values);
  }



  void
  SparseMatrix::set(const size_type                    row,
                    const std::vector<size_type>      &col_indices,
                    const std::vector<TrilinosScalar> &values,
                    const bool                         elide_zero_values)
  {
    Assert(col_indices.size() == values.size(),
           ExcDimensionMismatch(col_indices.size(), values.size()));

    set(row,
        col_indices.size(),
        col_indices.data(),
        values.data(),
        elide_zero_values);
  }



  template <>
  void
  SparseMatrix::set<TrilinosScalar>(const size_type       row,
                                    const size_type       n_cols,
                                    const size_type      *col_indices,
                                    const TrilinosScalar *values,
                                    const bool            elide_zero_values)
  {
    AssertIndexRange(row, this->m());

    int ierr;
    if (last_action == Add)
      {
        ierr =
          matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true);

        Assert(ierr == 0, ExcTrilinosError(ierr));
      }

    last_action = Insert;

    const TrilinosWrappers::types::int_type *col_index_ptr;
    const TrilinosScalar                    *col_value_ptr;
    const TrilinosWrappers::types::int_type  trilinos_row = row;
    TrilinosWrappers::types::int_type        n_columns;

    boost::container::small_vector<TrilinosScalar, 200> local_value_array(
      elide_zero_values ? n_cols : 0);
    boost::container::small_vector<TrilinosWrappers::types::int_type, 200>
      local_index_array(elide_zero_values ? n_cols : 0);

    // If we don't elide zeros, the pointers are already available... need to
    // cast to non-const pointers as that is the format taken by Trilinos (but
    // we will not modify const data)
    if (elide_zero_values == false)
      {
        col_index_ptr =
          reinterpret_cast<const TrilinosWrappers::types::int_type *>(
            col_indices);
        col_value_ptr = values;
        n_columns     = n_cols;
      }
    else
      {
        // Otherwise, extract nonzero values in each row and get the
        // respective indices.
        col_index_ptr = local_index_array.data();
        col_value_ptr = local_value_array.data();

        n_columns = 0;
        for (size_type j = 0; j < n_cols; ++j)
          {
            const double value = values[j];
            AssertIsFinite(value);
            if (value != 0)
              {
                local_index_array[n_columns] = col_indices[j];
                local_value_array[n_columns] = value;
                ++n_columns;
              }
          }

        AssertIndexRange(n_columns, n_cols + 1);
      }


    // If the calling matrix owns the row to which we want to insert values,
    // we can directly call the Epetra_CrsMatrix input function, which is much
    // faster than the Epetra_FECrsMatrix function. We distinguish between two
    // cases: the first one is when the matrix is not filled (i.e., it is
    // possible to add new elements to the sparsity pattern), and the second
    // one is when the pattern is already fixed. In the former case, we add
    // the possibility to insert new values, and in the second we just replace
    // data.
    if (matrix->RowMap().MyGID(
          static_cast<TrilinosWrappers::types::int_type>(row)) == true)
      {
        if (matrix->Filled() == false)
          {
            ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
              row, static_cast<int>(n_columns), col_value_ptr, col_index_ptr);

            // When inserting elements, we do not want to create exceptions in
            // the case when inserting non-local data (since that's what we
            // want to do right now).
            if (ierr > 0)
              ierr = 0;
          }
        else
          ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row,
                                                               n_columns,
                                                               col_value_ptr,
                                                               col_index_ptr);
      }
    else
      {
        // When we're at off-processor data, we have to stick with the
        // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
        // we call it is the fastest one (any other will lead to repeated
        // allocation and deallocation of memory in order to call the function
        // we already use, which is very inefficient if writing one element at
        // a time).
        compressed = false;

        if (matrix->Filled() == false)
          {
            ierr = matrix->InsertGlobalValues(1,
                                              &trilinos_row,
                                              n_columns,
                                              col_index_ptr,
                                              &col_value_ptr,
                                              Epetra_FECrsMatrix::ROW_MAJOR);
            if (ierr > 0)
              ierr = 0;
          }
        else
          ierr = matrix->ReplaceGlobalValues(1,
                                             &trilinos_row,
                                             n_columns,
                                             col_index_ptr,
                                             &col_value_ptr,
                                             Epetra_FECrsMatrix::ROW_MAJOR);
        // use the FECrsMatrix facilities for set even in the case when we
        // have explicitly set the off-processor rows because that only works
        // properly when adding elements, not when setting them (since we want
        // to only touch elements that have been set explicitly, and there is
        // no way on the receiving processor to identify them otherwise)
      }

    Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
    AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
  }
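
  // Usage sketch (illustrative): setting a row's entries in one call; with
  // elide_zero_values == true, zeros are filtered out before insertion.
  //
  //   const SparseMatrix::size_type cols[2] = {0, 2};
  //   const TrilinosScalar          vals[2] = {3.14, 0.0};
  //   A.set(/*row=*/1, /*n_cols=*/2, cols, vals, /*elide_zero_values=*/true);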



  void
  SparseMatrix::add(const std::vector<size_type>     &indices,
                    const FullMatrix<TrilinosScalar> &values,
                    const bool                        elide_zero_values)
  {
    Assert(indices.size() == values.m(),
           ExcDimensionMismatch(indices.size(), values.m()));
    Assert(values.m() == values.n(), ExcNotQuadratic());

    for (size_type i = 0; i < indices.size(); ++i)
      add(indices[i],
          indices.size(),
          indices.data(),
          &values(i, 0),
          elide_zero_values);
  }



  void
  SparseMatrix::add(const std::vector<size_type>     &row_indices,
                    const std::vector<size_type>     &col_indices,
                    const FullMatrix<TrilinosScalar> &values,
                    const bool                        elide_zero_values)
  {
    Assert(row_indices.size() == values.m(),
           ExcDimensionMismatch(row_indices.size(), values.m()));
    Assert(col_indices.size() == values.n(),
           ExcDimensionMismatch(col_indices.size(), values.n()));

    for (size_type i = 0; i < row_indices.size(); ++i)
      add(row_indices[i],
          col_indices.size(),
          col_indices.data(),
          &values(i, 0),
          elide_zero_values);
  }



  void
  SparseMatrix::add(const size_type                    row,
                    const std::vector<size_type>      &col_indices,
                    const std::vector<TrilinosScalar> &values,
                    const bool                         elide_zero_values)
  {
    Assert(col_indices.size() == values.size(),
           ExcDimensionMismatch(col_indices.size(), values.size()));

    add(row,
        col_indices.size(),
        col_indices.data(),
        values.data(),
        elide_zero_values);
  }



  void
  SparseMatrix::add(const size_type       row,
                    const size_type       n_cols,
                    const size_type      *col_indices,
                    const TrilinosScalar *values,
                    const bool            elide_zero_values,
                    const bool /*col_indices_are_sorted*/)
  {
    AssertIndexRange(row, this->m());
    for (size_type n = 0; n < n_cols; ++n)
      AssertIndexRange(col_indices[n], this->n());

    int ierr;
    if (last_action == Insert)
      {
        // TODO: this could lead to a dead lock when only one processor
        // calls GlobalAssemble.
        ierr =
          matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), false);

        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

    last_action = Add;

    const TrilinosWrappers::types::int_type *col_index_ptr;
    const TrilinosScalar                    *col_value_ptr;
    const TrilinosWrappers::types::int_type  trilinos_row = row;
    TrilinosWrappers::types::int_type        n_columns;

    boost::container::small_vector<TrilinosScalar, 100> local_value_array(
      n_cols);
    boost::container::small_vector<TrilinosWrappers::types::int_type, 100>
      local_index_array(n_cols);

    // If we don't elide zeros, the pointers are already available... need to
    // cast to non-const pointers as that is the format taken by Trilinos (but
    // we will not modify const data)
    if (elide_zero_values == false)
      {
        col_index_ptr =
          reinterpret_cast<const TrilinosWrappers::types::int_type *>(
            col_indices);
        col_value_ptr = values;
        n_columns     = n_cols;
        if constexpr (running_in_debug_mode())
          {
            for (size_type j = 0; j < n_cols; ++j)
              AssertIsFinite(values[j]);
          }
      }
    else
      {
        // Otherwise, extract nonzero values in each row and the corresponding
        // index.
        col_index_ptr = local_index_array.data();
        col_value_ptr = local_value_array.data();

        n_columns = 0;
        for (size_type j = 0; j < n_cols; ++j)
          {
            const double value = values[j];

            AssertIsFinite(value);
            if (value != 0)
              {
                local_index_array[n_columns] = col_indices[j];
                local_value_array[n_columns] = value;
                ++n_columns;
              }
          }

        AssertIndexRange(n_columns, n_cols + 1);
      }
    // Exit early if there is nothing to do
    if (n_columns == 0)
      {
        return;
      }

    // If the calling processor owns the row to which we want to add values, we
    // can directly call the Epetra_CrsMatrix input function, which is much
    // faster than the Epetra_FECrsMatrix function.
    if (matrix->RowMap().MyGID(
          static_cast<TrilinosWrappers::types::int_type>(row)) == true)
      {
        ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row,
                                                             n_columns,
                                                             col_value_ptr,
                                                             col_index_ptr);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }
    else if (nonlocal_matrix.get() != nullptr)
      {
        compressed = false;
        // this is the case when we have explicitly set the off-processor rows
        // and want to create a separate matrix object for them (to retain
        // thread-safety)
        Assert(nonlocal_matrix->RowMap().LID(
                 static_cast<TrilinosWrappers::types::int_type>(row)) != -1,
               ExcMessage("Attempted to write into off-processor matrix row "
                          "that has not been specified as being writable upon "
                          "initialization"));
        ierr = nonlocal_matrix->SumIntoGlobalValues(row,
                                                    n_columns,
                                                    col_value_ptr,
                                                    col_index_ptr);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }
    else
      {
        // When we're at off-processor data, we have to stick with the
        // standard SumIntoGlobalValues function. Nevertheless, the way we
        // call it is the fastest one (any other will lead to repeated
        // allocation and deallocation of memory in order to call the function
        // we already use, which is very inefficient if writing one element at
        // a time).
        compressed = false;

        ierr = matrix->SumIntoGlobalValues(1,
                                           &trilinos_row,
                                           n_columns,
                                           col_index_ptr,
                                           &col_value_ptr,
                                           Epetra_FECrsMatrix::ROW_MAJOR);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

    if constexpr (running_in_debug_mode())
      {
        if (ierr > 0)
          {
            std::cout << "------------------------------------------"
                      << std::endl;
            std::cout << "Got error " << ierr << " in row " << row
                      << " of proc " << matrix->RowMap().Comm().MyPID()
                      << " when trying to add the columns:" << std::endl;
            for (TrilinosWrappers::types::int_type i = 0; i < n_columns; ++i)
              std::cout << col_index_ptr[i] << " ";
            std::cout << std::endl << std::endl;
            std::cout << "Matrix row "
                      << (matrix->RowMap().MyGID(
                            static_cast<TrilinosWrappers::types::int_type>(
                              row)) == false ?
                            "(nonlocal part)" :
                            "")
                      << " has the following indices:" << std::endl;
            std::vector<TrilinosWrappers::types::int_type> indices;
            const Epetra_CrsGraph *graph =
              (nonlocal_matrix.get() != nullptr &&
               matrix->RowMap().MyGID(
                 static_cast<TrilinosWrappers::types::int_type>(row)) ==
                 false) ?
                &nonlocal_matrix->Graph() :
                &matrix->Graph();

            indices.resize(graph->NumGlobalIndices(row));
            int n_indices = 0;
            graph->ExtractGlobalRowCopy(row,
                                        indices.size(),
                                        n_indices,
                                        indices.data());
            AssertDimension(n_indices, indices.size());

            for (TrilinosWrappers::types::int_type i = 0; i < n_indices; ++i)
              std::cout << indices[i] << " ";
            std::cout << std::endl << std::endl;
            Assert(ierr <= 0,
                   ExcAccessToNonPresentElement(row, col_index_ptr[0]));
          }
      }
    Assert(ierr >= 0, ExcTrilinosError(ierr));
  }



  SparseMatrix &
  SparseMatrix::operator=(const double d)
  {
    Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
    compress(VectorOperation::unknown); // TODO: why do we do this? Should we
                                        // not check for is_compressed?

    // As checked above, we are only allowed to use d==0.0, so pass
    // a constant zero (instead of a run-time value 'd' that *happens* to
    // have a zero value) to the underlying class in hopes that the compiler
    // can optimize this somehow.
    const int ierr = matrix->PutScalar(/*d=*/0.0);
    AssertThrow(ierr == 0, ExcTrilinosError(ierr));

    if (nonlocal_matrix.get() != nullptr)
      {
        const int ierr = nonlocal_matrix->PutScalar(/*d=*/0.0);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

    return *this;
  }



  void
  SparseMatrix::add(const TrilinosScalar factor, const SparseMatrix &rhs)
  {
    AssertDimension(rhs.m(), m());
    AssertDimension(rhs.n(), n());

    Assert(matrix->RowMap().SameAs(rhs.matrix->RowMap()),
           ExcMessage("Can only add matrices with same distribution of rows"));
    Assert(matrix->Filled() && rhs.matrix->Filled(),
           ExcMessage("Addition of matrices only allowed if matrices are "
                      "filled, i.e., compress() has been called"));

    const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());

    for (const auto row : locally_owned_range_indices())
      {
        const int row_local = matrix->RowMap().LID(
          static_cast<TrilinosWrappers::types::int_type>(row));
        Assert((row_local >= 0), ExcAccessToNonlocalRow(row));

        // First get a view of the matrix columns of both matrices. Note that
        // the data is in local index spaces, so we may only compare column
        // indices directly when both matrices derive them from the same map.
        int             n_entries, rhs_n_entries;
        TrilinosScalar *value_ptr, *rhs_value_ptr;
        int            *index_ptr, *rhs_index_ptr;
        int             ierr = rhs.matrix->ExtractMyRowView(row_local,
                                                rhs_n_entries,
                                                rhs_value_ptr,
                                                rhs_index_ptr);
        Assert(ierr == 0, ExcTrilinosError(ierr));

        ierr =
          matrix->ExtractMyRowView(row_local, n_entries, value_ptr, index_ptr);
        Assert(ierr == 0, ExcTrilinosError(ierr));
        bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
        if (!expensive_checks)
          {
            // check if the column indices are the same. If yes, can simply
            // copy over the data.
            expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
                                           static_cast<void *>(rhs_index_ptr),
                                           sizeof(int) * n_entries) != 0;
            if (!expensive_checks)
              for (int i = 0; i < n_entries; ++i)
                value_ptr[i] += rhs_value_ptr[i] * factor;
          }
        // Now to the expensive case where we need to check all column indices
        // against each other (transformed into global index space) and where
        // we need to make sure that all entries we are about to add into the
        // lhs matrix actually exist
        if (expensive_checks)
          {
            for (int i = 0; i < rhs_n_entries; ++i)
              {
                if (rhs_value_ptr[i] == 0.)
                  continue;
                const TrilinosWrappers::types::int_type rhs_global_col =
                  global_column_index(*rhs.matrix, rhs_index_ptr[i]);
                int  local_col   = matrix->ColMap().LID(rhs_global_col);
                int *local_index = Utilities::lower_bound(index_ptr,
                                                          index_ptr + n_entries,
                                                          local_col);
                Assert(local_index != index_ptr + n_entries &&
                         *local_index == local_col,
                       ExcMessage(
                         "Adding the entries from the other matrix "
                         "failed, because the sparsity pattern "
                         "of that matrix includes more elements than the "
                         "calling matrix, which is not allowed."));
                value_ptr[local_index - index_ptr] += factor * rhs_value_ptr[i];
              }
          }
      }
  }
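
  // Usage sketch (illustrative): forming A <- A + 2 B for two compressed
  // matrices with identical row distribution; B's sparsity pattern must be
  // contained in A's.
  //
  //   A.add(2.0, B);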



  void
  SparseMatrix::transpose()
  {
    // This only flips a flag that tells
    // Trilinos that any vmult operation
    // should be done with the
    // transpose. However, the matrix
    // structure is not reset.
    int ierr;

    if (!matrix->UseTranspose())
      {
        ierr = matrix->SetUseTranspose(true);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }
    else
      {
        ierr = matrix->SetUseTranspose(false);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }
  }



  SparseMatrix &
  SparseMatrix::operator*=(const TrilinosScalar a)
  {
    const int ierr = matrix->Scale(a);
    Assert(ierr == 0, ExcTrilinosError(ierr));

    return *this;
  }



  SparseMatrix &
  SparseMatrix::operator/=(const TrilinosScalar a)
  {
    Assert(a != 0, ExcDivideByZero());

    const TrilinosScalar factor = 1. / a;

    const int ierr = matrix->Scale(factor);
    Assert(ierr == 0, ExcTrilinosError(ierr));

    return *this;
  }



  TrilinosScalar
  SparseMatrix::l1_norm() const
  {
    Assert(matrix->Filled(), ExcMatrixNotCompressed());
    return matrix->NormOne();
  }



  TrilinosScalar
  SparseMatrix::linfty_norm() const
  {
    Assert(matrix->Filled(), ExcMatrixNotCompressed());
    return matrix->NormInf();
  }



  TrilinosScalar
  SparseMatrix::frobenius_norm() const
  {
    Assert(matrix->Filled(), ExcMatrixNotCompressed());
    return matrix->NormFrobenius();
  }



  namespace internal
  {
    namespace SparseMatrixImplementation
    {
      template <typename VectorType>
      inline void
      check_vector_map_equality(const Epetra_CrsMatrix &,
                                const VectorType &,
                                const VectorType &)
      {}

      inline void
      check_vector_map_equality(const Epetra_CrsMatrix              &m,
                                const TrilinosWrappers::MPI::Vector &in,
                                const TrilinosWrappers::MPI::Vector &out)
      {
        Assert(in.trilinos_partitioner().SameAs(m.DomainMap()) == true,
               ExcMessage("The column partitioning of a matrix does not match "
                          "the partitioning of a vector you are trying to "
                          "multiply it with. Are you multiplying the "
                          "matrix with a vector that has ghost elements?"));
        Assert(out.trilinos_partitioner().SameAs(m.RangeMap()) == true,
               ExcMessage("The row partitioning of a matrix does not match "
                          "the partitioning of a vector you are trying to "
                          "put the result of a matrix-vector product in. "
                          "Are you trying to put the product of the "
                          "matrix with a vector into a vector that has "
                          "ghost elements?"));
      }
    } // namespace SparseMatrixImplementation
  }   // namespace internal
1981
1982
1983 template <typename VectorType>
1984 std::enable_if_t<
1985 std::is_same_v<typename VectorType::value_type, TrilinosScalar>>
1986 SparseMatrix::vmult(VectorType &dst, const VectorType &src) const
1987 {
1988 Assert(&src != &dst, ExcSourceEqualsDestination());
1989 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1990
1991 internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
1992 src,
1993 dst);
1994 const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
1995 AssertDimension(dst_local_size, matrix->RangeMap().NumMyPoints());
1996 const size_type src_local_size = internal::end(src) - internal::begin(src);
1997 AssertDimension(src_local_size, matrix->DomainMap().NumMyPoints());
1998
1999 Epetra_MultiVector tril_dst(
2000 View, matrix->RangeMap(), internal::begin(dst), dst_local_size, 1);
2001 Epetra_MultiVector tril_src(View,
2002 matrix->DomainMap(),
2003 const_cast<TrilinosScalar *>(
2004 internal::begin(src)),
2005 src_local_size,
2006 1);
2007
2008 const int ierr = matrix->Multiply(false, tril_src, tril_dst);
2009 Assert(ierr == 0, ExcTrilinosError(ierr));
2010 }
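// A conforming call might look as follows (sketch; 'A' is assembled and
// compressed, 'comm' is the matrix's communicator; neither vector may
// carry ghost entries):
//
// TrilinosWrappers::MPI::Vector x(A.locally_owned_domain_indices(), comm);
// TrilinosWrappers::MPI::Vector y(A.locally_owned_range_indices(), comm);
// A.vmult(y, x); // y = A x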
2011
2012
2013
2014 template <typename VectorType>
2015 std::enable_if_t<
2016 !std::is_same_v<typename VectorType::value_type, TrilinosScalar>>
2017 SparseMatrix::vmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2018 {
2019 DEAL_II_NOT_IMPLEMENTED();
2020 }
2021
2022
2023
2024 template <typename VectorType>
2025 std::enable_if_t<
2026 std::is_same_v<typename VectorType::value_type, TrilinosScalar>>
2027 SparseMatrix::Tvmult(VectorType &dst, const VectorType &src) const
2028 {
2029 Assert(&src != &dst, ExcSourceEqualsDestination());
2030 Assert(matrix->Filled(), ExcMatrixNotCompressed());
2031
2032 internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
2033 dst,
2034 src);
2035 const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
2036 AssertDimension(dst_local_size, matrix->DomainMap().NumMyPoints());
2037 const size_type src_local_size = internal::end(src) - internal::begin(src);
2038 AssertDimension(src_local_size, matrix->RangeMap().NumMyPoints());
2039
2040 Epetra_MultiVector tril_dst(
2041 View, matrix->DomainMap(), internal::begin(dst), dst_local_size, 1);
2042 Epetra_MultiVector tril_src(View,
2043 matrix->RangeMap(),
2044 const_cast<double *>(internal::begin(src)),
2045 src_local_size,
2046 1);
2047
2048 const int ierr = matrix->Multiply(true, tril_src, tril_dst);
2049 Assert(ierr == 0, ExcTrilinosError(ierr));
2050 }
2051
2052
2053
2054 template <typename VectorType>
2055 std::enable_if_t<
2056 !std::is_same_v<typename VectorType::value_type, TrilinosScalar>>
2057 SparseMatrix::Tvmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2058 {
2059 DEAL_II_NOT_IMPLEMENTED();
2060 }
2061
2062
2063
2064 template <typename VectorType>
2065 void
2066 SparseMatrix::vmult_add(VectorType &dst, const VectorType &src) const
2067 {
2068 Assert(&src != &dst, ExcSourceEqualsDestination());
2069
2070 // Reinit a temporary vector with the omit_zeroing_entries flag set, so
2071 // that its previous contents are not zeroed out (to save time).
2072 VectorType tmp_vector;
2073 tmp_vector.reinit(dst, true);
2074 vmult(tmp_vector, src);
2075 dst += tmp_vector;
2076 }
2077
2078
2079
2080 template <typename VectorType>
2081 void
2082 SparseMatrix::Tvmult_add(VectorType &dst, const VectorType &src) const
2083 {
2084 Assert(&src != &dst, ExcSourceEqualsDestination());
2085
2086 // Reinit a temporary vector with the omit_zeroing_entries flag set, so
2087 // that its previous contents are not zeroed out (to save time).
2088 VectorType tmp_vector;
2089 tmp_vector.reinit(dst, true);
2090 Tvmult(tmp_vector, src);
2091 dst += tmp_vector;
2092 }
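// In formula form, the two functions above compute dst += A * src and
// dst += A^T * src, respectively. The temporary vector is needed because
// Trilinos overwrites the destination of a product, so the product is
// formed first and then accumulated into dst.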
2093
2094
2095
2096 TrilinosScalar
2097 SparseMatrix::matrix_norm_square(const MPI::Vector &v) const
2098 {
2099 AssertDimension(m(), v.size());
2100 Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2101
2102 MPI::Vector temp_vector;
2103 temp_vector.reinit(v, true);
2104
2105 vmult(temp_vector, v);
2106 return temp_vector * v;
2107 }
2108
2109
2110
2111 TrilinosScalar
2112 SparseMatrix::matrix_scalar_product(const MPI::Vector &u,
2113 const MPI::Vector &v) const
2114 {
2115 AssertDimension(m(), u.size());
2116 AssertDimension(m(), v.size());
2117 Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2118
2119 MPI::Vector temp_vector;
2120 temp_vector.reinit(v, true);
2121
2122 vmult(temp_vector, v);
2123 return u * temp_vector;
2124 }
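// In formula form: matrix_norm_square(v) returns v^T A v and
// matrix_scalar_product(u, v) returns u^T A v. Both require a square
// matrix with matching row and domain partitioning, and v^T A v is the
// square of a norm only if A is symmetric positive definite.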
2125
2126
2127
2128 namespace internals
2129 {
2130 void
2131 perform_mmult(const SparseMatrix &inputleft,
2132 const SparseMatrix &inputright,
2133 SparseMatrix &result,
2134 const MPI::Vector &V,
2135 const bool transpose_left)
2136 {
2137 const bool use_vector = (V.size() == inputright.m());
2138 if (transpose_left == false)
2139 {
2140 Assert(inputleft.n() == inputright.m(),
2141 ExcDimensionMismatch(inputleft.n(), inputright.m()));
2142 Assert(inputleft.trilinos_matrix().DomainMap().SameAs(
2143 inputright.trilinos_matrix().RangeMap()),
2144 ExcMessage("Parallel partitioning of A and B does not fit."));
2145 }
2146 else
2147 {
2148 Assert(inputleft.m() == inputright.m(),
2149 ExcDimensionMismatch(inputleft.m(), inputright.m()));
2150 Assert(inputleft.trilinos_matrix().RangeMap().SameAs(
2151 inputright.trilinos_matrix().RangeMap()),
2152 ExcMessage("Parallel partitioning of A and B does not fit."));
2153 }
2154
2155 result.clear();
2156
2157 // create a suitable operator B: in case
2158 // we do not use a vector, all we need to
2159 // do is to set the pointer. Otherwise,
2160 // we insert the data from B, but
2161 // multiply each row with the respective
2162 // vector element.
2163 Teuchos::RCP<Epetra_CrsMatrix> mod_B;
2164 if (use_vector == false)
2165 {
2166 mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>(
2167 &inputright.trilinos_matrix()),
2168 false);
2169 }
2170 else
2171 {
2172 mod_B = Teuchos::rcp(
2173 new Epetra_CrsMatrix(Copy, inputright.trilinos_sparsity_pattern()),
2174 true);
2175 mod_B->FillComplete(inputright.trilinos_matrix().DomainMap(),
2176 inputright.trilinos_matrix().RangeMap());
2177 Assert(inputright.local_range() == V.local_range(),
2178 ExcMessage("Parallel distribution of matrix B and vector V "
2179 "does not match."));
2180
2181 const int local_N = inputright.local_size();
2182 for (int i = 0; i < local_N; ++i)
2183 {
2184 int N_entries = -1;
2185 double *new_data, *B_data;
2186 mod_B->ExtractMyRowView(i, N_entries, new_data);
2187 inputright.trilinos_matrix().ExtractMyRowView(i,
2188 N_entries,
2189 B_data);
2190 double value = V.trilinos_vector()[0][i];
2191 for (TrilinosWrappers::types::int_type j = 0; j < N_entries; ++j)
2192 new_data[j] = value * B_data[j];
2193 }
2194 }
2195
2196
2197 SparseMatrix tmp_result(transpose_left ?
2198 inputleft.locally_owned_domain_indices() :
2199 inputleft.locally_owned_range_indices(),
2200 inputright.locally_owned_domain_indices(),
2201 inputleft.get_mpi_communicator());
2202
2203# ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
2204 EpetraExt::MatrixMatrix::Multiply(inputleft.trilinos_matrix(),
2205 transpose_left,
2206 *mod_B,
2207 false,
2208 const_cast<Epetra_CrsMatrix &>(
2209 tmp_result.trilinos_matrix()));
2210# else
2211 Assert(false,
2212 ExcMessage("This function requires that the Trilinos "
2213 "installation found while running the deal.II "
2214 "CMake scripts contains the optional Trilinos "
2215 "package 'EpetraExt'. However, this optional "
2216 "part of Trilinos was not found."));
2217# endif
2218 result.reinit(tmp_result.trilinos_matrix());
2219 }
2220 } // namespace internals
2221
2222
2223 void
2224 SparseMatrix::mmult(SparseMatrix &C,
2225 const SparseMatrix &B,
2226 const MPI::Vector &V) const
2227 {
2228 internals::perform_mmult(*this, B, C, V, false);
2229 }
2230
2231
2232
2233 void
2234 SparseMatrix::Tmmult(SparseMatrix &C,
2235 const SparseMatrix &B,
2236 const MPI::Vector &V) const
2237 {
2238 internals::perform_mmult(*this, B, C, V, true);
2239 }
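// Usage sketch for the two products above (illustrative; matrices with
// compatible partitioning assumed):
//
// A.mmult(C, B); // C = A * B
// A.mmult(C, B, V); // C = A * diag(V) * B, V scaling the rows of B
// A.Tmmult(C, B); // C = A^T * B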
2240
2241
2242
2243 void
2244 SparseMatrix::write_ascii()
2245 {
2246 DEAL_II_NOT_IMPLEMENTED();
2247 }
2248
2249
2250
2251 // As of now, no particularly neat
2252 // output is generated in case of
2253 // multiple processors.
2254 void
2255 SparseMatrix::print(std::ostream &out,
2256 const bool print_detailed_trilinos_information) const
2257 {
2258 if (print_detailed_trilinos_information == true)
2259 out << *matrix;
2260 else
2261 {
2262 double *values;
2263 int *indices;
2264 int num_entries;
2265
2266 for (int i = 0; i < matrix->NumMyRows(); ++i)
2267 {
2268 const int ierr =
2269 matrix->ExtractMyRowView(i, num_entries, values, indices);
2270 Assert(ierr == 0, ExcTrilinosError(ierr));
2271
2272 for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
2274 << ","
2276 << ") " << values[j] << std::endl;
2277 }
2278 }
2279
2280 AssertThrow(out.fail() == false, ExcIO());
2281 }
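// Sketch: A.print(std::cout) emits one "(row,col) value" line per locally
// stored nonzero, using global indices; passing 'true' as the second
// argument forwards to the much more verbose Epetra output instead.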
2282
2283
2284
2285 SparseMatrix::size_type
2286 SparseMatrix::memory_consumption() const
2287 {
2288 size_type static_memory =
2289 sizeof(*this) + sizeof(*matrix) + sizeof(*matrix->Graph().DataPtr());
2290 return (
2291 (sizeof(TrilinosScalar) + sizeof(TrilinosWrappers::types::int_type)) *
2292 matrix->NumMyNonzeros() +
2293 sizeof(int) * local_size() + static_memory);
2294 }
2295
2296
2297
2298 MPI_Comm
2299 SparseMatrix::get_mpi_communicator() const
2300 {
2301 const Epetra_MpiComm *mpi_comm =
2302 dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
2303 Assert(mpi_comm != nullptr, ExcInternalError());
2304 return mpi_comm->Comm();
2305 }
2306} // namespace TrilinosWrappers
2307
2308
2309namespace TrilinosWrappers
2310{
2311 namespace internal
2312 {
2313 namespace LinearOperatorImplementation
2314 {
2316
2317 TrilinosPayload::TrilinosPayload()
2318 : use_transpose(false)
2319 , communicator(MPI_COMM_SELF)
2320 , domain_map(IndexSet().make_trilinos_map(communicator.Comm()))
2321 , range_map(IndexSet().make_trilinos_map(communicator.Comm()))
2322 {
2323 vmult = [](Range &, const Domain &) {
2324 Assert(false,
2325 ExcMessage("Uninitialized TrilinosPayload::vmult called "
2326 "(Default constructor)"));
2327 };
2328
2329 Tvmult = [](Domain &, const Range &) {
2330 Assert(false,
2331 ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
2332 "(Default constructor)"));
2333 };
2334
2335 inv_vmult = [](Domain &, const Range &) {
2336 Assert(false,
2337 ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2338 "(Default constructor)"));
2339 };
2340
2341 inv_Tvmult = [](Range &, const Domain &) {
2342 Assert(false,
2343 ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2344 "(Default constructor)"));
2345 };
2346 }
2347
2348
2349
2350 TrilinosPayload::TrilinosPayload(
2351 const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2352 const TrilinosWrappers::SparseMatrix &matrix)
2353 : TrilinosPayload(const_cast<Epetra_CrsMatrix &>(
2354 matrix.trilinos_matrix()),
2355 /*op_supports_inverse_operations = */ false,
2356 matrix_exemplar.trilinos_matrix().UseTranspose(),
2357 matrix_exemplar.get_mpi_communicator(),
2358 matrix_exemplar.locally_owned_domain_indices(),
2359 matrix_exemplar.locally_owned_range_indices())
2360 {}
2361
2362
2363
2364 TrilinosPayload::TrilinosPayload(
2365 const TrilinosPayload &payload_exemplar,
2366 const TrilinosWrappers::SparseMatrix &matrix)
2367
2368 : TrilinosPayload(const_cast<Epetra_CrsMatrix &>(
2369 matrix.trilinos_matrix()),
2370 /*op_supports_inverse_operations = */ false,
2371 payload_exemplar.UseTranspose(),
2372 payload_exemplar.get_mpi_communicator(),
2373 payload_exemplar.locally_owned_domain_indices(),
2374 payload_exemplar.locally_owned_range_indices())
2375 {}
2376
2377
2378
2379 TrilinosPayload::TrilinosPayload(
2380 const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2381 const TrilinosWrappers::PreconditionBase &preconditioner)
2382 : TrilinosPayload(preconditioner.trilinos_operator(),
2383 /*op_supports_inverse_operations = */ true,
2384 matrix_exemplar.trilinos_matrix().UseTranspose(),
2385 matrix_exemplar.get_mpi_communicator(),
2386 matrix_exemplar.locally_owned_domain_indices(),
2387 matrix_exemplar.locally_owned_range_indices())
2388 {}
2389
2390
2391
2392 TrilinosPayload::TrilinosPayload(
2393 const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
2394 const TrilinosWrappers::PreconditionBase &preconditioner)
2395 : TrilinosPayload(
2396 preconditioner.trilinos_operator(),
2397 /*op_supports_inverse_operations = */ true,
2398 preconditioner_exemplar.trilinos_operator().UseTranspose(),
2399 preconditioner_exemplar.get_mpi_communicator(),
2400 preconditioner_exemplar.locally_owned_domain_indices(),
2401 preconditioner_exemplar.locally_owned_range_indices())
2402 {}
2403
2404
2405
2406 TrilinosPayload::TrilinosPayload(
2407 const TrilinosPayload &payload_exemplar,
2408 const TrilinosWrappers::PreconditionBase &preconditioner)
2409 : TrilinosPayload(preconditioner.trilinos_operator(),
2410 /*op_supports_inverse_operations = */ true,
2411 payload_exemplar.UseTranspose(),
2412 payload_exemplar.get_mpi_communicator(),
2413 payload_exemplar.locally_owned_domain_indices(),
2414 payload_exemplar.locally_owned_range_indices())
2415 {}
2416
2417
2418
2419 TrilinosPayload::TrilinosPayload(const TrilinosPayload &payload)
2420 : vmult(payload.vmult)
2421 , Tvmult(payload.Tvmult)
2422 , inv_vmult(payload.inv_vmult)
2423 , inv_Tvmult(payload.inv_Tvmult)
2424 , use_transpose(payload.use_transpose)
2425 , communicator(payload.communicator)
2426 , domain_map(payload.domain_map)
2427 , range_map(payload.range_map)
2428 {}
2429
2430
2431
2432 // Composite copy constructor
2433 // This is required for PackagedOperations
2434 TrilinosPayload::TrilinosPayload(const TrilinosPayload &first_op,
2435 const TrilinosPayload &second_op)
2436 : use_transpose(false)
2437 , // The combination of operators provides the exact
2438 // definition of the operation
2439 communicator(first_op.communicator)
2440 , domain_map(second_op.domain_map)
2441 , range_map(first_op.range_map)
2442 {}
2443
2444
2445
2446 TrilinosPayload
2447 TrilinosPayload::identity_payload() const
2448 {
2449 TrilinosPayload return_op(*this);
2450
2451 return_op.vmult = [](Range &tril_dst, const Range &tril_src) {
2452 tril_dst = tril_src;
2453 };
2454
2455 return_op.Tvmult = [](Range &tril_dst, const Range &tril_src) {
2456 tril_dst = tril_src;
2457 };
2458
2459 return_op.inv_vmult = [](Range &tril_dst, const Range &tril_src) {
2460 tril_dst = tril_src;
2461 };
2462
2463 return_op.inv_Tvmult = [](Range &tril_dst, const Range &tril_src) {
2464 tril_dst = tril_src;
2465 };
2466
2467 return return_op;
2468 }
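// These payload factories back the LinearOperator machinery. A typical
// use (sketch; helper names from linear_operator.h assumed, 'A' is a
// TrilinosWrappers::SparseMatrix):
//
// using Payload = TrilinosWrappers::internal::
// LinearOperatorImplementation::TrilinosPayload;
// const auto op_a = linear_operator<TrilinosWrappers::MPI::Vector,
// TrilinosWrappers::MPI::Vector,
// Payload>(A);
// const auto op_id = identity_operator(op_a); // uses identity_payload()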
2469
2470
2471
2472 TrilinosPayload
2473 TrilinosPayload::null_payload() const
2474 {
2475 TrilinosPayload return_op(*this);
2476
2477 return_op.vmult = [](Range &tril_dst, const Domain &) {
2478 const int ierr = tril_dst.PutScalar(0.0);
2479
2480 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2481 };
2482
2483 return_op.Tvmult = [](Domain &tril_dst, const Range &) {
2484 const int ierr = tril_dst.PutScalar(0.0);
2485
2486 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2487 };
2488
2489 return_op.inv_vmult = [](Domain &tril_dst, const Range &) {
2490 AssertThrow(false,
2491 ExcMessage("Cannot compute inverse of null operator"));
2492
2493 const int ierr = tril_dst.PutScalar(0.0);
2494
2495 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2496 };
2497
2498 return_op.inv_Tvmult = [](Range &tril_dst, const Domain &) {
2499 AssertThrow(false,
2500 ExcMessage("Cannot compute inverse of null operator"));
2501
2502 const int ierr = tril_dst.PutScalar(0.0);
2503
2504 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2505 };
2506
2507 return return_op;
2508 }
2509
2510
2511
2512 TrilinosPayload
2513 TrilinosPayload::transpose_payload() const
2514 {
2515 TrilinosPayload return_op(*this);
2516 return_op.transpose();
2517 return return_op;
2518 }
2519
2520
2521
2522 IndexSet
2523 TrilinosPayload::locally_owned_domain_indices() const
2524 {
2525 return IndexSet(domain_map);
2526 }
2527
2528
2529
2530 IndexSet
2531 TrilinosPayload::locally_owned_range_indices() const
2532 {
2533 return IndexSet(range_map);
2534 }
2535
2536
2537
2538 MPI_Comm
2539 TrilinosPayload::get_mpi_communicator() const
2540 {
2541 return communicator.Comm();
2542 }
2543
2544
2545
2546 void
2547 TrilinosPayload::transpose()
2548 {
2549 SetUseTranspose(!use_transpose);
2550 }
2551
2552
2553
2554 bool
2555 TrilinosPayload::UseTranspose() const
2556 {
2557 return use_transpose;
2558 }
2559
2560
2561
2562 int
2563 TrilinosPayload::SetUseTranspose(bool UseTranspose)
2564 {
2565 if (use_transpose != UseTranspose)
2566 {
2567 use_transpose = UseTranspose;
2568 std::swap(domain_map, range_map);
2569 std::swap(vmult, Tvmult);
2570 std::swap(inv_vmult, inv_Tvmult);
2571 }
2572 return 0;
2573 }
2574
2575
2576
2577 int
2578 TrilinosPayload::Apply(const VectorType &X, VectorType &Y) const
2579 {
2580 // The transposedness of the operations is taken care of
2581 // when we hit the transpose flag.
2582 vmult(Y, X);
2583 return 0;
2584 }
2585
2586
2587
2588 int
2589 TrilinosPayload::ApplyInverse(const VectorType &Y, VectorType &X) const
2590 {
2591 // The transposedness of the operations is taken care of
2592 // when we hit the transpose flag.
2593 inv_vmult(X, Y);
2594 return 0;
2595 }
2596
2597
2598
2599 const char *
2600 TrilinosPayload::Label() const
2601 {
2602 return "TrilinosPayload";
2603 }
2604
2605
2606
2607 const Epetra_Comm &
2608 TrilinosPayload::Comm() const
2609 {
2610 return communicator;
2611 }
2612
2613
2614
2615 const Epetra_Map &
2616 TrilinosPayload::OperatorDomainMap() const
2617 {
2618 return domain_map;
2619 }
2620
2621
2622
2623 const Epetra_Map &
2624 TrilinosPayload::OperatorRangeMap() const
2625 {
2626 return range_map;
2627 }
2628
2629
2630
2631 bool
2632 TrilinosPayload::HasNormInf() const
2633 {
2634 return false;
2635 }
2636
2637
2638
2639 double
2640 TrilinosPayload::NormInf() const
2641 {
2642 DEAL_II_NOT_IMPLEMENTED();
2643 return 0.0;
2644 }
2645
2646
2647
2648 TrilinosPayload
2649 operator+(const TrilinosPayload &first_op,
2650 const TrilinosPayload &second_op)
2651 {
2652 using Domain = typename TrilinosPayload::Domain;
2653 using Range = typename TrilinosPayload::Range;
2654 using Intermediate = typename TrilinosPayload::VectorType;
2655 using GVMVectorType = TrilinosWrappers::MPI::Vector;
2656
2657 Assert(first_op.locally_owned_domain_indices() ==
2658 second_op.locally_owned_domain_indices(),
2659 ExcMessage(
2660 "Operators are set to work on incompatible IndexSets."));
2661 Assert(first_op.locally_owned_range_indices() ==
2662 second_op.locally_owned_range_indices(),
2663 ExcMessage(
2664 "Operators are set to work on incompatible IndexSets."));
2665
2666 TrilinosPayload return_op(first_op, second_op);
2667
2668 // Capture by copy so the payloads are always valid
2669 return_op.vmult = [first_op, second_op](Range &tril_dst,
2670 const Domain &tril_src) {
2671 // Duplicated from LinearOperator::operator*
2672 // TODO: Template the constructor on GrowingVectorMemory vector type?
2673 GrowingVectorMemory<GVMVectorType> vector_memory;
2674 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2675
2676 // Initialize intermediate vector
2677 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2678 i->reinit(IndexSet(first_op_init_map),
2679 first_op.get_mpi_communicator(),
2680 /*bool omit_zeroing_entries =*/true);
2681
2682 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2683 const size_type i_local_size = i->end() - i->begin();
2684 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2685 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2686 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2687 Intermediate tril_int(View,
2688 first_op_init_map,
2689 const_cast<TrilinosScalar *>(i->begin()),
2690 i_local_size,
2691 1);
2692
2693 // These operators may themselves be transposed or not, so we let them
2694 // decide what the intended outcome is
2695 second_op.Apply(tril_src, tril_int);
2696 first_op.Apply(tril_src, tril_dst);
2697 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2698 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2699 };
2700
2701 return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
2702 const Range &tril_src) {
2703 // Duplicated from LinearOperator::operator*
2704 // TODO: Template the constructor on GrowingVectorMemory vector type?
2705 GrowingVectorMemory<GVMVectorType> vector_memory;
2706 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2707
2708 // These operators may themselves be transposed or not, so we let them
2709 // decide what the intended outcome is
2710 // We must first transpose the operators to get the right IndexSets
2711 // for the input, intermediate and result vectors
2712 const_cast<TrilinosPayload &>(first_op).transpose();
2713 const_cast<TrilinosPayload &>(second_op).transpose();
2714
2715 // Initialize intermediate vector
2716 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2717 i->reinit(IndexSet(first_op_init_map),
2718 first_op.get_mpi_communicator(),
2719 /*bool omit_zeroing_entries =*/true);
2720
2721 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2722 const size_type i_local_size = i->end() - i->begin();
2723 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2724 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2725 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2726 Intermediate tril_int(View,
2727 first_op_init_map,
2728 const_cast<TrilinosScalar *>(i->begin()),
2729 i_local_size,
2730 1);
2731
2732 // These operators may themselves be transposed or not, so we let them
2733 // decide what the intended outcome is
2734 second_op.Apply(tril_src, tril_int);
2735 first_op.Apply(tril_src, tril_dst);
2736 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2737 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2738
2739 // Reset transpose flag
2740 const_cast<TrilinosPayload &>(first_op).transpose();
2741 const_cast<TrilinosPayload &>(second_op).transpose();
2742 };
2743
2744 return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
2745 const Range &tril_src) {
2746 // Duplicated from LinearOperator::operator*
2747 // TODO: Template the constructor on GrowingVectorMemory vector type?
2748 GrowingVectorMemory<GVMVectorType> vector_memory;
2749 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2750
2751 // Initialize intermediate vector
2752 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2753 i->reinit(IndexSet(first_op_init_map),
2754 first_op.get_mpi_communicator(),
2755 /*bool omit_zeroing_entries =*/true);
2756
2757 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2758 const size_type i_local_size = i->end() - i->begin();
2759 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2760 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2761 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2762 Intermediate tril_int(View,
2763 first_op_init_map,
2764 const_cast<TrilinosScalar *>(i->begin()),
2765 i_local_size,
2766 1);
2767
2768 // These operators may themselves be transposed or not, so we let them
2769 // decide what the intended outcome is
2770 second_op.ApplyInverse(tril_src, tril_int);
2771 first_op.ApplyInverse(tril_src, tril_dst);
2772 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2773 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2774 };
2775
2776 return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
2777 const Domain &tril_src) {
2778 // Duplicated from LinearOperator::operator*
2779 // TODO: Template the constructor on GrowingVectorMemory vector type?
2780 GrowingVectorMemory<GVMVectorType> vector_memory;
2781 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2782
2783 // These operators may themselves be transposed or not, so we let them
2784 // decide what the intended outcome is
2785 // We must first transpose the operators to get the right IndexSets
2786 // for the input, intermediate and result vectors
2787 const_cast<TrilinosPayload &>(first_op).transpose();
2788 const_cast<TrilinosPayload &>(second_op).transpose();
2789
2790 // Initialize intermediate vector
2791 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2792 i->reinit(IndexSet(first_op_init_map),
2793 first_op.get_mpi_communicator(),
2794 /*bool omit_zeroing_entries =*/true);
2795
2796 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2797 const size_type i_local_size = i->end() - i->begin();
2798 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2799 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2800 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2801 Intermediate tril_int(View,
2802 first_op_init_map,
2803 const_cast<TrilinosScalar *>(i->begin()),
2804 i_local_size,
2805 1);
2806
2807 // These operators may themselves be transposed or not, so we let them
2808 // decide what the intended outcome is
2809 second_op.ApplyInverse(tril_src, tril_int);
2810 first_op.ApplyInverse(tril_src, tril_dst);
2811 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2812 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2813
2814 // Reset transpose flag
2815 const_cast<TrilinosPayload &>(first_op).transpose();
2816 const_cast<TrilinosPayload &>(second_op).transpose();
2817 };
2818
2819 return return_op;
2820 }
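// In summary, the payload assembled above realizes the action of the sum
// of the two operators: vmult computes dst = first_op(src) + second_op(src),
// and the transposed and inverse variants follow the same pattern with the
// respective Apply/ApplyInverse calls.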
2821
2822
2823
2824 TrilinosPayload
2825 operator*(const TrilinosPayload &first_op,
2826 const TrilinosPayload &second_op)
2827 {
2828 using Domain = typename TrilinosPayload::Domain;
2829 using Range = typename TrilinosPayload::Range;
2830 using Intermediate = typename TrilinosPayload::VectorType;
2831 using GVMVectorType = TrilinosWrappers::MPI::Vector;
2832
2833 Assert(first_op.locally_owned_domain_indices() ==
2834 second_op.locally_owned_range_indices(),
2835 ExcMessage(
2836 "Operators are set to work on incompatible IndexSets."));
2837
2838 TrilinosPayload return_op(first_op, second_op);
2839
2840 // Capture by copy so the payloads are always valid
2841 return_op.vmult = [first_op, second_op](Range &tril_dst,
2842 const Domain &tril_src) {
2843 // Duplicated from LinearOperator::operator*
2844 // TODO: Template the constructor on GrowingVectorMemory vector type?
2845 GrowingVectorMemory<GVMVectorType> vector_memory;
2846 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2847
2848 // Initialize intermediate vector
2849 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2850 i->reinit(IndexSet(first_op_init_map),
2851 first_op.get_mpi_communicator(),
2852 /*bool omit_zeroing_entries =*/true);
2853
2854 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2855 const size_type i_local_size = i->end() - i->begin();
2856 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2857 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2858 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2859 Intermediate tril_int(View,
2860 first_op_init_map,
2861 const_cast<TrilinosScalar *>(i->begin()),
2862 i_local_size,
2863 1);
2864
2865 // These operators may themselves be transposed or not, so we let them
2866 // decide what the intended outcome is
2867 second_op.Apply(tril_src, tril_int);
2868 first_op.Apply(tril_int, tril_dst);
2869 };
2870
2871 return_op.Tvmult = [first_op, second_op](Domain &tril_dst,
2872 const Range &tril_src) {
2873 // Duplicated from LinearOperator::operator*
2874 // TODO: Template the constructor on GrowingVectorMemory vector type?
2875 GrowingVectorMemory<GVMVectorType> vector_memory;
2876 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2877
2878 // These operators may themselves be transposed or not, so we let them
2879 // decide what the intended outcome is
2880 // We must first transpose the operators to get the right IndexSets
2881 // for the input, intermediate and result vectors
2882 const_cast<TrilinosPayload &>(first_op).transpose();
2883 const_cast<TrilinosPayload &>(second_op).transpose();
2884
2885 // Initialize intermediate vector
2886 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2887 i->reinit(IndexSet(first_op_init_map),
2888 first_op.get_mpi_communicator(),
2889 /*bool omit_zeroing_entries =*/true);
2890
2891 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2892 const size_type i_local_size = i->end() - i->begin();
2893 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2894 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2895 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2896 Intermediate tril_int(View,
2897 first_op_init_map,
2898 const_cast<TrilinosScalar *>(i->begin()),
2899 i_local_size,
2900 1);
2901
2902 // Apply the operators in the reverse order compared to vmult
2903 first_op.Apply(tril_src, tril_int);
2904 second_op.Apply(tril_int, tril_dst);
2905
2906 // Reset transpose flag
2907 const_cast<TrilinosPayload &>(first_op).transpose();
2908 const_cast<TrilinosPayload &>(second_op).transpose();
2909 };
2910
2911 return_op.inv_vmult = [first_op, second_op](Domain &tril_dst,
2912 const Range &tril_src) {
2913 // Duplicated from LinearOperator::operator*
2914 // TODO: Template the constructor on GrowingVectorMemory vector type?
2915 GrowingVectorMemory<GVMVectorType> vector_memory;
2916 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2917
2918 // Initialize intermediate vector
2919 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2920 i->reinit(IndexSet(first_op_init_map),
2921 first_op.get_mpi_communicator(),
2922 /*bool omit_zeroing_entries =*/true);
2923
2924 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2925 const size_type i_local_size = i->end() - i->begin();
2926 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2927 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2928 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2929 Intermediate tril_int(View,
2930 first_op_init_map,
2931 const_cast<TrilinosScalar *>(i->begin()),
2932 i_local_size,
2933 1);
2934
2935 // Apply the operators in the reverse order compared to vmult,
2936 // i.e., in the same order as Tvmult
2937 first_op.ApplyInverse(tril_src, tril_int);
2938 second_op.ApplyInverse(tril_int, tril_dst);
2939 };
2940
2941 return_op.inv_Tvmult = [first_op, second_op](Range &tril_dst,
2942 const Domain &tril_src) {
2943 // Duplicated from LinearOperator::operator*
2944 // TODO: Template the constructor on GrowingVectorMemory vector type?
2945 GrowingVectorMemory<GVMVectorType> vector_memory;
2946 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2947
2948 // These operators may themselves be transposed or not, so we let them
2949 // decide what the intended outcome is
2950 // We must first transpose the operators to get the right IndexSets
2951 // for the input, intermediate and result vectors
2952 const_cast<TrilinosPayload &>(first_op).transpose();
2953 const_cast<TrilinosPayload &>(second_op).transpose();
2954
2955 // Initialize intermediate vector
2956 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2957 i->reinit(IndexSet(first_op_init_map),
2958 first_op.get_mpi_communicator(),
2959 /*bool omit_zeroing_entries =*/true);
2960
2961 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2962 const size_type i_local_size = i->end() - i->begin();
2963 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2964 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2965 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2966 Intermediate tril_int(View,
2967 first_op_init_map,
2968 const_cast<TrilinosScalar *>(i->begin()),
2969 i_local_size,
2970 1);
2971
2972 // These operators may themselves be transposed or not, so we let them
2973 // decide what the intended outcome is
2974 // Apply the operators in the reverse order compared to Tvmult,
2975 // i.e., in the same order as vmult
2976 second_op.ApplyInverse(tril_src, tril_int);
2977 first_op.ApplyInverse(tril_int, tril_dst);
2978
2979 // Reset transpose flag
2980 const_cast<TrilinosPayload &>(first_op).transpose();
2981 const_cast<TrilinosPayload &>(second_op).transpose();
2982 };
2983
2984 return return_op;
2985 }
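// In summary, the payload assembled above realizes the composition of the
// two operators: vmult computes dst = first_op(second_op(src)), while the
// transposed and inverse variants apply the factors in the opposite order,
// as the comments in the lambdas note.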
2986
2987 } // namespace LinearOperatorImplementation
2988 } /* namespace internal */
2989} /* namespace TrilinosWrappers */
2990
2991
2992
2993// explicit instantiations
2994# include "lac/trilinos_sparse_matrix.inst"
2995
2996# ifndef DOXYGEN
2997// TODO: put these instantiations into generic file
2998namespace TrilinosWrappers
2999{
3000 template void
3001 SparseMatrix::reinit(const ::SparsityPattern &);
3002
3003 template void
3004 SparseMatrix::reinit(const DynamicSparsityPattern &);
3005
3006 template void
3007 SparseMatrix::reinit(const IndexSet &,
3008 const IndexSet &,
3009 const ::SparsityPattern &,
3010 const MPI_Comm,
3011 const bool);
3012
3013 template void
3014 SparseMatrix::reinit(const IndexSet &,
3015 const IndexSet &,
3016 const DynamicSparsityPattern &,
3017 const MPI_Comm,
3018 const bool);
3019
3020 template void
3021 SparseMatrix::vmult(MPI::Vector &, const MPI::Vector &) const;
3022
3023 template void
3024 SparseMatrix::vmult(::Vector<double> &,
3025 const ::Vector<double> &) const;
3026
3027 template void
3028 SparseMatrix::vmult(
3029 ::LinearAlgebra::distributed::Vector<double, MemorySpace::Host> &,
3030 const ::LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
3031 &) const;
3032
3033 template void
3034 SparseMatrix::vmult(
3035 ::LinearAlgebra::distributed::Vector<float, MemorySpace::Host> &,
3036 const ::LinearAlgebra::distributed::Vector<float, MemorySpace::Host>
3037 &) const;
3038
3039# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3040# if defined(HAVE_TPETRA_INST_DOUBLE)
3041 template void
3042 SparseMatrix::vmult(
3043 ::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Host> &,
3044 const ::LinearAlgebra::TpetraWrappers::Vector<double,
3045 MemorySpace::Host> &)
3046 const;
3047 template void
3048 SparseMatrix::vmult(
3049 ::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Default>
3050 &,
3051 const ::LinearAlgebra::TpetraWrappers::Vector<double,
3052 MemorySpace::Default> &)
3053 const;
3054# endif
3055
3056# if defined(HAVE_TPETRA_INST_FLOAT)
3057 template void
3058 SparseMatrix::vmult(
3059 ::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Host> &,
3060 const ::LinearAlgebra::TpetraWrappers::Vector<float,
3061 MemorySpace::Host> &)
3062 const;
3063 template void
3064 SparseMatrix::vmult(
3065 ::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Default>
3066 &,
3067 const ::LinearAlgebra::TpetraWrappers::Vector<float,
3068 MemorySpace::Default> &)
3069 const;
3070# endif
3071# endif
3072
3073 template void
3074 SparseMatrix::vmult(
3075 ::LinearAlgebra::EpetraWrappers::Vector &,
3076 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3077
3078 template void
3079 SparseMatrix::Tvmult(MPI::Vector &, const MPI::Vector &) const;
3080
3081 template void
3082 SparseMatrix::Tvmult(::Vector<double> &,
3083 const ::Vector<double> &) const;
3084
3085 template void
3086 SparseMatrix::Tvmult(
3087 ::LinearAlgebra::distributed::Vector<double, MemorySpace::Host> &,
3088 const ::LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
3089 &) const;
3090
3091# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3092# if defined(HAVE_TPETRA_INST_DOUBLE)
3093 template void
3094 SparseMatrix::Tvmult(
3095 ::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Host> &,
3096 const ::LinearAlgebra::TpetraWrappers::Vector<double,
3097 MemorySpace::Host> &)
3098 const;
3099 template void
3100 SparseMatrix::Tvmult(
3101 ::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Default>
3102 &,
3103 const ::LinearAlgebra::TpetraWrappers::Vector<double,
3104 MemorySpace::Default> &)
3105 const;
3106# endif
3107
3108# if defined(HAVE_TPETRA_INST_FLOAT)
3109 template void
3110 SparseMatrix::Tvmult(
3111 ::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Host> &,
3112 const ::LinearAlgebra::TpetraWrappers::Vector<float,
3113 MemorySpace::Host> &)
3114 const;
3115 template void
3116 SparseMatrix::Tvmult(
3117 ::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Default>
3118 &,
3119 const ::LinearAlgebra::TpetraWrappers::Vector<float,
3120 MemorySpace::Default> &)
3121 const;
3122# endif
3123# endif
3124
3125 template void
3126 SparseMatrix::Tvmult(
3127 ::LinearAlgebra::EpetraWrappers::Vector &,
3128 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3129
3130 template void
3131 SparseMatrix::vmult_add(MPI::Vector &, const MPI::Vector &) const;
3132
3133 template void
3134 SparseMatrix::vmult_add(::Vector<double> &,
3135 const ::Vector<double> &) const;
3136
3137 template void
3138 SparseMatrix::vmult_add(
3139 ::LinearAlgebra::distributed::Vector<double, MemorySpace::Host> &,
3140 const ::LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
3141 &) const;
3142
3143# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3144# if defined(HAVE_TPETRA_INST_DOUBLE)
3145 template void
3146 SparseMatrix::vmult_add(
3147 ::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Host> &,
3148 const ::LinearAlgebra::TpetraWrappers::Vector<double,
3149 MemorySpace::Host> &)
3150 const;
3151 template void
3152 SparseMatrix::vmult_add(
3153 ::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Default>
3154 &,
3155 const ::LinearAlgebra::TpetraWrappers::Vector<double,
3156 MemorySpace::Default> &)
3157 const;
3158# endif
3159
3160# if defined(HAVE_TPETRA_INST_FLOAT)
3161 template void
3162 SparseMatrix::vmult_add(
3163 ::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Host> &,
3164 const ::LinearAlgebra::TpetraWrappers::Vector<float,
3165 MemorySpace::Host> &)
3166 const;
3167 template void
3168 SparseMatrix::vmult_add(
3169 ::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Default>
3170 &,
3171 const ::LinearAlgebra::TpetraWrappers::Vector<float,
3172 MemorySpace::Default> &)
3173 const;
3174# endif
3175# endif
3176
3177 template void
3178 SparseMatrix::vmult_add(
3179 ::LinearAlgebra::EpetraWrappers::Vector &,
3180 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3181
3182 template void
3183 SparseMatrix::Tvmult_add(MPI::Vector &, const MPI::Vector &) const;
3184
3185 template void
3186 SparseMatrix::Tvmult_add(::Vector<double> &,
3187 const ::Vector<double> &) const;
3188
3189 template void
3190 SparseMatrix::Tvmult_add(
3191 ::LinearAlgebra::distributed::Vector<double, MemorySpace::Host> &,
3192 const ::LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
3193 &) const;
3194
3195 template void
3196 SparseMatrix::Tvmult_add(
3197 ::LinearAlgebra::distributed::Vector<float, MemorySpace::Host> &,
3198 const ::LinearAlgebra::distributed::Vector<float, MemorySpace::Host>
3199 &) const;
3200
3201
3202# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3203# if defined(HAVE_TPETRA_INST_DOUBLE)
3204 template void
3205 SparseMatrix::Tvmult_add(
3206 ::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Host> &,
3207 const ::LinearAlgebra::TpetraWrappers::Vector<double,
3208 MemorySpace::Host> &)
3209 const;
3210 template void
3211 SparseMatrix::Tvmult_add(
3212 ::LinearAlgebra::TpetraWrappers::Vector<double, MemorySpace::Default>
3213 &,
3214 const ::LinearAlgebra::TpetraWrappers::Vector<double,
3215 MemorySpace::Default> &)
3216 const;
3217# endif
3218
3219# if defined(HAVE_TPETRA_INST_FLOAT)
3220 template void
3221 SparseMatrix::Tvmult_add(
3222 ::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Host> &,
3223 const ::LinearAlgebra::TpetraWrappers::Vector<float,
3224 MemorySpace::Host> &)
3225 const;
3226 template void
3227 SparseMatrix::Tvmult_add(
3228 ::LinearAlgebra::TpetraWrappers::Vector<float, MemorySpace::Default>
3229 &,
3230 const ::LinearAlgebra::TpetraWrappers::Vector<float,
3231 MemorySpace::Default> &)
3232 const;
3233# endif
3234# endif
3235
3236 template void
3237 SparseMatrix::Tvmult_add(
3238 ::LinearAlgebra::EpetraWrappers::Vector &,
3239 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3240} // namespace TrilinosWrappers
3241# endif // DOXYGEN
3242
3243 DEAL_II_NAMESPACE_CLOSE
3244
3245#endif // DEAL_II_WITH_TRILINOS