trilinos_sparse_matrix.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2008 - 2022 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
18
19#ifdef DEAL_II_WITH_TRILINOS
20
22
30
32# include <boost/container/small_vector.hpp>
34
35# ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
36# include <EpetraExt_MatrixMatrix.h>
37# endif
38# include <Epetra_Export.h>
39# include <Teuchos_RCP.hpp>
40# include <ml_epetra_utils.h>
41# include <ml_struct.h>
42
43# include <memory>
44
45DEAL_II_NAMESPACE_OPEN
46
47namespace TrilinosWrappers
48{
49 namespace internal
50 {
51 template <typename VectorType>
52 typename VectorType::value_type *
53 begin(VectorType &V)
54 {
55 return V.begin();
56 }
57
58 template <typename VectorType>
59 const typename VectorType::value_type *
60 begin(const VectorType &V)
61 {
62 return V.begin();
63 }
64
65 template <typename VectorType>
66 typename VectorType::value_type *
67 end(VectorType &V)
68 {
69 return V.end();
70 }
71
72 template <typename VectorType>
73 const typename VectorType::value_type *
74 end(const VectorType &V)
75 {
76 return V.end();
77 }
78
79 template <>
80 double *
81 begin(LinearAlgebra::EpetraWrappers::Vector &V)
82 {
83 return V.trilinos_vector()[0];
84 }
85
86 template <>
87 const double *
88 begin(const LinearAlgebra::EpetraWrappers::Vector &V)
89 {
90 return V.trilinos_vector()[0];
91 }
92
93 template <>
94 double *
95 end(LinearAlgebra::EpetraWrappers::Vector &V)
96 {
97 return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
98 }
99
100 template <>
101 const double *
102 end(const LinearAlgebra::EpetraWrappers::Vector &V)
103 {
104 return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
105 }
107# ifdef DEAL_II_TRILINOS_WITH_TPETRA
108 template <typename Number>
109 Number *
110 begin(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
111 {
112 return V.trilinos_vector().getDataNonConst().get();
113 }
114
115 template <typename Number>
116 const Number *
117 begin(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
118 {
119 return V.trilinos_vector().getData().get();
120 }
121
122 template <typename Number>
123 Number *
124 end(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
125 {
126 return V.trilinos_vector().getDataNonConst().get() +
127 V.trilinos_vector().getLocalLength();
128 }
129
130 template <typename Number>
131 const Number *
132 end(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
133 {
134 return V.trilinos_vector().getData().get() +
135 V.trilinos_vector().getLocalLength();
136 }
137# endif
138 } // namespace internal
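  // A reading aid (sketch, not part of the library source): the begin()/end()
  // helpers above expose the locally owned entries of each supported vector
  // type as a plain array, which vmult() further down uses to wrap deal.II
  // vectors in Epetra_MultiVector views without copying. Assuming an IndexSet
  // 'locally_owned_dofs' describing the owned rows:
  //
  // @code
  // TrilinosWrappers::MPI::Vector v(locally_owned_dofs, MPI_COMM_WORLD);
  // double *      p = internal::begin(v); // first locally stored entry
  // double *const e = internal::end(v);   // one past the last local entry
  // for (; p != e; ++p)
  //   *p = 0.; // touches only locally owned data
  // @endcode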
139
140
141 namespace SparseMatrixIterators
142 {
143 void
144 AccessorBase::visit_present_row()
145 {
146 // if we are asked to visit the past-the-end line, then simply
147 // release all our caches and go on with life.
148 //
149 // do the same if the row we're supposed to visit is not locally
150 // owned. this is simply going to make non-locally owned rows
151 // look like they're empty
152 if ((this->a_row == matrix->m()) ||
153 (matrix->in_local_range(this->a_row) == false))
154 {
155 colnum_cache.reset();
156 value_cache.reset();
157
158 return;
159 }
160
161 // get a representation of the present row
162 int ncols;
163 TrilinosWrappers::types::int_type colnums = matrix->n();
164 if (value_cache.get() == nullptr)
165 {
166 value_cache =
167 std::make_shared<std::vector<TrilinosScalar>>(matrix->n());
168 colnum_cache = std::make_shared<std::vector<size_type>>(matrix->n());
169 }
170 else
171 {
172 value_cache->resize(matrix->n());
173 colnum_cache->resize(matrix->n());
174 }
175
176 int ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy(
177 this->a_row,
178 colnums,
179 ncols,
180 value_cache->data(),
181 reinterpret_cast<TrilinosWrappers::types::int_type *>(
182 colnum_cache->data()));
183 value_cache->resize(ncols);
184 colnum_cache->resize(ncols);
185 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
186
187 // copy it into our caches if the
188 // line isn't empty. if it is, then
189 // we've done something wrong, since
190 // we shouldn't have initialized an
191 // iterator for an empty line (what
192 // would it point to?)
193 }
194 } // namespace SparseMatrixIterators
195
196
197 // The constructor is actually the
198 // only point where we have to check
199 // whether we build a serial or a
200 // parallel Trilinos matrix.
201 // Actually, it does not even matter
202 // how many threads there are, but
203 // only if we use an MPI compiler or
204 // a standard compiler. So, even one
205 // thread on a configuration with
206 // MPI will still get a parallel
207 // interface.
208 SparseMatrix::SparseMatrix()
209 : column_space_map(new Epetra_Map(0, 0, Utilities::Trilinos::comm_self()))
210 , matrix(
211 new Epetra_FECrsMatrix(View, *column_space_map, *column_space_map, 0))
212 , last_action(Zero)
213 , compressed(true)
214 {
215 matrix->FillComplete();
216 }
217
218
219
220 SparseMatrix::SparseMatrix(const size_type m,
221 const size_type n,
222 const unsigned int n_max_entries_per_row)
223 : column_space_map(
224 new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
225 0,
226 Utilities::Trilinos::comm_self()))
227 ,
228
229 // on one processor only, we know how the
230 // columns of the matrix will be
231 // distributed (everything on one
232 // processor), so we can hand in this
233 // information to the constructor. we
234 // can't do so in parallel, where the
235 // information from columns is only
236 // available when entries have been added
237 matrix(new Epetra_FECrsMatrix(
238 Copy,
239 Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
240 0,
241 Utilities::Trilinos::comm_self()),
242 *column_space_map,
243 n_max_entries_per_row,
244 false))
245 , last_action(Zero)
246 , compressed(false)
247 {}
248
249
250
251 SparseMatrix::SparseMatrix(const size_type m,
252 const size_type n,
253 const std::vector<unsigned int> &n_entries_per_row)
254 : column_space_map(
255 new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
256 0,
257 Utilities::Trilinos::comm_self()))
258 , matrix(new Epetra_FECrsMatrix(
259 Copy,
260 Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
261 0,
262 Utilities::Trilinos::comm_self()),
263 *column_space_map,
264 reinterpret_cast<int *>(
265 const_cast<unsigned int *>(n_entries_per_row.data())),
266 false))
267 , last_action(Zero)
268 , compressed(false)
269 {}
270
271
272
273 SparseMatrix::SparseMatrix(const IndexSet & parallel_partitioning,
274 const MPI_Comm & communicator,
275 const unsigned int n_max_entries_per_row)
276 : column_space_map(new Epetra_Map(
277 parallel_partitioning.make_trilinos_map(communicator, false)))
278 , matrix(new Epetra_FECrsMatrix(Copy,
279 *column_space_map,
280 n_max_entries_per_row,
281 false))
282 , last_action(Zero)
283 , compressed(false)
284 {}
285
286
287
288 SparseMatrix::SparseMatrix(const IndexSet &parallel_partitioning,
289 const MPI_Comm &communicator,
290 const std::vector<unsigned int> &n_entries_per_row)
291 : column_space_map(new Epetra_Map(
292 parallel_partitioning.make_trilinos_map(communicator, false)))
293 , matrix(new Epetra_FECrsMatrix(Copy,
294 *column_space_map,
295 reinterpret_cast<int *>(
296 const_cast<unsigned int *>(
297 n_entries_per_row.data())),
298 false))
299 , last_action(Zero)
300 , compressed(false)
301 {}
302
303
304
305 SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
306 const IndexSet &col_parallel_partitioning,
307 const MPI_Comm &communicator,
308 const size_type n_max_entries_per_row)
309 : column_space_map(new Epetra_Map(
310 col_parallel_partitioning.make_trilinos_map(communicator, false)))
311 , matrix(new Epetra_FECrsMatrix(
312 Copy,
313 row_parallel_partitioning.make_trilinos_map(communicator, false),
314 n_max_entries_per_row,
315 false))
316 , last_action(Zero)
317 , compressed(false)
318 {}
319
320
321
322 SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
323 const IndexSet &col_parallel_partitioning,
324 const MPI_Comm &communicator,
325 const std::vector<unsigned int> &n_entries_per_row)
326 : column_space_map(new Epetra_Map(
327 col_parallel_partitioning.make_trilinos_map(communicator, false)))
328 , matrix(new Epetra_FECrsMatrix(
329 Copy,
330 row_parallel_partitioning.make_trilinos_map(communicator, false),
331 reinterpret_cast<int *>(
332 const_cast<unsigned int *>(n_entries_per_row.data())),
333 false))
334 , last_action(Zero)
335 , compressed(false)
336 {}
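  // A construction sketch (all names hypothetical): in parallel programs the
  // IndexSet constructors above are the usual entry point, e.g.
  //
  // @code
  // const IndexSet owned_rows = dof_handler.locally_owned_dofs();
  // TrilinosWrappers::SparseMatrix A(owned_rows,
  //                                  MPI_COMM_WORLD,
  //                                  /*n_max_entries_per_row=*/10);
  // @endcode
  //
  // All of these constructors leave the matrix uncompressed: entries still
  // have to be written and compress() called before the matrix can be used.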
337
338
339
340 SparseMatrix::SparseMatrix(const SparsityPattern &sparsity_pattern)
341 : column_space_map(new Epetra_Map(sparsity_pattern.domain_partitioner()))
342 , matrix(
343 new Epetra_FECrsMatrix(Copy,
344 sparsity_pattern.trilinos_sparsity_pattern(),
345 false))
346 , last_action(Zero)
347 , compressed(true)
348 {
349 Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true,
350 ExcMessage(
351 "The Trilinos sparsity pattern has not been compressed."));
352 compress(VectorOperation::insert);
353 }
354
355
356
357 SparseMatrix::SparseMatrix(SparseMatrix &&other) noexcept
358 : column_space_map(std::move(other.column_space_map))
359 , matrix(std::move(other.matrix))
360 , nonlocal_matrix(std::move(other.nonlocal_matrix))
361 , nonlocal_matrix_exporter(std::move(other.nonlocal_matrix_exporter))
362 , last_action(other.last_action)
363 , compressed(other.compressed)
364 {
365 other.last_action = Zero;
366 other.compressed = false;
367 }
368
369
370
371 void
372 SparseMatrix::copy_from(const SparseMatrix &rhs)
373 {
374 if (this == &rhs)
375 return;
376
377 nonlocal_matrix.reset();
378 nonlocal_matrix_exporter.reset();
379
380 // check whether we need to update the whole matrix layout (we have
381 // different maps or if we detect a row where the columns of the two
382 // matrices do not match)
383 bool needs_deep_copy =
384 !matrix->RowMap().SameAs(rhs.matrix->RowMap()) ||
385 !matrix->ColMap().SameAs(rhs.matrix->ColMap()) ||
386 !matrix->DomainMap().SameAs(rhs.matrix->DomainMap()) ||
387 n_nonzero_elements() != rhs.n_nonzero_elements();
388 if (!needs_deep_copy)
389 {
390 // Try to copy all the rows of the matrix one by one. In case of error
391 // (i.e., the column indices are different), we need to abort and blow
392 // away the matrix.
393 for (const auto row : locally_owned_range_indices())
394 {
395 const int row_local = matrix->RowMap().LID(
396 static_cast<TrilinosWrappers::types::int_type>(row));
397 Assert((row_local >= 0), ExcAccessToNonlocalRow(row));
398
399 int n_entries, rhs_n_entries;
400 TrilinosScalar *value_ptr, *rhs_value_ptr;
401 int * index_ptr, *rhs_index_ptr;
402 int ierr = rhs.matrix->ExtractMyRowView(row_local,
403 rhs_n_entries,
404 rhs_value_ptr,
405 rhs_index_ptr);
406 (void)ierr;
407 Assert(ierr == 0, ExcTrilinosError(ierr));
408
409 ierr = matrix->ExtractMyRowView(row_local,
410 n_entries,
411 value_ptr,
412 index_ptr);
413 Assert(ierr == 0, ExcTrilinosError(ierr));
414
415 if (n_entries != rhs_n_entries ||
416 std::memcmp(static_cast<void *>(index_ptr),
417 static_cast<void *>(rhs_index_ptr),
418 sizeof(int) * n_entries) != 0)
419 {
420 needs_deep_copy = true;
421 break;
422 }
423
424 for (int i = 0; i < n_entries; ++i)
425 value_ptr[i] = rhs_value_ptr[i];
426 }
427 }
428
429 if (needs_deep_copy)
430 {
431 column_space_map =
432 std::make_unique<Epetra_Map>(rhs.trilinos_matrix().DomainMap());
433
434 // release memory before reallocation
435 matrix = std::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);
436
437 matrix->FillComplete(*column_space_map, matrix->RowMap());
438 }
439
440 if (rhs.nonlocal_matrix.get() != nullptr)
441 nonlocal_matrix =
442 std::make_unique<Epetra_CrsMatrix>(Copy, rhs.nonlocal_matrix->Graph());
443 }
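  // Usage sketch for copy_from() (names hypothetical): since values are
  // copied row by row and the matrix is only rebuilt when the layouts
  // differ, repeated copies between structurally identical matrices are
  // cheap:
  //
  // @code
  // TrilinosWrappers::SparseMatrix B;
  // B.copy_from(A); // A: an already assembled TrilinosWrappers::SparseMatrix
  // @endcode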
444
445
446
447 namespace
448 {
449 using size_type = SparseMatrix::size_type;
450
451 template <typename SparsityPatternType>
452 void
453 reinit_matrix(const IndexSet & row_parallel_partitioning,
454 const IndexSet & column_parallel_partitioning,
455 const SparsityPatternType & sparsity_pattern,
456 const bool exchange_data,
457 const MPI_Comm & communicator,
458 std::unique_ptr<Epetra_Map> &column_space_map,
459 std::unique_ptr<Epetra_FECrsMatrix> &matrix,
460 std::unique_ptr<Epetra_CrsMatrix> & nonlocal_matrix,
461 std::unique_ptr<Epetra_Export> & nonlocal_matrix_exporter)
462 {
463 // release memory before reallocation
464 matrix.reset();
465 nonlocal_matrix.reset();
466 nonlocal_matrix_exporter.reset();
467
468 column_space_map = std::make_unique<Epetra_Map>(
469 column_parallel_partitioning.make_trilinos_map(communicator, false));
470
471 if (column_space_map->Comm().MyPID() == 0)
472 {
473 AssertDimension(sparsity_pattern.n_rows(),
474 row_parallel_partitioning.size());
475 AssertDimension(sparsity_pattern.n_cols(),
476 column_parallel_partitioning.size());
477 }
478
479 Epetra_Map row_space_map =
480 row_parallel_partitioning.make_trilinos_map(communicator, false);
481
482 // if we want to exchange data, build a usual Trilinos sparsity pattern
483 // and let that handle the exchange. otherwise, manually create a
484 // CrsGraph, which consumes considerably less memory because it can set
485 // correct number of indices right from the start
486 if (exchange_data)
487 {
488 SparsityPattern trilinos_sparsity;
489 trilinos_sparsity.reinit(row_parallel_partitioning,
490 column_parallel_partitioning,
491 sparsity_pattern,
492 communicator,
493 exchange_data);
494 matrix = std::make_unique<Epetra_FECrsMatrix>(
495 Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);
496
497 return;
498 }
499
500 const size_type first_row = TrilinosWrappers::min_my_gid(row_space_map),
501 last_row =
502 TrilinosWrappers::max_my_gid(row_space_map) + 1;
503 std::vector<int> n_entries_per_row(last_row - first_row);
504
505 for (size_type row = first_row; row < last_row; ++row)
506 n_entries_per_row[row - first_row] = sparsity_pattern.row_length(row);
507
508 // The deal.II notation of a Sparsity pattern corresponds to the Epetra
509 // concept of a Graph. Hence, we generate a graph by copying the
510 // sparsity pattern into it, and then build up the matrix from the
511 // graph. This is considerable faster than directly filling elements
512 // into the matrix. Moreover, it consumes less memory, since the
513 // internal reordering is done on ints only, and we can leave the
514 // doubles aside.
515
516 // for more than one processor, need to specify only row map first and
517 // let the matrix entries decide about the column map (which says which
518 // columns are present in the matrix, not to be confused with the
519 // col_map that tells how the domain dofs of the matrix will be
520 // distributed). for only one processor, we can directly assign the
521 // columns as well. Compare this with bug # 4123 in the Sandia Bugzilla.
522 std::unique_ptr<Epetra_CrsGraph> graph;
523 if (row_space_map.Comm().NumProc() > 1)
524 graph = std::make_unique<Epetra_CrsGraph>(Copy,
525 row_space_map,
526 n_entries_per_row.data(),
527 true);
528 else
529 graph = std::make_unique<Epetra_CrsGraph>(Copy,
530 row_space_map,
531 *column_space_map,
532 n_entries_per_row.data(),
533 true);
534
535 // This functions assumes that the sparsity pattern sits on all
536 // processors (completely). The parallel version uses an Epetra graph
537 // that is already distributed.
538
539 // now insert the indices
540 std::vector<TrilinosWrappers::types::int_type> row_indices;
541
542 for (size_type row = first_row; row < last_row; ++row)
543 {
544 const int row_length = sparsity_pattern.row_length(row);
545 if (row_length == 0)
546 continue;
547
548 row_indices.resize(row_length, -1);
549 {
550 typename SparsityPatternType::iterator p =
551 sparsity_pattern.begin(row);
552 for (size_type col = 0; p != sparsity_pattern.end(row); ++p, ++col)
553 row_indices[col] = p->column();
554 }
555 graph->Epetra_CrsGraph::InsertGlobalIndices(row,
556 row_length,
557 row_indices.data());
558 }
559
560 // Eventually, optimize the graph structure (sort indices, make memory
561 // contiguous, etc). note that the documentation of the function indeed
562 // states that we first need to provide the column (domain) map and then
563 // the row (range) map
564 graph->FillComplete(*column_space_map, row_space_map);
565 graph->OptimizeStorage();
566
567 // check whether we got the number of columns right.
568 AssertDimension(sparsity_pattern.n_cols(),
569 TrilinosWrappers::n_global_cols(*graph));
570 (void)n_global_cols;
571
572 // And now finally generate the matrix.
573 matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
574 }
575
576
577
578 // for the non-local graph, we need to circumvent the problem that some
579 // processors will not add into the non-local graph at all: We do not want
580 // to insert dummy elements on >5000 processors because that gets very
581 // slow. Thus, we set a flag in Epetra_CrsGraph that sets the correct
582 // flag. Since it is protected, we need to expose this information by
583 // deriving a class from Epetra_CrsGraph for the purpose of creating the
584 // data structure
585 class Epetra_CrsGraphMod : public Epetra_CrsGraph
586 {
587 public:
588 Epetra_CrsGraphMod(const Epetra_Map &row_map,
589 const int * n_entries_per_row)
590 : Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
591 {}
592
593 void
594 SetIndicesAreGlobal()
595 {
596 this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
597 }
598 };
599
600
601
602 // specialization for DynamicSparsityPattern which can provide us with
603 // more information about the non-locally owned rows
604 template <>
605 void
606 reinit_matrix(const IndexSet & row_parallel_partitioning,
607 const IndexSet & column_parallel_partitioning,
608 const DynamicSparsityPattern &sparsity_pattern,
609 const bool exchange_data,
610 const MPI_Comm & communicator,
611 std::unique_ptr<Epetra_Map> & column_space_map,
612 std::unique_ptr<Epetra_FECrsMatrix> &matrix,
613 std::unique_ptr<Epetra_CrsMatrix> & nonlocal_matrix,
614 std::unique_ptr<Epetra_Export> & nonlocal_matrix_exporter)
615 {
616 matrix.reset();
617 nonlocal_matrix.reset();
618 nonlocal_matrix_exporter.reset();
619
620 column_space_map = std::make_unique<Epetra_Map>(
621 column_parallel_partitioning.make_trilinos_map(communicator, false));
622
623 AssertDimension(sparsity_pattern.n_rows(),
624 row_parallel_partitioning.size());
625 AssertDimension(sparsity_pattern.n_cols(),
626 column_parallel_partitioning.size());
627
628 Epetra_Map row_space_map =
629 row_parallel_partitioning.make_trilinos_map(communicator, false);
630
631 IndexSet relevant_rows(sparsity_pattern.row_index_set());
632 // serial case
633 if (relevant_rows.size() == 0)
634 {
635 relevant_rows.set_size(
636 TrilinosWrappers::n_global_elements(row_space_map));
637 relevant_rows.add_range(
638 0, TrilinosWrappers::n_global_elements(row_space_map));
639 }
640 relevant_rows.compress();
641 Assert(relevant_rows.n_elements() >=
642 static_cast<unsigned int>(row_space_map.NumMyElements()),
643 ExcMessage(
644 "Locally relevant rows of sparsity pattern must contain "
645 "all locally owned rows"));
646
647 // check whether the relevant rows correspond to exactly the same map as
648 // the owned rows. In that case, do not create the nonlocal graph and
649 // fill the columns by demand
650 const bool have_ghost_rows = [&]() {
651 std::vector<dealii::types::global_dof_index> indices;
652 relevant_rows.fill_index_vector(indices);
653 Epetra_Map relevant_map(
654 TrilinosWrappers::types::int_type(-1),
655 TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
656 (indices.empty() ?
657 nullptr :
658 reinterpret_cast<TrilinosWrappers::types::int_type *>(
659 indices.data())),
660 0,
661 row_space_map.Comm());
662 return !relevant_map.SameAs(row_space_map);
663 }();
664
665 const unsigned int n_rows = relevant_rows.n_elements();
666 std::vector<TrilinosWrappers::types::int_type> ghost_rows;
667 std::vector<int> n_entries_per_row(row_space_map.NumMyElements());
668 std::vector<int> n_entries_per_ghost_row;
669 for (unsigned int i = 0, own = 0; i < n_rows; ++i)
670 {
671 const TrilinosWrappers::types::int_type global_row =
672 relevant_rows.nth_index_in_set(i);
673 if (row_space_map.MyGID(global_row))
674 n_entries_per_row[own++] = sparsity_pattern.row_length(global_row);
675 else if (sparsity_pattern.row_length(global_row) > 0)
676 {
677 ghost_rows.push_back(global_row);
678 n_entries_per_ghost_row.push_back(
679 sparsity_pattern.row_length(global_row));
680 }
681 }
682
683 Epetra_Map off_processor_map(-1,
684 ghost_rows.size(),
685 (ghost_rows.size() > 0) ?
686 (ghost_rows.data()) :
687 nullptr,
688 0,
689 row_space_map.Comm());
690
691 std::unique_ptr<Epetra_CrsGraph> graph;
692 std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
693 if (row_space_map.Comm().NumProc() > 1)
694 {
695 graph =
696 std::make_unique<Epetra_CrsGraph>(Copy,
697 row_space_map,
698 (n_entries_per_row.size() > 0) ?
699 (n_entries_per_row.data()) :
700 nullptr,
701 exchange_data ? false : true);
702 if (have_ghost_rows == true)
703 nonlocal_graph = std::make_unique<Epetra_CrsGraphMod>(
704 off_processor_map, n_entries_per_ghost_row.data());
705 }
706 else
707 graph =
708 std::make_unique<Epetra_CrsGraph>(Copy,
709 row_space_map,
710 *column_space_map,
711 (n_entries_per_row.size() > 0) ?
712 (n_entries_per_row.data()) :
713 nullptr,
714 true);
715
716 // now insert the indices, select between the right matrix
717 std::vector<TrilinosWrappers::types::int_type> row_indices;
718
719 for (unsigned int i = 0; i < n_rows; ++i)
720 {
721 const TrilinosWrappers::types::int_type global_row =
722 relevant_rows.nth_index_in_set(i);
723 const int row_length = sparsity_pattern.row_length(global_row);
724 if (row_length == 0)
725 continue;
726
727 row_indices.resize(row_length, -1);
728 for (int col = 0; col < row_length; ++col)
729 row_indices[col] = sparsity_pattern.column_number(global_row, col);
730
731 if (row_space_map.MyGID(global_row))
732 graph->InsertGlobalIndices(global_row,
733 row_length,
734 row_indices.data());
735 else
736 {
737 Assert(nonlocal_graph.get() != nullptr, ExcInternalError());
738 nonlocal_graph->InsertGlobalIndices(global_row,
739 row_length,
740 row_indices.data());
741 }
742 }
743
744 // finalize nonlocal graph and create nonlocal matrix
745 if (nonlocal_graph.get() != nullptr)
746 {
747 // must make sure the IndicesAreGlobal flag is set on all processors
748 // because some processors might not call InsertGlobalIndices (and
749 // we do not want to insert dummy indices on all processors for
750 // large-scale simulations due to the bad impact on performance)
751 nonlocal_graph->SetIndicesAreGlobal();
752 Assert(nonlocal_graph->IndicesAreGlobal() == true,
753 ExcInternalError());
754 nonlocal_graph->FillComplete(*column_space_map, row_space_map);
755 nonlocal_graph->OptimizeStorage();
756
757 // insert data from nonlocal graph into the final sparsity pattern
758 if (exchange_data)
759 {
760 Epetra_Export exporter(nonlocal_graph->RowMap(), row_space_map);
761 int ierr = graph->Export(*nonlocal_graph, exporter, Add);
762 (void)ierr;
763 Assert(ierr == 0, ExcTrilinosError(ierr));
764 }
765
766 nonlocal_matrix =
767 std::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);
768 }
769
770 graph->FillComplete(*column_space_map, row_space_map);
771 graph->OptimizeStorage();
772
773 AssertDimension(sparsity_pattern.n_cols(),
774 TrilinosWrappers::n_global_cols(*graph));
775
776 matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
777 }
778 } // namespace
779
780
781
782 template <typename SparsityPatternType>
783 void
784 SparseMatrix::reinit(const SparsityPatternType &sparsity_pattern)
785 {
786 reinit_matrix(complete_index_set(sparsity_pattern.n_rows()),
787 complete_index_set(sparsity_pattern.n_cols()),
788 sparsity_pattern,
789 false,
790 MPI_COMM_SELF,
791 column_space_map,
792 matrix,
793 nonlocal_matrix,
794 nonlocal_matrix_exporter);
795 }
796
797
798
799 template <typename SparsityPatternType>
800 inline typename std::enable_if<
801 !std::is_same<SparsityPatternType,
802 dealii::SparseMatrix<double>>::value>::type
803 SparseMatrix::reinit(const IndexSet & row_parallel_partitioning,
804 const IndexSet & col_parallel_partitioning,
805 const SparsityPatternType &sparsity_pattern,
806 const MPI_Comm & communicator,
807 const bool exchange_data)
808 {
809 reinit_matrix(row_parallel_partitioning,
810 col_parallel_partitioning,
811 sparsity_pattern,
812 exchange_data,
813 communicator,
814 column_space_map,
815 matrix,
816 nonlocal_matrix,
817 nonlocal_matrix_exporter);
818
819 // In the end, the matrix needs to be compressed in order to be really
820 // ready.
821 last_action = Zero;
822 compressed = false;
823 }
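  // A reinit() sketch with a DynamicSparsityPattern (names hypothetical);
  // passing exchange_data = true ships sparsity entries that were created
  // for non-owned rows to their owning process:
  //
  // @code
  // DynamicSparsityPattern dsp(owned_rows.size());
  // // ... fill dsp, e.g. via DoFTools::make_sparsity_pattern() ...
  // TrilinosWrappers::SparseMatrix A;
  // A.reinit(owned_rows, owned_rows, dsp, MPI_COMM_WORLD,
  //          /*exchange_data=*/true);
  // @endcode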
824
825
826
827 void
828 SparseMatrix::reinit(const SparsityPattern &sparsity_pattern)
829 {
830 matrix.reset();
831 nonlocal_matrix_exporter.reset();
832
833 // reinit with a (parallel) Trilinos sparsity pattern.
834 column_space_map =
835 std::make_unique<Epetra_Map>(sparsity_pattern.domain_partitioner());
836 matrix = std::make_unique<Epetra_FECrsMatrix>(
837 Copy, sparsity_pattern.trilinos_sparsity_pattern(), false);
838
839 if (sparsity_pattern.nonlocal_graph.get() != nullptr)
840 nonlocal_matrix =
841 std::make_unique<Epetra_CrsMatrix>(Copy,
842 *sparsity_pattern.nonlocal_graph);
843 else
844 nonlocal_matrix.reset();
845
846 last_action = Zero;
847 compressed = false;
848 }
849
850
851
852 void
853 SparseMatrix::reinit(const SparseMatrix &sparse_matrix)
854 {
855 if (this == &sparse_matrix)
856 return;
857
858 column_space_map =
859 std::make_unique<Epetra_Map>(sparse_matrix.trilinos_matrix().DomainMap());
860 matrix.reset();
861 nonlocal_matrix_exporter.reset();
862 matrix = std::make_unique<Epetra_FECrsMatrix>(
863 Copy, sparse_matrix.trilinos_sparsity_pattern(), false);
864
865 if (sparse_matrix.nonlocal_matrix != nullptr)
866 nonlocal_matrix = std::make_unique<Epetra_CrsMatrix>(
867 Copy, sparse_matrix.nonlocal_matrix->Graph());
868 else
869 nonlocal_matrix.reset();
870
871 last_action = Zero;
872 compressed = false;
873 }
874
875
876
877 template <typename number>
878 inline void
879 SparseMatrix::reinit(
880 const IndexSet & row_parallel_partitioning,
881 const IndexSet & col_parallel_partitioning,
882 const dealii::SparseMatrix<number> &dealii_sparse_matrix,
883 const MPI_Comm & communicator,
884 const double drop_tolerance,
885 const bool copy_values,
886 const dealii::SparsityPattern * use_this_sparsity)
887 {
888 if (copy_values == false)
889 {
890 // in case we do not copy values, just
891 // call the other function.
892 if (use_this_sparsity == nullptr)
893 reinit(row_parallel_partitioning,
894 col_parallel_partitioning,
895 dealii_sparse_matrix.get_sparsity_pattern(),
896 communicator,
897 false);
898 else
899 reinit(row_parallel_partitioning,
900 col_parallel_partitioning,
901 *use_this_sparsity,
902 communicator,
903 false);
904 return;
905 }
906
907 const size_type n_rows = dealii_sparse_matrix.m();
908
909 AssertDimension(row_parallel_partitioning.size(), n_rows);
910 AssertDimension(col_parallel_partitioning.size(), dealii_sparse_matrix.n());
911
912 const dealii::SparsityPattern &sparsity_pattern =
913 (use_this_sparsity != nullptr) ?
914 *use_this_sparsity :
915 dealii_sparse_matrix.get_sparsity_pattern();
916
917 if (matrix.get() == nullptr || m() != n_rows ||
918 n_nonzero_elements() != sparsity_pattern.n_nonzero_elements())
919 {
920 reinit(row_parallel_partitioning,
921 col_parallel_partitioning,
922 sparsity_pattern,
923 communicator,
924 false);
925 }
926
927 // fill the values. the same as above: go through all rows of the
928 // matrix, and then all columns. since the sparsity patterns of the
929 // input matrix and the specified sparsity pattern might be different,
930 // need to go through the row for both these sparsity structures
931 // simultaneously in order to really set the correct values.
932 size_type maximum_row_length = matrix->MaxNumEntries();
933 std::vector<size_type> row_indices(maximum_row_length);
934 std::vector<TrilinosScalar> values(maximum_row_length);
935
936 for (size_type row = 0; row < n_rows; ++row)
937 // see if the row is locally stored on this processor
938 if (row_parallel_partitioning.is_element(row) == true)
939 {
940 dealii::SparsityPattern::iterator select_index =
941 sparsity_pattern.begin(row);
942 typename dealii::SparseMatrix<number>::const_iterator it =
943 dealii_sparse_matrix.begin(row);
944 size_type col = 0;
945 if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
946 {
947 // optimized diagonal
948 AssertDimension(it->column(), row);
949 if (std::fabs(it->value()) > drop_tolerance)
950 {
951 values[col] = it->value();
952 row_indices[col++] = it->column();
953 }
954 ++select_index;
955 ++it;
956 }
957
958 while (it != dealii_sparse_matrix.end(row) &&
959 select_index != sparsity_pattern.end(row))
960 {
961 while (select_index->column() < it->column() &&
962 select_index != sparsity_pattern.end(row))
963 ++select_index;
964 while (it->column() < select_index->column() &&
965 it != dealii_sparse_matrix.end(row))
966 ++it;
967
968 if (it == dealii_sparse_matrix.end(row))
969 break;
970 if (std::fabs(it->value()) > drop_tolerance)
971 {
972 values[col] = it->value();
973 row_indices[col++] = it->column();
974 }
975 ++select_index;
976 ++it;
977 }
978 set(row,
979 col,
980 reinterpret_cast<size_type *>(row_indices.data()),
981 values.data(),
982 false);
983 }
984 compress(VectorOperation::insert);
985 }
986
987
988
989 template <typename number>
990 void
991 SparseMatrix::reinit(
992 const dealii::SparseMatrix<number> &dealii_sparse_matrix,
993 const double drop_tolerance,
994 const bool copy_values,
995 const dealii::SparsityPattern * use_this_sparsity)
996 {
997 reinit(complete_index_set(dealii_sparse_matrix.m()),
998 complete_index_set(dealii_sparse_matrix.n()),
999 dealii_sparse_matrix,
1000 MPI_COMM_SELF,
1001 drop_tolerance,
1002 copy_values,
1003 use_this_sparsity);
1004 }
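  // A conversion sketch (names hypothetical): a serial deal.II matrix can be
  // copied into a Trilinos matrix, dropping entries whose magnitude does not
  // exceed the tolerance:
  //
  // @code
  // dealii::SparseMatrix<double> serial_matrix; // assembled elsewhere
  // TrilinosWrappers::SparseMatrix A;
  // A.reinit(serial_matrix, /*drop_tolerance=*/1e-13);
  // @endcode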
1005
1006
1007
1008 void
1009 SparseMatrix::reinit(const Epetra_CrsMatrix &input_matrix,
1010 const bool copy_values)
1011 {
1012 Assert(input_matrix.Filled() == true,
1013 ExcMessage("Input CrsMatrix has not called FillComplete()!"));
1014
1015 column_space_map = std::make_unique<Epetra_Map>(input_matrix.DomainMap());
1016
1017 const Epetra_CrsGraph *graph = &input_matrix.Graph();
1018
1019 nonlocal_matrix.reset();
1020 nonlocal_matrix_exporter.reset();
1021 matrix.reset();
1022 matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
1023
1024 matrix->FillComplete(*column_space_map, input_matrix.RangeMap(), true);
1025
1026 if (copy_values == true)
1027 {
1028 // point to the first data entry in the two
1029 // matrices and copy the content
1030 const TrilinosScalar *in_values = input_matrix[0];
1031 TrilinosScalar * values = (*matrix)[0];
1032 const size_type my_nonzeros = input_matrix.NumMyNonzeros();
1033 std::memcpy(values, in_values, my_nonzeros * sizeof(TrilinosScalar));
1034 }
1035
1036 last_action = Zero;
1037 compressed = true;
1038 }
1039
1040
1041
1042 void
1043 SparseMatrix::compress(::dealii::VectorOperation::values operation)
1044 {
1045 Epetra_CombineMode mode = last_action;
1046 if (last_action == Zero)
1047 {
1048 if ((operation == ::dealii::VectorOperation::add) ||
1049 (operation == ::dealii::VectorOperation::unknown))
1050 mode = Add;
1051 else if (operation == ::dealii::VectorOperation::insert)
1052 mode = Insert;
1053 else
1054 Assert(
1055 false,
1056 ExcMessage(
1057 "compress() can only be called with VectorOperation add, insert, or unknown"));
1058 }
1059 else
1060 {
1061 Assert(((last_action == Add) &&
1062 (operation != ::dealii::VectorOperation::insert)) ||
1063 ((last_action == Insert) &&
1064 (operation != ::dealii::VectorOperation::add)),
1065 ExcMessage("Operation and argument to compress() do not match"));
1066 }
1067
1068 // flush buffers
1069 int ierr;
1070 if (nonlocal_matrix.get() != nullptr && mode == Add)
1071 {
1072 // do only export in case of an add() operation, otherwise the owning
1073 // processor must have set the correct entry
1074 nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
1075 if (nonlocal_matrix_exporter.get() == nullptr)
1076 nonlocal_matrix_exporter =
1077 std::make_unique<Epetra_Export>(nonlocal_matrix->RowMap(),
1078 matrix->RowMap());
1079 ierr =
1080 matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
1081 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1082 ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
1083 nonlocal_matrix->PutScalar(0);
1084 }
1085 else
1086 ierr =
1087 matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true, mode);
1088
1089 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1090
1091 ierr = matrix->OptimizeStorage();
1092 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1093
1094 last_action = Zero;
1095
1096 compressed = true;
1097 }
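  // The usual assembly cycle around compress() in a sketch (indices and
  // values hypothetical). Note that compress() communicates and must be
  // called on all MPI processes:
  //
  // @code
  // A.add(i, j, value);               // accumulate local contributions
  // // ... more add() calls ...
  // A.compress(VectorOperation::add); // exchange buffers, optimize storage
  // A.vmult(dst, src);                // only now is the matrix usable
  // @endcode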
1098
1099
1100
1101 void
1102 SparseMatrix::clear()
1103 {
1104 // When we clear the matrix, reset
1105 // the pointer and generate an
1106 // empty matrix.
1107 column_space_map =
1108 std::make_unique<Epetra_Map>(0, 0, Utilities::Trilinos::comm_self());
1109 matrix = std::make_unique<Epetra_FECrsMatrix>(View, *column_space_map, 0);
1110 nonlocal_matrix.reset();
1111 nonlocal_matrix_exporter.reset();
1112
1113 matrix->FillComplete();
1114
1115 compressed = true;
1116 }
1117
1118
1119
1120 void
1121 SparseMatrix::clear_row(const size_type row,
1122 const TrilinosScalar new_diag_value)
1123 {
1124 Assert(matrix->Filled() == true, ExcMatrixNotCompressed());
1125
1126 // Only do this on the rows owned
1127 // locally on this processor.
1128 int local_row =
1129 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1130 if (local_row >= 0)
1131 {
1132 TrilinosScalar *values;
1133 int * col_indices;
1134 int num_entries;
1135 const int ierr =
1136 matrix->ExtractMyRowView(local_row, num_entries, values, col_indices);
1137 (void)ierr;
1138
1139 Assert(ierr == 0, ExcTrilinosError(ierr));
1140
1141 const std::ptrdiff_t diag_index =
1142 std::find(col_indices, col_indices + num_entries, local_row) -
1143 col_indices;
1144
1145 for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
1146 if (diag_index != j || new_diag_value == 0)
1147 values[j] = 0.;
1148
1149 if (diag_index != num_entries)
1150 values[diag_index] = new_diag_value;
1151 }
1152 }
1153
1154
1155
1156 void
1157 SparseMatrix::clear_rows(const std::vector<size_type> &rows,
1158 const TrilinosScalar new_diag_value)
1159 {
1160 for (const auto row : rows)
1161 clear_row(row, new_diag_value);
1162 }
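  // A sketch of the typical use of clear_rows(): eliminating rows subject to
  // Dirichlet constraints after assembly ('constrained_dofs' hypothetical):
  //
  // @code
  // const std::vector<types::global_dof_index> constrained_dofs =
  //   collect_constrained_dofs(); // hypothetical helper
  // A.clear_rows(constrained_dofs, /*new_diag_value=*/1.0);
  // @endcode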
1163
1164
1165
1166 TrilinosScalar
1167 SparseMatrix::operator()(const size_type i, const size_type j) const
1168 {
1169 // Extract local indices in
1170 // the matrix.
1171 int trilinos_i =
1172 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1173 trilinos_j =
1174 matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1175 TrilinosScalar value = 0.;
1176
1177 // If the data is not on the
1178 // present processor, we throw
1179 // an exception. This is one of
1180 // the two tiny differences to
1181 // the el(i,j) call, which does
1182 // not throw any assertions.
1183 if (trilinos_i == -1)
1184 {
1185 Assert(false,
1186 ExcAccessToNonLocalElement(
1187 i, j, local_range().first, local_range().second - 1));
1188 }
1189 else
1190 {
1191 // Check whether the matrix has
1192 // already been transformed to local
1193 // indices.
1194 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1195
1196 // Prepare pointers for extraction
1197 // of a view of the row.
1198 int nnz_present = matrix->NumMyEntries(trilinos_i);
1199 int nnz_extracted;
1200 int * col_indices;
1201 TrilinosScalar *values;
1202
1203 // Generate the view and make
1204 // sure that we have not generated
1205 // an error.
1206 // TODO Check that col_indices are int and not long long
1207 int ierr = matrix->ExtractMyRowView(trilinos_i,
1208 nnz_extracted,
1209 values,
1210 col_indices);
1211 (void)ierr;
1212 Assert(ierr == 0, ExcTrilinosError(ierr));
1213
1214 Assert(nnz_present == nnz_extracted,
1215 ExcDimensionMismatch(nnz_present, nnz_extracted));
1216
1217 // Search the index where we
1218 // look for the value, and then
1219 // finally get it.
1220 const std::ptrdiff_t local_col_index =
1221 std::find(col_indices, col_indices + nnz_present, trilinos_j) -
1222 col_indices;
1223
1224 // This is actually the only
1225 // difference to the el(i,j)
1226 // function, which means that
1227 // we throw an exception in
1228 // this case instead of just
1229 // returning zero for an
1230 // element that is not present
1231 // in the sparsity pattern.
1232 if (local_col_index == nnz_present)
1233 {
1234 Assert(false, ExcInvalidIndex(i, j));
1235 }
1236 else
1237 value = values[local_col_index];
1238 }
1239
1240 return value;
1241 }
1242
1243
1244
1245 TrilinosScalar
1246 SparseMatrix::el(const size_type i, const size_type j) const
1247 {
1248 // Extract local indices in
1249 // the matrix.
1250 int trilinos_i =
1251 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
1252 trilinos_j =
1253 matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
1254 TrilinosScalar value = 0.;
1255
1256 // If the data is not on the
1257 // present processor, we can't
1258 // continue. Just print out zero
1259 // as discussed in the
1260 // documentation of this
1261 // function. if you want error
1262 // checking, use operator().
1263 if ((trilinos_i == -1) || (trilinos_j == -1))
1264 return 0.;
1265 else
1266 {
1267 // Check whether the matrix
1268 // already is transformed to
1269 // local indices.
1270 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1271
1272 // Prepare pointers for extraction
1273 // of a view of the row.
1274 int nnz_present = matrix->NumMyEntries(trilinos_i);
1275 int nnz_extracted;
1276 int * col_indices;
1277 TrilinosScalar *values;
1278
1279 // Generate the view and make
1280 // sure that we have not generated
1281 // an error.
1282 int ierr = matrix->ExtractMyRowView(trilinos_i,
1283 nnz_extracted,
1284 values,
1285 col_indices);
1286 (void)ierr;
1287 Assert(ierr == 0, ExcTrilinosError(ierr));
1288
1289 Assert(nnz_present == nnz_extracted,
1290 ExcDimensionMismatch(nnz_present, nnz_extracted));
1291
1292 // Search the index where we
1293 // look for the value, and then
1294 // finally get it.
1295 const std::ptrdiff_t local_col_index =
1296 std::find(col_indices, col_indices + nnz_present, trilinos_j) -
1297 col_indices;
1298
1299 // This is actually the only
1300 // difference to the () function
1301 // querying (i,j), where we throw an
1302 // exception instead of just
1303 // returning zero for an element
1304 // that is not present in the
1305 // sparsity pattern.
1306 if (local_col_index == nnz_present)
1307 value = 0;
1308 else
1309 value = values[local_col_index];
1310 }
1311
1312 return value;
1313 }
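  // The difference between operator() and el() in a sketch:
  //
  // @code
  // const TrilinosScalar a = A(i, j);    // asserts (i,j) is stored locally
  // const TrilinosScalar b = A.el(i, j); // returns 0. if non-local or absent
  // @endcode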
1314
1315
1316
1317 TrilinosScalar
1318 SparseMatrix::diag_element(const size_type i) const
1319 {
1320 Assert(m() == n(), ExcNotQuadratic());
1321
1322# ifdef DEBUG
1323 // use operator() in debug mode because
1324 // it checks if this is a valid element
1325 // (in parallel)
1326 return operator()(i, i);
1327# else
1328 // Trilinos doesn't seem to have a
1329 // more efficient way to access the
1330 // diagonal than by just using the
1331 // standard el(i,j) function.
1332 return el(i, i);
1333# endif
1334 }
1335
1336
1337
1338 unsigned int
1339 SparseMatrix::row_length(const size_type row) const
1340 {
1341 Assert(row < m(), ExcInternalError());
1342
1343 // get a representation of the
1344 // present row
1345 int ncols = -1;
1346 int local_row =
1347 matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1348 Assert((local_row >= 0), ExcAccessToNonlocalRow(row));
1349
1350 // on the processor who owns this
1351 // row, we'll have a non-negative
1352 // value.
1353 if (local_row >= 0)
1354 {
1355 int ierr = matrix->NumMyRowEntries(local_row, ncols);
1356 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1357 }
1358
1359 return static_cast<unsigned int>(ncols);
1360 }
1361
1362
1363
1364 void
1365 SparseMatrix::set(const std::vector<size_type> & row_indices,
1366 const std::vector<size_type> & col_indices,
1367 const FullMatrix<TrilinosScalar> &values,
1368 const bool elide_zero_values)
1369 {
1370 Assert(row_indices.size() == values.m(),
1371 ExcDimensionMismatch(row_indices.size(), values.m()));
1372 Assert(col_indices.size() == values.n(),
1373 ExcDimensionMismatch(col_indices.size(), values.n()));
1374
1375 for (size_type i = 0; i < row_indices.size(); ++i)
1376 set(row_indices[i],
1377 col_indices.size(),
1378 col_indices.data(),
1379 &values(i, 0),
1380 elide_zero_values);
1381 }
1382
1383
1384
1385 void
1386 SparseMatrix::set(const size_type row,
1387 const std::vector<size_type> & col_indices,
1388 const std::vector<TrilinosScalar> &values,
1389 const bool elide_zero_values)
1390 {
1391 Assert(col_indices.size() == values.size(),
1392 ExcDimensionMismatch(col_indices.size(), values.size()));
1393
1394 set(row,
1395 col_indices.size(),
1396 col_indices.data(),
1397 values.data(),
1398 elide_zero_values);
1399 }
1400
1401
1402
1403 template <>
1404 void
1405 SparseMatrix::set<TrilinosScalar>(const size_type row,
1406 const size_type n_cols,
1407 const size_type * col_indices,
1408 const TrilinosScalar *values,
1409 const bool elide_zero_values)
1410 {
1411 AssertIndexRange(row, this->m());
1412
1413 int ierr;
1414 if (last_action == Add)
1415 {
1416 ierr =
1417 matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true);
1418
1419 Assert(ierr == 0, ExcTrilinosError(ierr));
1420 }
1421
1422 last_action = Insert;
1423
1424 const TrilinosWrappers::types::int_type *col_index_ptr;
1425 const TrilinosScalar * col_value_ptr;
1426 const TrilinosWrappers::types::int_type trilinos_row = row;
1427 TrilinosWrappers::types::int_type n_columns;
1428
1429 boost::container::small_vector<TrilinosScalar, 200> local_value_array(
1430 n_cols);
1431 boost::container::small_vector<TrilinosWrappers::types::int_type, 200>
1432 local_index_array(n_cols);
1433
1434 // If we don't elide zeros, the pointers are already available... need to
1435 // cast to non-const pointers as that is the format taken by Trilinos (but
1436 // we will not modify const data)
1437 if (elide_zero_values == false)
1438 {
1439 col_index_ptr =
1440 reinterpret_cast<const TrilinosWrappers::types::int_type *>(
1441 col_indices);
1442 col_value_ptr = values;
1443 n_columns = n_cols;
1444 }
1445 else
1446 {
1447 // Otherwise, extract nonzero values in each row and get the
1448 // respective indices.
1449 col_index_ptr = local_index_array.data();
1450 col_value_ptr = local_value_array.data();
1451
1452 n_columns = 0;
1453 for (size_type j = 0; j < n_cols; ++j)
1454 {
1455 const double value = values[j];
1456 AssertIsFinite(value);
1457 if (value != 0)
1458 {
1459 local_index_array[n_columns] = col_indices[j];
1460 local_value_array[n_columns] = value;
1461 n_columns++;
1462 }
1463 }
1464
1465 AssertIndexRange(n_columns, n_cols + 1);
1466 }
1467
1468
1469 // If the calling matrix owns the row to which we want to insert values,
1470 // we can directly call the Epetra_CrsMatrix input function, which is much
1471 // faster than the Epetra_FECrsMatrix function. We distinguish between two
1472 // cases: the first one is when the matrix is not filled (i.e., it is
1473 // possible to add new elements to the sparsity pattern), and the second
1474 // one is when the pattern is already fixed. In the former case, we add
1475 // the possibility to insert new values, and in the second we just replace
1476 // data.
1477 if (matrix->RowMap().MyGID(
1478 static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1479 {
1480 if (matrix->Filled() == false)
1481 {
1482 ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
1483 row, static_cast<int>(n_columns), col_value_ptr, col_index_ptr);
1484
1485 // When inserting elements, we do not want to create exceptions in
1486 // the case when inserting non-local data (since that's what we
1487 // want to do right now).
1488 if (ierr > 0)
1489 ierr = 0;
1490 }
1491 else
1492 ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row,
1493 n_columns,
1494 col_value_ptr,
1495 col_index_ptr);
1496 }
1497 else
1498 {
1499 // When we're at off-processor data, we have to stick with the
1500 // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
1501 // we call it is the fastest one (any other will lead to repeated
1502 // allocation and deallocation of memory in order to call the function
1503 // we already use, which is very inefficient if writing one element at
1504 // a time).
1505 compressed = false;
1506
1507 if (matrix->Filled() == false)
1508 {
1509 ierr = matrix->InsertGlobalValues(1,
1510 &trilinos_row,
1511 n_columns,
1512 col_index_ptr,
1513 &col_value_ptr,
1514 Epetra_FECrsMatrix::ROW_MAJOR);
1515 if (ierr > 0)
1516 ierr = 0;
1517 }
1518 else
1519 ierr = matrix->ReplaceGlobalValues(1,
1520 &trilinos_row,
1521 n_columns,
1522 col_index_ptr,
1523 &col_value_ptr,
1524 Epetra_FECrsMatrix::ROW_MAJOR);
1525 // use the FECrsMatrix facilities for set even in the case when we
1526 // have explicitly set the off-processor rows because that only works
1527 // properly when adding elements, not when setting them (since we want
1528 // to only touch elements that have been set explicitly, and there is
1529 // no way on the receiving processor to identify them otherwise)
1530 }
1531
1532 Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1533 AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
1534 }
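  // A set() sketch for the common cell-assembly case (names hypothetical).
  // Unlike add(), set() defaults to writing zeros as well, so the sparsity
  // pattern must provide all referenced entries:
  //
  // @code
  // FullMatrix<TrilinosScalar>           cell_matrix(4, 4);
  // std::vector<types::global_dof_index> dof_indices(4);
  // // ... fill cell_matrix and dof_indices ...
  // A.set(dof_indices, dof_indices, cell_matrix);
  // A.compress(VectorOperation::insert);
  // @endcode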
1535
1536
1537
1538 void
1539 SparseMatrix::add(const std::vector<size_type> & indices,
1540 const FullMatrix<TrilinosScalar> &values,
1541 const bool elide_zero_values)
1542 {
1543 Assert(indices.size() == values.m(),
1544 ExcDimensionMismatch(indices.size(), values.m()));
1545 Assert(values.m() == values.n(), ExcNotQuadratic());
1546
1547 for (size_type i = 0; i < indices.size(); ++i)
1548 add(indices[i],
1549 indices.size(),
1550 indices.data(),
1551 &values(i, 0),
1552 elide_zero_values);
1553 }
1554
1555
1556
1557 void
1558 SparseMatrix::add(const std::vector<size_type> & row_indices,
1559 const std::vector<size_type> & col_indices,
1560 const FullMatrix<TrilinosScalar> &values,
1561 const bool elide_zero_values)
1562 {
1563 Assert(row_indices.size() == values.m(),
1564 ExcDimensionMismatch(row_indices.size(), values.m()));
1565 Assert(col_indices.size() == values.n(),
1566 ExcDimensionMismatch(col_indices.size(), values.n()));
1567
1568 for (size_type i = 0; i < row_indices.size(); ++i)
1569 add(row_indices[i],
1570 col_indices.size(),
1571 col_indices.data(),
1572 &values(i, 0),
1573 elide_zero_values);
1574 }
1575
1576
1577
1578 void
1579 SparseMatrix::add(const size_type row,
1580 const std::vector<size_type> & col_indices,
1581 const std::vector<TrilinosScalar> &values,
1582 const bool elide_zero_values)
1583 {
1584 Assert(col_indices.size() == values.size(),
1585 ExcDimensionMismatch(col_indices.size(), values.size()));
1586
1587 add(row,
1588 col_indices.size(),
1589 col_indices.data(),
1590 values.data(),
1591 elide_zero_values);
1592 }
1593
1594
1595
1596 void
1597 SparseMatrix::add(const size_type row,
1598 const size_type n_cols,
1599 const size_type * col_indices,
1600 const TrilinosScalar *values,
1601 const bool elide_zero_values,
1602 const bool /*col_indices_are_sorted*/)
1603 {
1604 AssertIndexRange(row, this->m());
1605 int ierr;
1606 if (last_action == Insert)
1607 {
1608 // TODO: this could lead to a dead lock when only one processor
1609 // calls GlobalAssemble.
1610 ierr =
1611 matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), false);
1612
1613 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1614 }
1615
1616 last_action = Add;
1617
1618 const TrilinosWrappers::types::int_type *col_index_ptr;
1619 const TrilinosScalar * col_value_ptr;
1620 const TrilinosWrappers::types::int_type trilinos_row = row;
1621 TrilinosWrappers::types::int_type n_columns;
1622
1623 boost::container::small_vector<TrilinosScalar, 100> local_value_array(
1624 n_cols);
1625 boost::container::small_vector<TrilinosWrappers::types::int_type, 100>
1626 local_index_array(n_cols);
1627
1628 // If we don't elide zeros, the pointers are already available... need to
1629 // cast to non-const pointers as that is the format taken by Trilinos (but
1630 // we will not modify const data)
1631 if (elide_zero_values == false)
1632 {
1633 col_index_ptr =
1634 reinterpret_cast<const TrilinosWrappers::types::int_type *>(
1635 col_indices);
1636 col_value_ptr = values;
1637 n_columns = n_cols;
1638# ifdef DEBUG
1639 for (size_type j = 0; j < n_cols; ++j)
1640 AssertIsFinite(values[j]);
1641# endif
1642 }
1643 else
1644 {
1645 // Otherwise, extract nonzero values in each row and the corresponding
1646 // index.
1647 col_index_ptr = local_index_array.data();
1648 col_value_ptr = local_value_array.data();
1649
1650 n_columns = 0;
1651 for (size_type j = 0; j < n_cols; ++j)
1652 {
1653 const double value = values[j];
1654
1655 AssertIsFinite(value);
1656 if (value != 0)
1657 {
1658 local_index_array[n_columns] = col_indices[j];
1659 local_value_array[n_columns] = value;
1660 ++n_columns;
1661 }
1662 }
1663
1664 AssertIndexRange(n_columns, n_cols + 1);
1665 }
1666 // Exit early if there is nothing to do
1667 if (n_columns == 0)
1668 {
1669 return;
1670 }
1671
1672 // If the calling processor owns the row to which we want to add values, we
1673 // can directly call the Epetra_CrsMatrix input function, which is much
1674 // faster than the Epetra_FECrsMatrix function.
1675 if (matrix->RowMap().MyGID(
1676 static_cast<TrilinosWrappers::types::int_type>(row)) == true)
1677 {
1678 ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row,
1679 n_columns,
1680 col_value_ptr,
1681 col_index_ptr);
1682 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1683 }
1684 else if (nonlocal_matrix.get() != nullptr)
1685 {
1686 compressed = false;
1687 // this is the case when we have explicitly set the off-processor rows
1688 // and want to create a separate matrix object for them (to retain
1689 // thread-safety)
1690 Assert(nonlocal_matrix->RowMap().LID(
1691 static_cast<TrilinosWrappers::types::int_type>(row)) != -1,
1692 ExcMessage("Attempted to write into off-processor matrix row "
1693 "that has not be specified as being writable upon "
1694 "initialization"));
1695 ierr = nonlocal_matrix->SumIntoGlobalValues(row,
1696 n_columns,
1697 col_value_ptr,
1698 col_index_ptr);
1699 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1700 }
1701 else
1702 {
1703 // When we're at off-processor data, we have to stick with the
1704 // standard SumIntoGlobalValues function. Nevertheless, the way we
1705 // call it is the fastest one (any other will lead to repeated
1706 // allocation and deallocation of memory in order to call the function
1707 // we already use, which is very inefficient if writing one element at
1708 // a time).
1709 compressed = false;
1710
1711 ierr = matrix->SumIntoGlobalValues(1,
1712 &trilinos_row,
1713 n_columns,
1714 col_index_ptr,
1715 &col_value_ptr,
1716 Epetra_FECrsMatrix::ROW_MAJOR);
1717 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1718 }
1719
1720# ifdef DEBUG
1721 if (ierr > 0)
1722 {
1723 std::cout << "------------------------------------------" << std::endl;
1724 std::cout << "Got error " << ierr << " in row " << row << " of proc "
1725 << matrix->RowMap().Comm().MyPID()
1726 << " when trying to add the columns:" << std::endl;
1727 for (TrilinosWrappers::types::int_type i = 0; i < n_columns; ++i)
1728 std::cout << col_index_ptr[i] << " ";
1729 std::cout << std::endl << std::endl;
1730 std::cout << "Matrix row "
1731 << (matrix->RowMap().MyGID(
1732 static_cast<TrilinosWrappers::types::int_type>(row)) ==
1733 false ?
1734 "(nonlocal part)" :
1735 "")
1736 << " has the following indices:" << std::endl;
1737 std::vector<TrilinosWrappers::types::int_type> indices;
1738 const Epetra_CrsGraph * graph =
1739 (nonlocal_matrix.get() != nullptr &&
1740 matrix->RowMap().MyGID(
1741 static_cast<TrilinosWrappers::types::int_type>(row)) == false) ?
1742 &nonlocal_matrix->Graph() :
1743 &matrix->Graph();
1744
1745 indices.resize(graph->NumGlobalIndices(row));
1746 int n_indices = 0;
1747 graph->ExtractGlobalRowCopy(row,
1748 indices.size(),
1749 n_indices,
1750 indices.data());
1751 AssertDimension(n_indices, indices.size());
1752
1753 for (TrilinosWrappers::types::int_type i = 0; i < n_indices; ++i)
1754 std::cout << indices[i] << " ";
1755 std::cout << std::endl << std::endl;
1756 Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
1757 }
1758# endif
1759 Assert(ierr >= 0, ExcTrilinosError(ierr));
1760 }
1761
1762
1763
1764 SparseMatrix &
1765 SparseMatrix::operator=(const double d)
1766 {
1767 Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
1768 compress(
1769 ::dealii::VectorOperation::unknown); // TODO: why do we do this? Should we
1770 // not check for is_compressed?
1771
1772 const int ierr = matrix->PutScalar(d);
1773 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1774 if (nonlocal_matrix.get() != nullptr)
1775 nonlocal_matrix->PutScalar(d);
1776
1777 return *this;
1778 }
1779
1780
1781
1782 void
1783 SparseMatrix::add(const TrilinosScalar factor, const SparseMatrix &rhs)
1784 {
1785 AssertDimension(rhs.m(), m());
1786 AssertDimension(rhs.n(), n());
1787 AssertDimension(rhs.local_range().first, local_range().first);
1788 AssertDimension(rhs.local_range().second, local_range().second);
1789 Assert(matrix->RowMap().SameAs(rhs.matrix->RowMap()),
1790 ExcMessage("Can only add matrices with same distribution of rows"));
1791 Assert(matrix->Filled() && rhs.matrix->Filled(),
1792 ExcMessage("Addition of matrices only allowed if matrices are "
1793 "filled, i.e., compress() has been called"));
1794
1795 const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());
1796
1797 for (const auto row : locally_owned_range_indices())
1798 {
1799 const int row_local = matrix->RowMap().LID(
1800 static_cast<TrilinosWrappers::types::int_type>(row));
1801 Assert((row_local >= 0), ExcAccessToNonlocalRow(row));
1802
1803 // First get a view to the matrix columns of both matrices. Note that
1804 // the data is in local index spaces so we need to be careful not only
1805 // to compare column indices in case they are derived from the same
1806 // map.
1807 int n_entries, rhs_n_entries;
1808 TrilinosScalar *value_ptr, *rhs_value_ptr;
1809 int * index_ptr, *rhs_index_ptr;
1810 int ierr = rhs.matrix->ExtractMyRowView(row_local,
1811 rhs_n_entries,
1812 rhs_value_ptr,
1813 rhs_index_ptr);
1814 (void)ierr;
1815 Assert(ierr == 0, ExcTrilinosError(ierr));
1816
1817 ierr =
1818 matrix->ExtractMyRowView(row_local, n_entries, value_ptr, index_ptr);
1819 Assert(ierr == 0, ExcTrilinosError(ierr));
1820 bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
1821 if (!expensive_checks)
1822 {
1823 // check if the column indices are the same. If yes, can simply
1824 // copy over the data.
1825 expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
1826 static_cast<void *>(rhs_index_ptr),
1827 sizeof(int) * n_entries) != 0;
1828 if (!expensive_checks)
1829 for (int i = 0; i < n_entries; ++i)
1830 value_ptr[i] += rhs_value_ptr[i] * factor;
1831 }
1832 // Now to the expensive case where we need to check all column indices
1833 // against each other (transformed into global index space) and where
1834 // we need to make sure that all entries we are about to add into the
1835 // lhs matrix actually exist
1836 if (expensive_checks)
1837 {
1838 for (int i = 0; i < rhs_n_entries; ++i)
1839 {
1840 if (rhs_value_ptr[i] == 0.)
1841 continue;
1842 const TrilinosWrappers::types::int_type rhs_global_col =
1843 global_column_index(*rhs.matrix, rhs_index_ptr[i]);
1844 int local_col = matrix->ColMap().LID(rhs_global_col);
1845 int *local_index = Utilities::lower_bound(index_ptr,
1846 index_ptr + n_entries,
1847 local_col);
1848 Assert(local_index != index_ptr + n_entries &&
1849 *local_index == local_col,
1850 ExcMessage(
1851 "Adding the entries from the other matrix "
1852 "failed, because the sparsity pattern "
1853 "of that matrix includes more elements than the "
1854 "calling matrix, which is not allowed."));
1855 value_ptr[local_index - index_ptr] += factor * rhs_value_ptr[i];
1856 }
1857 }
1858 }
1859 }
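  // Sketch of matrix accumulation with add(): both matrices must be
  // compressed, share their row distribution, and every nonzero of the
  // argument must also exist in the calling matrix's sparsity pattern:
  //
  // @code
  // A.add(2.0, B); // A += 2 * B
  // @endcode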
1860
1861
1862
1863 void
1864 SparseMatrix::transpose()
1865 {
1866 // This only flips a flag that tells
1867 // Trilinos that any vmult operation
1868 // should be done with the
1869 // transpose. However, the matrix
1870 // structure is not reset.
1871 int ierr;
1872
1873 if (!matrix->UseTranspose())
1874 {
1875 ierr = matrix->SetUseTranspose(true);
1876 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1877 }
1878 else
1879 {
1880 ierr = matrix->SetUseTranspose(false);
1881 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1882 }
1883 }
1884
1885
1886
1887 SparseMatrix &
1888 SparseMatrix::operator*=(const TrilinosScalar a)
1889 {
1890 const int ierr = matrix->Scale(a);
1891 Assert(ierr == 0, ExcTrilinosError(ierr));
1892 (void)ierr; // removes -Wunused-variable in optimized mode
1893
1894 return *this;
1895 }
1896
1897
1898
1899 SparseMatrix &
1900 SparseMatrix::operator/=(const TrilinosScalar a)
1901 {
1902 Assert(a != 0, ExcDivideByZero());
1903
1904 const TrilinosScalar factor = 1. / a;
1905
1906 const int ierr = matrix->Scale(factor);
1907 Assert(ierr == 0, ExcTrilinosError(ierr));
1908 (void)ierr; // removes -Wunused-variable in optimized mode
1909
1910 return *this;
1911 }
1912
1913
1914
1915 TrilinosScalar
1916 SparseMatrix::l1_norm() const
1917 {
1918 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1919 return matrix->NormOne();
1920 }
1921
1922
1923
1924 TrilinosScalar
1925 SparseMatrix::linfty_norm() const
1926 {
1927 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1928 return matrix->NormInf();
1929 }
1930
1931
1932
1933 TrilinosScalar
1934 SparseMatrix::frobenius_norm() const
1935 {
1936 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1937 return matrix->NormFrobenius();
1938 }
1939
1940
1941
1942 namespace internal
1943 {
1944 namespace SparseMatrixImplementation
1945 {
1946 template <typename VectorType>
1947 inline void
1948 check_vector_map_equality(const Epetra_CrsMatrix &,
1949 const VectorType &,
1950 const VectorType &)
1951 {}
1952
1953 inline void
1954 check_vector_map_equality(const Epetra_CrsMatrix & m,
1955 const TrilinosWrappers::MPI::Vector &in,
1956 const TrilinosWrappers::MPI::Vector &out)
1957 {
1958 Assert(in.trilinos_partitioner().SameAs(m.DomainMap()) == true,
1959 ExcMessage("The column partitioning of a matrix does not match "
1960 "the partitioning of a vector you are trying to "
1961 "multiply it with. Are you multiplying the "
1962 "matrix with a vector that has ghost elements?"));
1963 Assert(out.trilinos_partitioner().SameAs(m.RangeMap()) == true,
1964 ExcMessage("The row partitioning of a matrix does not match "
1965 "the partitioning of a vector you are trying to "
1966 "put the result of a matrix-vector product in. "
1967 "Are you trying to put the product of the "
1968 "matrix with a vector into a vector that has "
1969 "ghost elements?"));
1970 (void)m;
1971 (void)in;
1972 (void)out;
1973 }
1974 } // namespace SparseMatrixImplementation
1975 } // namespace internal
1976
1977
1978 template <typename VectorType>
1979 typename std::enable_if<
1980 std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
1981 SparseMatrix::vmult(VectorType &dst, const VectorType &src) const
1982 {
1983 Assert(&src != &dst, ExcSourceEqualsDestination());
1984 Assert(matrix->Filled(), ExcMatrixNotCompressed());
1985 (void)src;
1986 (void)dst;
1987
1988 internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
1989 src,
1990 dst);
1991 const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
1992 AssertDimension(dst_local_size, matrix->RangeMap().NumMyPoints());
1993 const size_type src_local_size = internal::end(src) - internal::begin(src);
1994 AssertDimension(src_local_size, matrix->DomainMap().NumMyPoints());
1995
1996 Epetra_MultiVector tril_dst(
1997 View, matrix->RangeMap(), internal::begin(dst), dst_local_size, 1);
1998 Epetra_MultiVector tril_src(View,
1999 matrix->DomainMap(),
2000 const_cast<TrilinosScalar *>(
2001 internal::begin(src)),
2002 src_local_size,
2003 1);
2004
2005 const int ierr = matrix->Multiply(false, tril_src, tril_dst);
2006 Assert(ierr == 0, ExcTrilinosError(ierr));
2007 (void)ierr; // removes -Wunused-variable in optimized mode
2008 }
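
The Epetra_MultiVector(View, ...) constructors above wrap the local arrays of dst and src without copying, so the call reduces to a single Epetra multiply. A hedged usage sketch with vectors built to match the matrix partitioning:

  TrilinosWrappers::MPI::Vector x(A.locally_owned_domain_indices(), MPI_COMM_WORLD);
  TrilinosWrappers::MPI::Vector y(A.locally_owned_range_indices(), MPI_COMM_WORLD);
  // ... fill x ...
  A.vmult(y, x); // y = A x; both vectors must be ghost-free
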
2009
2010
2011
2012 template <typename VectorType>
2013 typename std::enable_if<
2014 !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2015 SparseMatrix::vmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2016 {
2017 AssertThrow(false, ExcNotImplemented());
2018 }
2019
2020
2021
2022 template <typename VectorType>
2023 typename std::enable_if<
2024 std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2025 SparseMatrix::Tvmult(VectorType &dst, const VectorType &src) const
2026 {
2027 Assert(&src != &dst, ExcSourceEqualsDestination());
2028 Assert(matrix->Filled(), ExcMatrixNotCompressed());
2029
2030 internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
2031 dst,
2032 src);
2033 const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
2034 AssertDimension(dst_local_size, matrix->DomainMap().NumMyPoints());
2035 const size_type src_local_size = internal::end(src) - internal::begin(src);
2036 AssertDimension(src_local_size, matrix->RangeMap().NumMyPoints());
2037
2038 Epetra_MultiVector tril_dst(
2039 View, matrix->DomainMap(), internal::begin(dst), dst_local_size, 1);
2040 Epetra_MultiVector tril_src(View,
2041 matrix->RangeMap(),
2042 const_cast<double *>(internal::begin(src)),
2043 src_local_size,
2044 1);
2045
2046 const int ierr = matrix->Multiply(true, tril_src, tril_dst);
2047 Assert(ierr == 0, ExcTrilinosError(ierr));
2048 (void)ierr; // removes -Wunused-variable in optimized mode
2049 }
2050
2051
2052
2053 template <typename VectorType>
2054 typename std::enable_if<
2055 !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2056 SparseMatrix::Tvmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2057 {
2058 AssertThrow(false, ExcNotImplemented());
2059 }
2060
2061
2062
2063 template <typename VectorType>
2064 void
2065 SparseMatrix::vmult_add(VectorType &dst, const VectorType &src) const
2066 {
2067 Assert(&src != &dst, ExcSourceEqualsDestination());
2068
2069 // Reinit a temporary vector with fast argument set, which does not
2070 // overwrite the content (to save time).
2071 VectorType tmp_vector;
2072 tmp_vector.reinit(dst, true);
2073 vmult(tmp_vector, src);
2074 dst += tmp_vector;
2075 }
2076
2077
2078
2079 template <typename VectorType>
2080 void
2081 SparseMatrix::Tvmult_add(VectorType &dst, const VectorType &src) const
2082 {
2083 Assert(&src != &dst, ExcSourceEqualsDestination());
2084
2085 // Reinit a temporary vector with fast argument set, which does not
2086 // overwrite the content (to save time).
2087 VectorType tmp_vector;
2088 tmp_vector.reinit(dst, true);
2089 Tvmult(tmp_vector, src);
2090 dst += tmp_vector;
2091 }
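
Both *_add variants allocate a scratch vector via reinit(dst, /*omit_zeroing_entries=*/true) and then accumulate, so they cost one vmult plus one vector addition; a sketch:

  A.vmult_add(y, x);  // y += A x
  A.Tvmult_add(x, y); // x += A^T y
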
2092
2093
2094
2095 TrilinosScalar
2096 SparseMatrix::matrix_norm_square(const MPI::Vector &v) const
2097 {
2098 Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2099
2100 MPI::Vector temp_vector;
2101 temp_vector.reinit(v, true);
2102
2103 vmult(temp_vector, v);
2104 return temp_vector * v;
2105 }
2106
2107
2108
2109 TrilinosScalar
2110 SparseMatrix::matrix_scalar_product(const MPI::Vector &u,
2111 const MPI::Vector &v) const
2112 {
2113 Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2114
2115 MPI::Vector temp_vector;
2116 temp_vector.reinit(v, true);
2117
2118 vmult(temp_vector, v);
2119 return u * temp_vector;
2120 }
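
Both helpers require a square matrix (row map equal to domain map) and cost one vmult plus one dot product; a sketch with hypothetical ghost-free vectors u and v:

  const TrilinosScalar vAv = A.matrix_norm_square(v);       // v^T A v
  const TrilinosScalar uAv = A.matrix_scalar_product(u, v); // u^T A v
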
2121
2122
2123
2126 const MPI::Vector &x,
2127 const MPI::Vector &b) const
2128 {
2129 vmult(dst, x);
2130 dst -= b;
2131 dst *= -1.;
2132
2133 return dst.l2_norm();
2134 }
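
A sketch, where res is a hypothetical writable vector partitioned like b:

  const TrilinosScalar r = A.residual(res, x, b); // res = b - A x, r = ||res||_2
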
2135
2136
2137
2138 namespace internals
2139 {
2140 using size_type = SparseMatrix::size_type;
2141
2142 void
2143 perform_mmult(const SparseMatrix &inputleft,
2144 const SparseMatrix &inputright,
2145 SparseMatrix & result,
2146 const MPI::Vector & V,
2147 const bool transpose_left)
2148 {
2149 const bool use_vector = (V.size() == inputright.m() ? true : false);
2150 if (transpose_left == false)
2151 {
2152 Assert(inputleft.n() == inputright.m(),
2153 ExcDimensionMismatch(inputleft.n(), inputright.m()));
2154 Assert(inputleft.trilinos_matrix().DomainMap().SameAs(
2155 inputright.trilinos_matrix().RangeMap()),
2156 ExcMessage("Parallel partitioning of A and B does not fit."));
2157 }
2158 else
2159 {
2160 Assert(inputleft.m() == inputright.m(),
2161 ExcDimensionMismatch(inputleft.m(), inputright.m()));
2162 Assert(inputleft.trilinos_matrix().RangeMap().SameAs(
2163 inputright.trilinos_matrix().RangeMap()),
2164 ExcMessage("Parallel partitioning of A and B does not fit."));
2165 }
2166
2167 result.clear();
2168
2169 // create a suitable operator B: in case
2170 // we do not use a vector, all we need to
2171 // do is to set the pointer. Otherwise,
2172 // we insert the data from B, but
2173 // multiply each row with the respective
2174 // vector element.
2175 Teuchos::RCP<Epetra_CrsMatrix> mod_B;
2176 if (use_vector == false)
2177 {
2178 mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>(
2179 &inputright.trilinos_matrix()),
2180 false);
2181 }
2182 else
2183 {
2184 mod_B = Teuchos::rcp(
2185 new Epetra_CrsMatrix(Copy, inputright.trilinos_sparsity_pattern()),
2186 true);
2187 mod_B->FillComplete(inputright.trilinos_matrix().DomainMap(),
2188 inputright.trilinos_matrix().RangeMap());
2189 Assert(inputright.local_range() == V.local_range(),
2190 ExcMessage("Parallel distribution of matrix B and vector V "
2191 "does not match."));
2192
2193 const int local_N = inputright.local_size();
2194 for (int i = 0; i < local_N; ++i)
2195 {
2196 int N_entries = -1;
2197 double *new_data, *B_data;
2198 mod_B->ExtractMyRowView(i, N_entries, new_data);
2199 inputright.trilinos_matrix().ExtractMyRowView(i,
2200 N_entries,
2201 B_data);
2202 double value = V.trilinos_vector()[0][i];
2203 for (TrilinosWrappers::types::int_type j = 0; j < N_entries; ++j)
2204 new_data[j] = value * B_data[j];
2205 }
2206 }
2207
2208
2209 SparseMatrix tmp_result(transpose_left ?
2210 inputleft.locally_owned_domain_indices() :
2211 inputleft.locally_owned_range_indices(),
2212 inputright.locally_owned_domain_indices(),
2213 inputleft.get_mpi_communicator());
2214
2215# ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
2216 EpetraExt::MatrixMatrix::Multiply(inputleft.trilinos_matrix(),
2217 transpose_left,
2218 *mod_B,
2219 false,
2220 const_cast<Epetra_CrsMatrix &>(
2221 tmp_result.trilinos_matrix()));
2222# else
2223 Assert(false,
2224 ExcMessage("This function requires that the Trilinos "
2225 "installation found while running the deal.II "
2226 "CMake scripts contains the optional Trilinos "
2227 "package 'EpetraExt'. However, this optional "
2228 "part of Trilinos was not found."));
2229# endif
2230 result.reinit(tmp_result.trilinos_matrix());
2231 }
2232 } // namespace internals
2233
2234
2235 void
2236 SparseMatrix::mmult(SparseMatrix &C,
2237 const SparseMatrix &B,
2238 const MPI::Vector & V) const
2239 {
2240 internals::perform_mmult(*this, B, C, V, false);
2241 }
2242
2243
2244
2245 void
2246 SparseMatrix::Tmmult(SparseMatrix &C,
2247 const SparseMatrix &B,
2248 const MPI::Vector & V) const
2249 {
2250 internals::perform_mmult(*this, B, C, V, true);
2251 }
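
A sketch of both entry points; the result C is rebuilt inside perform_mmult(), and the optional vector V acts as a diagonal matrix sandwiched between the two factors:

  TrilinosWrappers::SparseMatrix C;
  A.mmult(C, B);     // C = A * B
  A.mmult(C, B, V);  // C = A * diag(V) * B
  A.Tmmult(C, B, V); // C = A^T * diag(V) * B
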
2252
2253
2254
2255 void
2256 SparseMatrix::write_ascii()
2257 {
2258 Assert(false, ExcNotImplemented());
2259 }
2260
2261
2262
2263 // As of now, no particularly neat
2264 // output is generated in case of
2265 // multiple processors.
2266 void
2267 SparseMatrix::print(std::ostream &out,
2268 const bool print_detailed_trilinos_information) const
2269 {
2270 if (print_detailed_trilinos_information == true)
2271 out << *matrix;
2272 else
2273 {
2274 double *values;
2275 int * indices;
2276 int num_entries;
2277
2278 for (int i = 0; i < matrix->NumMyRows(); ++i)
2279 {
2280 const int ierr =
2281 matrix->ExtractMyRowView(i, num_entries, values, indices);
2282 (void)ierr;
2283 Assert(ierr == 0, ExcTrilinosError(ierr));
2284
2285 for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
2286 out << "(" << global_row_index(*matrix, i)
2287 << ","
2288 << global_column_index(*matrix, indices[j])
2289 << ") " << values[j] << std::endl;
2290 }
2291 }
2292
2293 AssertThrow(out.fail() == false, ExcIO());
2294 }
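
A sketch of the two output modes:

  A.print(std::cout);       // one "(row,col) value" line per locally stored entry
  A.print(std::cout, true); // defer to Epetra's own detailed operator<< output
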
2295
2296
2297
2298 SparseMatrix::size_type
2299 SparseMatrix::memory_consumption() const
2300 {
2301 size_type static_memory =
2302 sizeof(*this) + sizeof(*matrix) + sizeof(*matrix->Graph().DataPtr());
2303 return (
2304 (sizeof(TrilinosScalar) + sizeof(TrilinosWrappers::types::int_type)) *
2305 matrix->NumMyNonzeros() +
2306 sizeof(int) * local_size() + static_memory);
2307 }
2308
2309
2310
2311 MPI_Comm
2312 SparseMatrix::get_mpi_communicator() const
2313 {
2314 const Epetra_MpiComm *mpi_comm =
2315 dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
2316 Assert(mpi_comm != nullptr, ExcInternalError());
2317 return mpi_comm->Comm();
2318 }
2319} // namespace TrilinosWrappers
2320
2321
2322namespace TrilinosWrappers
2323{
2324 namespace internal
2325 {
2326 namespace LinearOperatorImplementation
2327 {
2328 TrilinosPayload::TrilinosPayload()
2329 : use_transpose(false)
2330 , communicator(MPI_COMM_SELF)
2331 , domain_map(IndexSet().make_trilinos_map(communicator.Comm()))
2332 , range_map(IndexSet().make_trilinos_map(communicator.Comm()))
2333 {
2334 vmult = [](Range &, const Domain &) {
2335 Assert(false,
2336 ExcMessage("Uninitialized TrilinosPayload::vmult called "
2337 "(Default constructor)"));
2338 };
2339
2340 Tvmult = [](Domain &, const Range &) {
2341 Assert(false,
2342 ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
2343 "(Default constructor)"));
2344 };
2345
2346 inv_vmult = [](Domain &, const Range &) {
2347 Assert(false,
2348 ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2349 "(Default constructor)"));
2350 };
2351
2352 inv_Tvmult = [](Range &, const Domain &) {
2353 Assert(false,
2354 ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2355 "(Default constructor)"));
2356 };
2357 }
2358
2359
2360
2361 TrilinosPayload::TrilinosPayload(
2362 const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2363 const TrilinosWrappers::SparseMatrix &matrix)
2364 : TrilinosPayload(const_cast<Epetra_CrsMatrix &>(
2365 matrix.trilinos_matrix()),
2366 /*op_supports_inverse_operations = */ false,
2367 matrix_exemplar.trilinos_matrix().UseTranspose(),
2368 matrix_exemplar.get_mpi_communicator(),
2369 matrix_exemplar.locally_owned_domain_indices(),
2370 matrix_exemplar.locally_owned_range_indices())
2371 {}
2372
2373
2374
2375 TrilinosPayload::TrilinosPayload(
2376 const TrilinosPayload & payload_exemplar,
2377 const TrilinosWrappers::SparseMatrix &matrix)
2378
2379 : TrilinosPayload(const_cast<Epetra_CrsMatrix &>(
2380 matrix.trilinos_matrix()),
2381 /*op_supports_inverse_operations = */ false,
2382 payload_exemplar.UseTranspose(),
2383 payload_exemplar.get_mpi_communicator(),
2384 payload_exemplar.locally_owned_domain_indices(),
2385 payload_exemplar.locally_owned_range_indices())
2386 {}
2387
2388
2389
2390 TrilinosPayload::TrilinosPayload(
2391 const TrilinosWrappers::SparseMatrix & matrix_exemplar,
2392 const TrilinosWrappers::PreconditionBase &preconditioner)
2393 : TrilinosPayload(preconditioner.trilinos_operator(),
2394 /*op_supports_inverse_operations = */ true,
2395 matrix_exemplar.trilinos_matrix().UseTranspose(),
2396 matrix_exemplar.get_mpi_communicator(),
2397 matrix_exemplar.locally_owned_domain_indices(),
2398 matrix_exemplar.locally_owned_range_indices())
2399 {}
2400
2401
2402
2403 TrilinosPayload::TrilinosPayload(
2404 const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
2405 const TrilinosWrappers::PreconditionBase &preconditioner)
2406 : TrilinosPayload(
2407 preconditioner.trilinos_operator(),
2408 /*op_supports_inverse_operations = */ true,
2409 preconditioner_exemplar.trilinos_operator().UseTranspose(),
2410 preconditioner_exemplar.get_mpi_communicator(),
2411 preconditioner_exemplar.locally_owned_domain_indices(),
2412 preconditioner_exemplar.locally_owned_range_indices())
2413 {}
2414
2415
2416
2417 TrilinosPayload::TrilinosPayload(
2418 const TrilinosPayload & payload_exemplar,
2419 const TrilinosWrappers::PreconditionBase &preconditioner)
2420 : TrilinosPayload(preconditioner.trilinos_operator(),
2421 /*op_supports_inverse_operations = */ true,
2422 payload_exemplar.UseTranspose(),
2423 payload_exemplar.get_mpi_communicator(),
2424 payload_exemplar.locally_owned_domain_indices(),
2425 payload_exemplar.locally_owned_range_indices())
2426 {}
2427
2428
2429
2430 TrilinosPayload::TrilinosPayload(const TrilinosPayload &payload)
2431 : vmult(payload.vmult)
2432 , Tvmult(payload.Tvmult)
2433 , inv_vmult(payload.inv_vmult)
2434 , inv_Tvmult(payload.inv_Tvmult)
2435 , use_transpose(payload.use_transpose)
2436 , communicator(payload.communicator)
2437 , domain_map(payload.domain_map)
2438 , range_map(payload.range_map)
2439 {}
2440
2441
2442
2443 // Composite copy constructor
2444 // This is required for PackagedOperations
2445 TrilinosPayload::TrilinosPayload(const TrilinosPayload &first_op,
2446 const TrilinosPayload &second_op)
2447 : use_transpose(false)
2448 , // The combination of operators provides the exact
2449 // definition of the operation
2450 communicator(first_op.communicator)
2451 , domain_map(second_op.domain_map)
2452 , range_map(first_op.range_map)
2453 {}
2454
2455
2456
2457 TrilinosPayload
2458 TrilinosPayload::identity_payload() const
2459 {
2460 TrilinosPayload return_op(*this);
2461
2462 return_op.vmult = [](Range &tril_dst, const Range &tril_src) {
2463 tril_dst = tril_src;
2464 };
2465
2466 return_op.Tvmult = [](Range &tril_dst, const Range &tril_src) {
2467 tril_dst = tril_src;
2468 };
2469
2470 return_op.inv_vmult = [](Range &tril_dst, const Range &tril_src) {
2471 tril_dst = tril_src;
2472 };
2473
2474 return_op.inv_Tvmult = [](Range &tril_dst, const Range &tril_src) {
2475 tril_dst = tril_src;
2476 };
2477
2478 return return_op;
2479 }
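
These payload methods are rarely called directly; they are exercised by the LinearOperator framework. A hedged sketch (assuming <deal.II/lac/trilinos_linear_operator.h> and the assembled matrix A from before; identity_operator() is the LinearOperator helper that draws on identity_payload()):

  const auto op_a  = TrilinosWrappers::linear_operator<TrilinosWrappers::MPI::Vector>(A);
  const auto op_id = identity_operator(op_a); // identity with A's maps and payload type
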
2480
2481
2482
2483 TrilinosPayload
2484 TrilinosPayload::null_payload() const
2485 {
2486 TrilinosPayload return_op(*this);
2487
2488 return_op.vmult = [](Range &tril_dst, const Domain &) {
2489 const int ierr = tril_dst.PutScalar(0.0);
2490
2491 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2492 };
2493
2494 return_op.Tvmult = [](Domain &tril_dst, const Range &) {
2495 const int ierr = tril_dst.PutScalar(0.0);
2496
2497 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2498 };
2499
2500 return_op.inv_vmult = [](Domain &tril_dst, const Range &) {
2501 AssertThrow(false,
2502 ExcMessage("Cannot compute inverse of null operator"));
2503
2504 const int ierr = tril_dst.PutScalar(0.0);
2505
2506 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2507 };
2508
2509 return_op.inv_Tvmult = [](Range &tril_dst, const Domain &) {
2510 AssertThrow(false,
2511 ExcMessage("Cannot compute inverse of null operator"));
2512
2513 const int ierr = tril_dst.PutScalar(0.0);
2514
2515 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2516 };
2517
2518 return return_op;
2519 }
2520
2521
2522
2523 TrilinosPayload
2524 TrilinosPayload::transpose_payload() const
2525 {
2526 TrilinosPayload return_op(*this);
2527 return_op.transpose();
2528 return return_op;
2529 }
2530
2531
2532
2533 IndexSet
2534 TrilinosPayload::locally_owned_domain_indices() const
2535 {
2536 return IndexSet(domain_map);
2537 }
2538
2539
2540
2541 IndexSet
2542 TrilinosPayload::locally_owned_range_indices() const
2543 {
2544 return IndexSet(range_map);
2545 }
2546
2547
2548
2549 MPI_Comm
2550 TrilinosPayload::get_mpi_communicator() const
2551 {
2552 return communicator.Comm();
2553 }
2554
2555
2556
2557 void
2558 TrilinosPayload::transpose()
2559 {
2560 SetUseTranspose(!use_transpose);
2561 }
2562
2563
2564
2565 bool
2566 TrilinosPayload::UseTranspose() const
2567 {
2568 return use_transpose;
2569 }
2570
2571
2572
2573 int
2574 TrilinosPayload::SetUseTranspose(bool UseTranspose)
2575 {
2576 if (use_transpose != UseTranspose)
2577 {
2578 use_transpose = UseTranspose;
2579 std::swap(domain_map, range_map);
2580 std::swap(vmult, Tvmult);
2581 std::swap(inv_vmult, inv_Tvmult);
2582 }
2583 return 0;
2584 }
2585
2586
2587
2588 int
2589 TrilinosPayload::Apply(const VectorType &X, VectorType &Y) const
2590 {
2591 // The transposedness of the operations is taken care of
2592 // when we hit the transpose flag.
2593 vmult(Y, X);
2594 return 0;
2595 }
2596
2597
2598
2599 int
2600 TrilinosPayload::ApplyInverse(const VectorType &Y, VectorType &X) const
2601 {
2602 // The transposedness of the operations is taken care of
2603 // when we hit the transpose flag.
2604 inv_vmult(X, Y);
2605 return 0;
2606 }
2607
2608
2609
2610 const char *
2611 TrilinosPayload::Label() const
2612 {
2613 return "TrilinosPayload";
2614 }
2615
2616
2617
2618 const Epetra_Comm &
2619 TrilinosPayload::Comm() const
2620 {
2621 return communicator;
2622 }
2623
2624
2625
2626 const Epetra_Map &
2627 TrilinosPayload::OperatorDomainMap() const
2628 {
2629 return domain_map;
2630 }
2631
2632
2633
2634 const Epetra_Map &
2635 TrilinosPayload::OperatorRangeMap() const
2636 {
2637 return range_map;
2638 }
2639
2640
2641
2642 bool
2643 TrilinosPayload::HasNormInf() const
2644 {
2645 return false;
2646 }
2647
2648
2649
2650 double
2651 TrilinosPayload::NormInf() const
2652 {
2653 AssertThrow(false, ExcNotImplemented());
2654 return 0.0;
2655 }
2656
2657
2658
2659 TrilinosPayload
2660 operator+(const TrilinosPayload &first_op,
2661 const TrilinosPayload &second_op)
2662 {
2663 using Domain = typename TrilinosPayload::Domain;
2664 using Range = typename TrilinosPayload::Range;
2665 using Intermediate = typename TrilinosPayload::VectorType;
2666 using GVMVectorType = TrilinosWrappers::MPI::Vector;
2667
2668 Assert(first_op.locally_owned_domain_indices() ==
2669 second_op.locally_owned_domain_indices(),
2670 ExcMessage(
2671 "Operators are set to work on incompatible IndexSets."));
2672 Assert(first_op.locally_owned_range_indices() ==
2673 second_op.locally_owned_range_indices(),
2674 ExcMessage(
2675 "Operators are set to work on incompatible IndexSets."));
2676
2677 TrilinosPayload return_op(first_op, second_op);
2678
2679 // Capture by copy so the payloads are always valid
2680 return_op.vmult = [first_op, second_op](Range & tril_dst,
2681 const Domain &tril_src) {
2682 // Duplicated from LinearOperator::operator*
2683 // TODO: Template the constructor on GrowingVectorMemory vector type?
2684 GrowingVectorMemory<GVMVectorType> vector_memory;
2685 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2686
2687 // Initialize intermediate vector
2688 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2689 i->reinit(IndexSet(first_op_init_map),
2690 first_op.get_mpi_communicator(),
2691 /*bool omit_zeroing_entries =*/true);
2692
2693 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2694 const size_type i_local_size = i->end() - i->begin();
2695 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2696 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2697 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2698 (void)second_op_init_map;
2699 Intermediate tril_int(View,
2700 first_op_init_map,
2701 const_cast<TrilinosScalar *>(i->begin()),
2702 i_local_size,
2703 1);
2704
2705 // These operators may themselves be transposed or not, so we let them
2706 // decide what the intended outcome is
2707 second_op.Apply(tril_src, tril_int);
2708 first_op.Apply(tril_src, tril_dst);
2709 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2710 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2711 };
2712
2713 return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
2714 const Range &tril_src) {
2715 // Duplicated from LinearOperator::operator*
2716 // TODO: Template the constructor on GrowingVectorMemory vector type?
2717 GrowingVectorMemory<GVMVectorType> vector_memory;
2718 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2719
2720 // These operators may themselves be transposed or not, so we let them
2721 // decide what the intended outcome is
2722 // We must first transpose the operators to get the right IndexSets
2723 // for the input, intermediate and result vectors
2724 const_cast<TrilinosPayload &>(first_op).transpose();
2725 const_cast<TrilinosPayload &>(second_op).transpose();
2726
2727 // Initialize intermediate vector
2728 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2729 i->reinit(IndexSet(first_op_init_map),
2730 first_op.get_mpi_communicator(),
2731 /*bool omit_zeroing_entries =*/true);
2732
2733 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2734 const size_type i_local_size = i->end() - i->begin();
2735 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2736 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2737 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2738 (void)second_op_init_map;
2739 Intermediate tril_int(View,
2740 first_op_init_map,
2741 const_cast<TrilinosScalar *>(i->begin()),
2742 i_local_size,
2743 1);
2744
2745 // These operators may themselves be transposed or not, so we let them
2746 // decide what the intended outcome is
2747 second_op.Apply(tril_src, tril_int);
2748 first_op.Apply(tril_src, tril_dst);
2749 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2750 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2751
2752 // Reset transpose flag
2753 const_cast<TrilinosPayload &>(first_op).transpose();
2754 const_cast<TrilinosPayload &>(second_op).transpose();
2755 };
2756
2757 return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
2758 const Range &tril_src) {
2759 // Duplicated from LinearOperator::operator*
2760 // TODO: Template the constructor on GrowingVectorMemory vector type?
2761 GrowingVectorMemory<GVMVectorType> vector_memory;
2762 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2763
2764 // Initialize intermediate vector
2765 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2766 i->reinit(IndexSet(first_op_init_map),
2767 first_op.get_mpi_communicator(),
2768 /*bool omit_zeroing_entries =*/true);
2769
2770 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2771 const size_type i_local_size = i->end() - i->begin();
2772 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2773 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2774 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2775 (void)second_op_init_map;
2776 Intermediate tril_int(View,
2777 first_op_init_map,
2778 const_cast<TrilinosScalar *>(i->begin()),
2779 i_local_size,
2780 1);
2781
2782 // These operators may themselves be transposed or not, so we let them
2783 // decide what the intended outcome is
2784 second_op.ApplyInverse(tril_src, tril_int);
2785 first_op.ApplyInverse(tril_src, tril_dst);
2786 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2787 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2788 };
2789
2790 return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
2791 const Domain &tril_src) {
2792 // Duplicated from LinearOperator::operator*
2793 // TODO: Template the constructor on GrowingVectorMemory vector type?
2794 GrowingVectorMemory<GVMVectorType> vector_memory;
2795 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2796
2797 // These operators may themselves be transposed or not, so we let them
2798 // decide what the intended outcome is
2799 // We must first transpose the operators to get the right IndexSets
2800 // for the input, intermediate and result vectors
2801 const_cast<TrilinosPayload &>(first_op).transpose();
2802 const_cast<TrilinosPayload &>(second_op).transpose();
2803
2804 // Initialize intermediate vector
2805 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2806 i->reinit(IndexSet(first_op_init_map),
2807 first_op.get_mpi_communicator(),
2808 /*bool omit_zeroing_entries =*/true);
2809
2810 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2811 const size_type i_local_size = i->end() - i->begin();
2812 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2813 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2814 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2815 (void)second_op_init_map;
2816 Intermediate tril_int(View,
2817 first_op_init_map,
2818 const_cast<TrilinosScalar *>(i->begin()),
2819 i_local_size,
2820 1);
2821
2822 // These operators may themselves be transposed or not, so we let them
2823 // decide what the intended outcome is
2824 second_op.ApplyInverse(tril_src, tril_int);
2825 first_op.ApplyInverse(tril_src, tril_dst);
2826 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2827 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2828
2829 // Reset transpose flag
2830 const_cast<TrilinosPayload &>(first_op).transpose();
2831 const_cast<TrilinosPayload &>(second_op).transpose();
2832 };
2833
2834 return return_op;
2835 }
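
This operator+ is what backs sums of Trilinos-based LinearOperators. A hedged sketch continuing the assumptions above, with a second assembled matrix B partitioned like A:

  const auto op_b   = TrilinosWrappers::linear_operator<TrilinosWrappers::MPI::Vector>(B);
  const auto op_sum = op_a + op_b; // payloads merged by the operator+ above
  op_sum.vmult(y, x);              // y = (A + B) x
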
2836
2837
2838
2839 TrilinosPayload
2840 operator*(const TrilinosPayload &first_op,
2841 const TrilinosPayload &second_op)
2842 {
2843 using Domain = typename TrilinosPayload::Domain;
2844 using Range = typename TrilinosPayload::Range;
2845 using Intermediate = typename TrilinosPayload::VectorType;
2846 using GVMVectorType = TrilinosWrappers::MPI::Vector;
2847
2848 Assert(first_op.locally_owned_domain_indices() ==
2849 second_op.locally_owned_range_indices(),
2850 ExcMessage(
2851 "Operators are set to work on incompatible IndexSets."));
2852
2853 TrilinosPayload return_op(first_op, second_op);
2854
2855 // Capture by copy so the payloads are always valid
2856 return_op.vmult = [first_op, second_op](Range & tril_dst,
2857 const Domain &tril_src) {
2858 // Duplicated from LinearOperator::operator*
2859 // TODO: Template the constructor on GrowingVectorMemory vector type?
2860 GrowingVectorMemory<GVMVectorType> vector_memory;
2861 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2862
2863 // Initialize intermediate vector
2864 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2865 i->reinit(IndexSet(first_op_init_map),
2866 first_op.get_mpi_communicator(),
2867 /*bool omit_zeroing_entries =*/true);
2868
2869 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2870 const size_type i_local_size = i->end() - i->begin();
2871 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2872 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2873 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2874 (void)second_op_init_map;
2875 Intermediate tril_int(View,
2876 first_op_init_map,
2877 const_cast<TrilinosScalar *>(i->begin()),
2878 i_local_size,
2879 1);
2880
2881 // These operators may themselves be transposed or not, so we let them
2882 // decide what the intended outcome is
2883 second_op.Apply(tril_src, tril_int);
2884 first_op.Apply(tril_int, tril_dst);
2885 };
2886
2887 return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
2888 const Range &tril_src) {
2889 // Duplicated from LinearOperator::operator*
2890 // TODO: Template the constructor on GrowingVectorMemory vector type?
2891 GrowingVectorMemory<GVMVectorType> vector_memory;
2892 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2893
2894 // These operators may themselves be transposed or not, so we let them
2895 // decide what the intended outcome is
2896 // We must first transpose the operators to get the right IndexSets
2897 // for the input, intermediate and result vectors
2898 const_cast<TrilinosPayload &>(first_op).transpose();
2899 const_cast<TrilinosPayload &>(second_op).transpose();
2900
2901 // Initialize intermediate vector
2902 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2903 i->reinit(IndexSet(first_op_init_map),
2904 first_op.get_mpi_communicator(),
2905 /*bool omit_zeroing_entries =*/true);
2906
2907 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2908 const size_type i_local_size = i->end() - i->begin();
2909 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2910 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2911 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2912 (void)second_op_init_map;
2913 Intermediate tril_int(View,
2914 first_op_init_map,
2915 const_cast<TrilinosScalar *>(i->begin()),
2916 i_local_size,
2917 1);
2918
2919 // Apply the operators in the reverse order to vmult
2920 first_op.Apply(tril_src, tril_int);
2921 second_op.Apply(tril_int, tril_dst);
2922
2923 // Reset transpose flag
2924 const_cast<TrilinosPayload &>(first_op).transpose();
2925 const_cast<TrilinosPayload &>(second_op).transpose();
2926 };
2927
2928 return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
2929 const Range &tril_src) {
2930 // Duplicated from LinearOperator::operator*
2931 // TODO: Template the constructor on GrowingVectorMemory vector type?
2932 GrowingVectorMemory<GVMVectorType> vector_memory;
2933 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2934
2935 // Initialize intermediate vector
2936 const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
2937 i->reinit(IndexSet(first_op_init_map),
2938 first_op.get_mpi_communicator(),
2939 /*bool omit_zeroing_entries =*/true);
2940
2941 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2942 const size_type i_local_size = i->end() - i->begin();
2943 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2944 const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2945 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2946 (void)second_op_init_map;
2947 Intermediate tril_int(View,
2948 first_op_init_map,
2949 const_cast<TrilinosScalar *>(i->begin()),
2950 i_local_size,
2951 1);
2952
2953 // Apply the operators in the reverse order to vmult
2954 // and the same order as Tvmult
2955 first_op.ApplyInverse(tril_src, tril_int);
2956 second_op.ApplyInverse(tril_int, tril_dst);
2957 };
2958
2959 return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
2960 const Domain &tril_src) {
2961 // Duplicated from LinearOperator::operator*
2962 // TODO: Template the constructor on GrowingVectorMemory vector type?
2963 GrowingVectorMemory<GVMVectorType> vector_memory;
2964 VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2965
2966 // These operators may themselves be transposed or not, so we let them
2967 // decide what the intended outcome is
2968 // We must first transpose the operators to get the right IndexSets
2969 // for the input, intermediate and result vectors
2970 const_cast<TrilinosPayload &>(first_op).transpose();
2971 const_cast<TrilinosPayload &>(second_op).transpose();
2972
2973 // Initialize intermediate vector
2974 const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2975 i->reinit(IndexSet(first_op_init_map),
2976 first_op.get_mpi_communicator(),
2977 /*bool omit_zeroing_entries =*/true);
2978
2979 // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2980 const size_type i_local_size = i->end() - i->begin();
2981 AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2982 const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
2983 AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2984 (void)second_op_init_map;
2985 Intermediate tril_int(View,
2986 first_op_init_map,
2987 const_cast<TrilinosScalar *>(i->begin()),
2988 i_local_size,
2989 1);
2990
2991 // These operators may themselves be transposed or not, so we let them
2992 // decide what the intended outcome is
2993 // Apply the operators in the reverse order to Tvmult
2994 // and the same order as vmult
2995 second_op.ApplyInverse(tril_src, tril_int);
2996 first_op.ApplyInverse(tril_int, tril_dst);
2997
2998 // Reset transpose flag
2999 const_cast<TrilinosPayload &>(first_op).transpose();
3000 const_cast<TrilinosPayload &>(second_op).transpose();
3001 };
3002
3003 return return_op;
3004 }
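
Likewise, operator* backs multiplicative composition: vmult applies second_op first, while Tvmult transposes both factors and applies them in reverse order. Continuing the same hedged sketch (assuming the domain of A matches the range of B):

  const auto op_prod = op_a * op_b; // payloads merged by the operator* above
  op_prod.vmult(y, x);              // y = A (B x)
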
3005
3006 } // namespace LinearOperatorImplementation
3007 } /* namespace internal */
3008} /* namespace TrilinosWrappers */
3009
3010
3011
3012// explicit instantiations
3013# include "trilinos_sparse_matrix.inst"
3014
3015# ifndef DOXYGEN
3016// TODO: put these instantiations into generic file
3017namespace TrilinosWrappers
3018{
3019 template void
3020 SparseMatrix::reinit(const ::SparsityPattern &);
3021
3022 template void
3023 SparseMatrix::reinit(const DynamicSparsityPattern &);
3024
3025 template void
3026 SparseMatrix::reinit(const IndexSet &,
3027 const IndexSet &,
3028 const ::SparsityPattern &,
3029 const MPI_Comm &,
3030 const bool);
3031
3032 template void
3033 SparseMatrix::reinit(const IndexSet &,
3034 const IndexSet &,
3035 const DynamicSparsityPattern &,
3036 const MPI_Comm &,
3037 const bool);
3038
3039 template void
3040 SparseMatrix::vmult(MPI::Vector &, const MPI::Vector &) const;
3041
3042 template void
3043 SparseMatrix::vmult(::Vector<double> &,
3044 const ::Vector<double> &) const;
3045
3046 template void
3047 SparseMatrix::vmult(
3048 ::LinearAlgebra::distributed::Vector<double> &,
3049 const ::LinearAlgebra::distributed::Vector<double> &) const;
3050
3051# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3052 template void
3053 SparseMatrix::vmult(
3054 ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3055 const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3056
3057 template void
3058 SparseMatrix::vmult(
3059 ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3060 const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3061# endif
3062
3063 template void
3064 SparseMatrix::vmult(
3065 ::LinearAlgebra::EpetraWrappers::Vector &,
3066 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3067
3068 template void
3069 SparseMatrix::Tvmult(MPI::Vector &, const MPI::Vector &) const;
3070
3071 template void
3072 SparseMatrix::Tvmult(::Vector<double> &,
3073 const ::Vector<double> &) const;
3074
3075 template void
3076 SparseMatrix::Tvmult(
3077 ::LinearAlgebra::distributed::Vector<double> &,
3078 const ::LinearAlgebra::distributed::Vector<double> &) const;
3079
3080# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3081 template void
3082 SparseMatrix::Tvmult(
3083 ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3084 const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3085
3086 template void
3087 SparseMatrix::Tvmult(
3088 ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3089 const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3090# endif
3091
3092 template void
3093 SparseMatrix::Tvmult(
3094 ::LinearAlgebra::EpetraWrappers::Vector &,
3095 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3096
3097 template void
3098 SparseMatrix::vmult_add(MPI::Vector &, const MPI::Vector &) const;
3099
3100 template void
3101 SparseMatrix::vmult_add(::Vector<double> &,
3102 const ::Vector<double> &) const;
3103
3104 template void
3105 SparseMatrix::vmult_add(
3106 ::LinearAlgebra::distributed::Vector<double> &,
3107 const ::LinearAlgebra::distributed::Vector<double> &) const;
3108
3109# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3110 template void
3111 SparseMatrix::vmult_add(
3112 ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3113 const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3114
3115 template void
3116 SparseMatrix::vmult_add(
3117 ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3118 const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3119# endif
3120
3121 template void
3122 SparseMatrix::vmult_add(
3123 ::LinearAlgebra::EpetraWrappers::Vector &,
3124 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3125
3126 template void
3127 SparseMatrix::Tvmult_add(MPI::Vector &, const MPI::Vector &) const;
3128
3129 template void
3130 SparseMatrix::Tvmult_add(::Vector<double> &,
3131 const ::Vector<double> &) const;
3132
3133 template void
3134 SparseMatrix::Tvmult_add(
3135 ::LinearAlgebra::distributed::Vector<double> &,
3136 const ::LinearAlgebra::distributed::Vector<double> &) const;
3137
3138# ifdef DEAL_II_TRILINOS_WITH_TPETRA
3139 template void
3140 SparseMatrix::Tvmult_add(
3141 ::LinearAlgebra::TpetraWrappers::Vector<double> &,
3142 const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3143
3144 template void
3145 SparseMatrix::Tvmult_add(
3146 ::LinearAlgebra::TpetraWrappers::Vector<float> &,
3147 const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3148# endif
3149
3150 template void
3151 SparseMatrix::Tvmult_add(
3152 ::LinearAlgebra::EpetraWrappers::Vector &,
3153 const ::LinearAlgebra::EpetraWrappers::Vector &) const;
3154} // namespace TrilinosWrappers
3155# endif // DOXYGEN
3156
3157 DEAL_II_NAMESPACE_CLOSE
3158
3159#endif // DEAL_II_WITH_TRILINOS