Reference documentation for deal.II version 9.2.0

#include <deal.II/lac/trilinos_sparse_matrix.h>

Inheritance diagram for TrilinosWrappers::SparseMatrix:

Classes

struct  Traits
 

Public Types

using size_type = ::types::global_dof_index
 
using iterator = SparseMatrixIterators::Iterator< false >
 
using const_iterator = SparseMatrixIterators::Iterator< true >
 
using value_type = TrilinosScalar
 

Public Member Functions

template<>
void set (const size_type row, const size_type n_cols, const size_type *col_indices, const TrilinosScalar *values, const bool elide_zero_values)
 
Constructors and initialization.
 SparseMatrix ()
 
 SparseMatrix (const size_type m, const size_type n, const unsigned int n_max_entries_per_row)
 
 SparseMatrix (const size_type m, const size_type n, const std::vector< unsigned int > &n_entries_per_row)
 
 SparseMatrix (const SparsityPattern &InputSparsityPattern)
 
 SparseMatrix (SparseMatrix &&other) noexcept
 
 SparseMatrix (const SparseMatrix &)=delete
 
SparseMatrix & operator= (const SparseMatrix &)=delete
 
virtual ~SparseMatrix () override=default
 
template<typename SparsityPatternType >
void reinit (const SparsityPatternType &sparsity_pattern)
 
void reinit (const SparsityPattern &sparsity_pattern)
 
void reinit (const SparseMatrix &sparse_matrix)
 
template<typename number >
void reinit (const ::SparseMatrix< number > &dealii_sparse_matrix, const double drop_tolerance=1e-13, const bool copy_values=true, const ::SparsityPattern *use_this_sparsity=nullptr)
 
void reinit (const Epetra_CrsMatrix &input_matrix, const bool copy_values=true)
 
Constructors and initialization using an IndexSet description
 SparseMatrix (const IndexSet &parallel_partitioning, const MPI_Comm &communicator=MPI_COMM_WORLD, const unsigned int n_max_entries_per_row=0)
 
 SparseMatrix (const IndexSet &parallel_partitioning, const MPI_Comm &communicator, const std::vector< unsigned int > &n_entries_per_row)
 
 SparseMatrix (const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, const MPI_Comm &communicator=MPI_COMM_WORLD, const size_type n_max_entries_per_row=0)
 
 SparseMatrix (const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, const MPI_Comm &communicator, const std::vector< unsigned int > &n_entries_per_row)
 
template<typename SparsityPatternType >
void reinit (const IndexSet &parallel_partitioning, const SparsityPatternType &sparsity_pattern, const MPI_Comm &communicator=MPI_COMM_WORLD, const bool exchange_data=false)
 
template<typename SparsityPatternType >
std::enable_if< !std::is_same< SparsityPatternType, ::SparseMatrix< double > >::value >::type reinit (const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, const SparsityPatternType &sparsity_pattern, const MPI_Comm &communicator=MPI_COMM_WORLD, const bool exchange_data=false)
 
template<typename number >
void reinit (const IndexSet &parallel_partitioning, const ::SparseMatrix< number > &dealii_sparse_matrix, const MPI_Comm &communicator=MPI_COMM_WORLD, const double drop_tolerance=1e-13, const bool copy_values=true, const ::SparsityPattern *use_this_sparsity=nullptr)
 
template<typename number >
void reinit (const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, const ::SparseMatrix< number > &dealii_sparse_matrix, const MPI_Comm &communicator=MPI_COMM_WORLD, const double drop_tolerance=1e-13, const bool copy_values=true, const ::SparsityPattern *use_this_sparsity=nullptr)
 
Information on the matrix
size_type m () const
 
size_type n () const
 
unsigned int local_size () const
 
std::pair< size_type, size_type > local_range () const
 
bool in_local_range (const size_type index) const
 
size_type n_nonzero_elements () const
 
unsigned int row_length (const size_type row) const
 
bool is_compressed () const
 
size_type memory_consumption () const
 
MPI_Comm get_mpi_communicator () const
 
Modifying entries
SparseMatrix & operator= (const double d)
 
void clear ()
 
void compress (::VectorOperation::values operation)
 
void set (const size_type i, const size_type j, const TrilinosScalar value)
 
void set (const std::vector< size_type > &indices, const FullMatrix< TrilinosScalar > &full_matrix, const bool elide_zero_values=false)
 
void set (const std::vector< size_type > &row_indices, const std::vector< size_type > &col_indices, const FullMatrix< TrilinosScalar > &full_matrix, const bool elide_zero_values=false)
 
void set (const size_type row, const std::vector< size_type > &col_indices, const std::vector< TrilinosScalar > &values, const bool elide_zero_values=false)
 
template<typename Number >
void set (const size_type row, const size_type n_cols, const size_type *col_indices, const Number *values, const bool elide_zero_values=false)
 
void add (const size_type i, const size_type j, const TrilinosScalar value)
 
void add (const std::vector< size_type > &indices, const FullMatrix< TrilinosScalar > &full_matrix, const bool elide_zero_values=true)
 
void add (const std::vector< size_type > &row_indices, const std::vector< size_type > &col_indices, const FullMatrix< TrilinosScalar > &full_matrix, const bool elide_zero_values=true)
 
void add (const size_type row, const std::vector< size_type > &col_indices, const std::vector< TrilinosScalar > &values, const bool elide_zero_values=true)
 
void add (const size_type row, const size_type n_cols, const size_type *col_indices, const TrilinosScalar *values, const bool elide_zero_values=true, const bool col_indices_are_sorted=false)
 
SparseMatrix & operator*= (const TrilinosScalar factor)
 
SparseMatrix & operator/= (const TrilinosScalar factor)
 
void copy_from (const SparseMatrix &source)
 
void add (const TrilinosScalar factor, const SparseMatrix &matrix)
 
void clear_row (const size_type row, const TrilinosScalar new_diag_value=0)
 
void clear_rows (const std::vector< size_type > &rows, const TrilinosScalar new_diag_value=0)
 
void transpose ()
 
Entry Access
TrilinosScalar operator() (const size_type i, const size_type j) const
 
TrilinosScalar el (const size_type i, const size_type j) const
 
TrilinosScalar diag_element (const size_type i) const
 
Multiplications
template<typename VectorType >
std::enable_if< std::is_same< typename VectorType::value_type, TrilinosScalar >::value >::type vmult (VectorType &dst, const VectorType &src) const
 
template<typename VectorType >
std::enable_if<!std::is_same< typename VectorType::value_type, TrilinosScalar >::value >::type vmult (VectorType &dst, const VectorType &src) const
 
template<typename VectorType >
std::enable_if< std::is_same< typename VectorType::value_type, TrilinosScalar >::value >::type Tvmult (VectorType &dst, const VectorType &src) const
 
template<typename VectorType >
std::enable_if<!std::is_same< typename VectorType::value_type, TrilinosScalar >::value >::type Tvmult (VectorType &dst, const VectorType &src) const
 
template<typename VectorType >
void vmult_add (VectorType &dst, const VectorType &src) const
 
template<typename VectorType >
void Tvmult_add (VectorType &dst, const VectorType &src) const
 
TrilinosScalar matrix_norm_square (const MPI::Vector &v) const
 
TrilinosScalar matrix_scalar_product (const MPI::Vector &u, const MPI::Vector &v) const
 
TrilinosScalar residual (MPI::Vector &dst, const MPI::Vector &x, const MPI::Vector &b) const
 
void mmult (SparseMatrix &C, const SparseMatrix &B, const MPI::Vector &V=MPI::Vector()) const
 
void Tmmult (SparseMatrix &C, const SparseMatrix &B, const MPI::Vector &V=MPI::Vector()) const
 
Matrix norms
TrilinosScalar l1_norm () const
 
TrilinosScalar linfty_norm () const
 
TrilinosScalar frobenius_norm () const
 
Access to underlying Trilinos data
const Epetra_CrsMatrix & trilinos_matrix () const
 
const Epetra_CrsGraph & trilinos_sparsity_pattern () const
 
Partitioners
IndexSet locally_owned_domain_indices () const
 
IndexSet locally_owned_range_indices () const
 
Iterators
const_iterator begin () const
 
iterator begin ()
 
const_iterator end () const
 
iterator end ()
 
const_iterator begin (const size_type r) const
 
iterator begin (const size_type r)
 
const_iterator end (const size_type r) const
 
iterator end (const size_type r)
 
- Public Member Functions inherited from Subscriptor
 Subscriptor ()
 
 Subscriptor (const Subscriptor &)
 
 Subscriptor (Subscriptor &&) noexcept
 
virtual ~Subscriptor ()
 
Subscriptor & operator= (const Subscriptor &)
 
Subscriptor & operator= (Subscriptor &&) noexcept
 
void subscribe (std::atomic< bool > *const validity, const std::string &identifier="") const
 
void unsubscribe (std::atomic< bool > *const validity, const std::string &identifier="") const
 
unsigned int n_subscriptions () const
 
template<typename StreamType >
void list_subscribers (StreamType &stream) const
 
void list_subscribers () const
 
template<class Archive >
void serialize (Archive &ar, const unsigned int version)
 

Static Public Member Functions

static ::ExceptionBase & ExcAccessToNonlocalRow (std::size_t arg1)
 
- Static Public Member Functions inherited from Subscriptor
static ::ExceptionBase & ExcInUse (int arg1, std::string arg2, std::string arg3)
 
static ::ExceptionBase & ExcNoSubscriber (std::string arg1, std::string arg2)
 

Input/Output

void write_ascii ()
 
void print (std::ostream &out, const bool write_extended_trilinos_info=false) const
 

Exceptions

static ::ExceptionBase & ExcTrilinosError (int arg1)
 
static ::ExceptionBase & ExcInvalidIndex (size_type arg1, size_type arg2)
 
static ::ExceptionBase & ExcSourceEqualsDestination ()
 
static ::ExceptionBase & ExcMatrixNotCompressed ()
 
static ::ExceptionBase & ExcAccessToNonLocalElement (size_type arg1, size_type arg2, size_type arg3, size_type arg4)
 
static ::ExceptionBase & ExcAccessToNonPresentElement (size_type arg1, size_type arg2)
 

Friends

class BlockMatrixBase< SparseMatrix >
 

Private Member Functions

void prepare_add ()
 
void prepare_set ()
 

Private Attributes

std::unique_ptr< Epetra_Map > column_space_map
 
std::unique_ptr< Epetra_FECrsMatrix > matrix
 
std::unique_ptr< Epetra_CrsMatrix > nonlocal_matrix
 
std::unique_ptr< Epetra_Export > nonlocal_matrix_exporter
 
Epetra_CombineMode last_action
 
bool compressed
 

Detailed Description

This class implements a wrapper around the Trilinos distributed sparse matrix class Epetra_FECrsMatrix. This is precisely the kind of matrix we deal with all the time: we most likely get it from some assembly process, where entries that are not locally owned may also need to be written and hence have to be forwarded to the owner process. This class is designed to be used in a distributed-memory architecture with MPI underneath, but it works equally well for serial processes. The only requirement for this class to work is that Trilinos has been installed with the same compiler as is used for compiling deal.II.

The interface of this class is modeled after the existing SparseMatrix class in deal.II. It has almost the same member functions and is often interchangeable with it. However, since Trilinos only supports a single scalar type (double), it is not templated and works only with doubles.

Note that Trilinos only guarantees that operations do what you expect if the function GlobalAssemble has been called after matrix assembly. Therefore, you need to call SparseMatrix::compress() before you actually use the matrix. This also calls FillComplete, which compresses the storage format for sparse matrices by discarding unused elements. Trilinos allows you to continue assembling the matrix after calls to these functions, though.
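
As an illustration that is not part of the original documentation, the following minimal sketch shows the typical life cycle of such a matrix: construct it with an IndexSet describing the locally owned rows, write entries, and call compress() before using it. The global size (100), the block row partition, and the entry values are made-up placeholders.

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/mpi.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>

  using namespace dealii;

  void build_example_matrix(const MPI_Comm &communicator)
  {
    // Made-up global size and a simple contiguous block row partition.
    const unsigned int n_rows  = 100;
    const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
    const unsigned int n_procs = Utilities::MPI::n_mpi_processes(communicator);

    IndexSet locally_owned(n_rows);
    const unsigned int chunk = n_rows / n_procs;
    const unsigned int first = my_rank * chunk;
    const unsigned int last  = (my_rank + 1 == n_procs) ? n_rows : first + chunk;
    locally_owned.add_range(first, last);

    TrilinosWrappers::SparseMatrix matrix(locally_owned,
                                          communicator,
                                          /*n_max_entries_per_row=*/3);

    // Write entries of a simple upper bidiagonal matrix; writing into a
    // column owned by another process is allowed.
    for (unsigned int row = first; row < last; ++row)
      {
        matrix.set(row, row, 2.0);
        if (row + 1 < n_rows)
          matrix.set(row, row + 1, -1.0);
      }

    // Triggers Epetra's GlobalAssemble/FillComplete; required before the
    // matrix can be used, e.g., in matrix-vector products.
    matrix.compress(VectorOperation::insert);
  }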

Thread safety of Trilinos matrices

When writing into Trilinos matrices from several threads in shared memory, several things must be kept in mind, as there are no built-in locks in this class to prevent data races. Simultaneous access to the same matrix row can lead to data races and must be explicitly avoided by the user. However, it is possible to access different rows of the matrix from several threads simultaneously, provided the matrix has been initialized in a way that fixes up front which rows may be written to, in particular via a TrilinosWrappers::SparsityPattern whose reinit() call explicitly states the writeable rows (see the reinit() function taking such a sparsity pattern below).

Note that all other reinit methods and constructors of TrilinosWrappers::SparsityPattern will result in a matrix that needs to allocate off-processor entries on demand, which breaks thread-safety. Of course, using the respective reinit method for the block Trilinos sparsity pattern and block matrix also results in thread-safety.
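
As an illustration that is not part of the original text, the following sketch sets up a matrix from a TrilinosWrappers::SparsityPattern whose reinit() call explicitly states the writeable rows; it assumes the reinit() overload taking row, column, and writeable-row index sets, and the index-set arguments stand in for what a parallel DoFHandler would provide (locally owned and locally relevant DoFs).

  #include <deal.II/base/index_set.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>
  #include <deal.II/lac/trilinos_sparsity_pattern.h>

  using namespace dealii;

  void make_matrix_for_threaded_assembly(const IndexSet &locally_owned_dofs,
                                         const IndexSet &locally_relevant_dofs,
                                         const MPI_Comm &communicator,
                                         TrilinosWrappers::SparseMatrix &matrix)
  {
    TrilinosWrappers::SparsityPattern sp;
    // Stating the writeable rows (here: all locally relevant rows) up front
    // avoids on-demand allocation of off-processor entries later on.
    sp.reinit(locally_owned_dofs,
              locally_owned_dofs,
              locally_relevant_dofs,
              communicator);
    // ... add entries to sp, e.g. via DoFTools::make_sparsity_pattern() ...
    sp.compress();

    matrix.reinit(sp);
  }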

Author
Martin Kronbichler, Wolfgang Bangerth, 2008, 2009

Definition at line 515 of file trilinos_sparse_matrix.h.

Member Typedef Documentation

◆ size_type

Declare the type for container size.

Definition at line 521 of file trilinos_sparse_matrix.h.

◆ iterator

Declare an alias for the iterator class.

Definition at line 552 of file trilinos_sparse_matrix.h.

◆ const_iterator

Declare an alias for the const iterator class.

Definition at line 557 of file trilinos_sparse_matrix.h.

◆ value_type

Declare an alias in analogy to all the other container classes.

Definition at line 562 of file trilinos_sparse_matrix.h.

Constructor & Destructor Documentation

◆ SparseMatrix() [1/10]

SparseMatrix::SparseMatrix ( )

Default constructor. Generates an empty (zero-size) matrix.

Definition at line 208 of file trilinos_sparse_matrix.cc.

◆ SparseMatrix() [2/10]

SparseMatrix::SparseMatrix ( const size_type  m,
const size_type  n,
const unsigned int  n_max_entries_per_row 
)

Generate a matrix that is completely stored locally, having m rows and n columns.

The number of columns entries per row is specified as the maximum number of entries argument.

Definition at line 220 of file trilinos_sparse_matrix.cc.

◆ SparseMatrix() [3/10]

SparseMatrix::SparseMatrix ( const size_type  m,
const size_type  n,
const std::vector< unsigned int > &  n_entries_per_row 
)

Generate a matrix that is completely stored locally, having m rows and n columns.

The vector n_entries_per_row specifies the number of entries in each row.

Definition at line 251 of file trilinos_sparse_matrix.cc.

◆ SparseMatrix() [4/10]

SparseMatrix::SparseMatrix ( const SparsityPattern &  InputSparsityPattern)

Generate a matrix from a Trilinos sparsity pattern object.

Definition at line 340 of file trilinos_sparse_matrix.cc.

◆ SparseMatrix() [5/10]

SparseMatrix::SparseMatrix ( SparseMatrix &&  other)
noexcept

Move constructor. Create a new sparse matrix by stealing the internal data.

Definition at line 357 of file trilinos_sparse_matrix.cc.

◆ SparseMatrix() [6/10]

TrilinosWrappers::SparseMatrix::SparseMatrix ( const SparseMatrix & )
delete

Copy constructor is deleted.

◆ ~SparseMatrix()

virtual TrilinosWrappers::SparseMatrix::~SparseMatrix ( )
override virtual default

Destructor. Made virtual so that one can use pointers to this class.

◆ SparseMatrix() [7/10]

SparseMatrix::SparseMatrix ( const IndexSet &  parallel_partitioning,
const MPI_Comm &  communicator = MPI_COMM_WORLD,
const unsigned int  n_max_entries_per_row = 0 
)

Constructor using an IndexSet and an MPI communicator to describe the parallel partitioning. The parameter n_max_entries_per_row sets the number of nonzero entries in each row that will be allocated. Note that this number does not need to be exact, and it is even allowed that the actual matrix structure has more nonzero entries than specified in the constructor. It is still advantageous to provide good estimates here, however, since this will considerably increase the performance of the matrix setup. There is no effect on the performance of matrix-vector products, though, since Trilinos reorganizes the matrix memory prior to use (in the compress() step).

Definition at line 273 of file trilinos_sparse_matrix.cc.

◆ SparseMatrix() [8/10]

SparseMatrix::SparseMatrix ( const IndexSet &  parallel_partitioning,
const MPI_Comm &  communicator,
const std::vector< unsigned int > &  n_entries_per_row 
)

Same as before, but now set the number of nonzeros in each matrix row separately. Since we know the number of elements in the matrix exactly in this case, we can already allocate the right amount of memory, which makes the creation process including the insertion of nonzero elements by the respective SparseMatrix::reinit call considerably faster.

Definition at line 288 of file trilinos_sparse_matrix.cc.

◆ SparseMatrix() [9/10]

SparseMatrix::SparseMatrix ( const IndexSet &  row_parallel_partitioning,
const IndexSet &  col_parallel_partitioning,
const MPI_Comm &  communicator = MPI_COMM_WORLD,
const size_type  n_max_entries_per_row = 0 
)

This constructor is similar to the one above, but it now takes two different IndexSet partitions for row and columns. This interface is meant to be used for generating rectangular matrices, where the first index set describes the parallel partitioning of the degrees of freedom associated with the matrix rows and the second one the partitioning of the matrix columns. The second index set specifies the partitioning of the vectors this matrix is to be multiplied with, not the distribution of the elements that actually appear in the matrix.

The parameter n_max_entries_per_row defines how much memory will be allocated for each row. This number does not need to be accurate, as the structure is reorganized in the compress() call.

Definition at line 305 of file trilinos_sparse_matrix.cc.

◆ SparseMatrix() [10/10]

SparseMatrix::SparseMatrix ( const IndexSet &  row_parallel_partitioning,
const IndexSet &  col_parallel_partitioning,
const MPI_Comm &  communicator,
const std::vector< unsigned int > &  n_entries_per_row 
)

This constructor is similar to the one above, but it now takes two different Epetra maps for rows and columns. This interface is meant to be used for generating rectangular matrices, where one map specifies the parallel distribution of the degrees of freedom associated with the matrix rows and the second one specifies the parallel distribution of the dofs associated with the columns of the matrix. The second map also provides information for the internal arrangement in matrix-vector products (i.e., the distribution of the vectors this matrix is to be multiplied with), but is not used for the distribution of the columns – rather, all column elements of a row are stored on the same processor in any case. The vector n_entries_per_row specifies the number of entries in each row of the newly generated matrix.

Definition at line 322 of file trilinos_sparse_matrix.cc.

Member Function Documentation

◆ operator=() [1/2]

SparseMatrix & TrilinosWrappers::SparseMatrix::operator= ( const SparseMatrix & )
delete

operator= is deleted.

◆ reinit() [1/9]

template<typename SparsityPatternType >
void SparseMatrix::reinit ( const SparsityPatternType &  sparsity_pattern)

This function initializes the Trilinos matrix with a deal.II sparsity pattern, i.e. it makes the Trilinos Epetra matrix know the position of nonzero entries according to the sparsity pattern. This function is meant for use in serial programs, where there is no need to specify how the matrix is going to be distributed among different processors. This function works in parallel, too, but it is recommended to manually specify the parallel partitioning of the matrix using an Epetra_Map. When run in parallel, it is currently necessary that each processor holds the sparsity_pattern structure because each processor sets its rows.

This is a collective operation that needs to be called on all processors in order to avoid a deadlock.

Definition at line 781 of file trilinos_sparse_matrix.cc.
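
A minimal serial sketch, not part of the original documentation, using a small hand-built deal.II SparsityPattern; in an actual finite element program the pattern would typically come from DoFTools::make_sparsity_pattern().

  #include <deal.II/lac/sparsity_pattern.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>

  using namespace dealii;

  void init_from_dealii_pattern()
  {
    // A 4x4 tridiagonal pattern, built by hand for illustration.
    SparsityPattern pattern(4, 4, 3);
    for (unsigned int i = 0; i < 4; ++i)
      {
        pattern.add(i, i);
        if (i > 0)
          pattern.add(i, i - 1);
        if (i < 3)
          pattern.add(i, i + 1);
      }
    pattern.compress();

    TrilinosWrappers::SparseMatrix matrix;
    matrix.reinit(pattern);      // the nonzero positions are now fixed
    matrix.set(0, 0, 2.0);       // only entries inside the pattern can be set
    matrix.compress(VectorOperation::insert);
  }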

◆ reinit() [2/9]

void SparseMatrix::reinit ( const SparsityPattern &  sparsity_pattern)

This function reinitializes the Trilinos sparse matrix from a (possibly distributed) Trilinos sparsity pattern.

This is a collective operation that needs to be called on all processors in order to avoid a deadlock.

If you want to write to the matrix from several threads and use MPI, you need to use this reinit method with a sparsity pattern that has been created with explicitly stating writeable rows. In all other cases, you cannot mix MPI with multithreaded writing into the matrix.

Definition at line 825 of file trilinos_sparse_matrix.cc.

◆ reinit() [3/9]

void SparseMatrix::reinit ( const SparseMatrix &  sparse_matrix)

This function copies the layout of sparse_matrix to the calling matrix. The values are not copied, but you can use copy_from() for this.

This is a collective operation that needs to be called on all processors in order to avoid a deadlock.

Definition at line 849 of file trilinos_sparse_matrix.cc.

◆ reinit() [4/9]

template<typename number >
void SparseMatrix::reinit ( const ::SparseMatrix< number > &  dealii_sparse_matrix,
const double  drop_tolerance = 1e-13,
const bool  copy_values = true,
const ::SparsityPattern *  use_this_sparsity = nullptr 
)

This function initializes the Trilinos matrix using the deal.II sparse matrix and the entries stored therein. It uses a threshold to copy only elements with modulus larger than the threshold (so zeros in the deal.II matrix can be filtered away).

The optional parameter copy_values decides whether only the sparsity structure of the input matrix should be used or the matrix entries should be copied, too.

This is a collective operation that needs to be called on all processors in order to avoid a deadlock.

Note
If a different sparsity pattern is given in the last argument (i.e., one that differs from the one used in the sparse matrix given in the first argument), then the resulting Trilinos matrix will have the sparsity pattern so given. This of course also means that all entries in the given matrix that are not part of this separate sparsity pattern will in fact be dropped.

Definition at line 987 of file trilinos_sparse_matrix.cc.
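
A minimal sketch, not part of the original documentation, of this conversion; the deal.II matrix passed in and the chosen drop tolerance are placeholders.

  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>

  using namespace dealii;

  void copy_to_trilinos(const SparseMatrix<double> &dealii_matrix,
                        TrilinosWrappers::SparseMatrix &trilinos_matrix)
  {
    // Copies structure and values, dropping entries whose modulus is not
    // larger than the given tolerance.
    trilinos_matrix.reinit(dealii_matrix,
                           /*drop_tolerance=*/1e-10,
                           /*copy_values=*/true);
  }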

◆ reinit() [5/9]

void SparseMatrix::reinit ( const Epetra_CrsMatrix &  input_matrix,
const bool  copy_values = true 
)

This reinit function takes as input a Trilinos Epetra_CrsMatrix and copies its sparsity pattern. If so requested, even the content (values) will be copied.

Definition at line 1005 of file trilinos_sparse_matrix.cc.

◆ reinit() [6/9]

template<typename SparsityPatternType >
void TrilinosWrappers::SparseMatrix::reinit ( const IndexSet &  parallel_partitioning,
const SparsityPatternType &  sparsity_pattern,
const MPI_Comm &  communicator = MPI_COMM_WORLD,
const bool  exchange_data = false 
)

This function initializes the Trilinos Epetra matrix according to the specified sparsity_pattern, and also reassigns the matrix rows to different processes according to a user-supplied index set and parallel communicator. In programs following the style of the tutorial programs, this function (and the respective call for a rectangular matrix) are the natural way to initialize the matrix size, its distribution among the MPI processes (if run in parallel), as well as the location of non-zero elements. Trilinos stores the sparsity pattern internally, so it won't be needed any more after this call, in contrast to deal.II's own sparsity pattern object. The optional argument exchange_data can be used for reinitialization with a sparsity pattern that is not fully constructed. This feature is only implemented for input sparsity patterns of type DynamicSparsityPattern. If the flag is not set, each processor just sets the elements in the sparsity pattern that belong to its rows.

This is a collective operation that needs to be called on all processors in order to avoid a deadlock.
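
A minimal sketch, not part of the original documentation, of this initialization path: each process adds couplings only for the rows it owns to a DynamicSparsityPattern and hands the pattern to reinit() together with the row IndexSet. In a finite element program the pattern would instead be filled by DoFTools::make_sparsity_pattern(); the couplings below are made up.

  #include <deal.II/base/index_set.h>
  #include <deal.II/lac/dynamic_sparsity_pattern.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>

  using namespace dealii;

  void setup_distributed_matrix(const IndexSet &locally_owned_rows,
                                const MPI_Comm &communicator,
                                TrilinosWrappers::SparseMatrix &matrix)
  {
    const auto n = locally_owned_rows.size();
    DynamicSparsityPattern dsp(n, n);

    // Each process only adds couplings for the rows it owns; with
    // exchange_data=true, entries created for foreign rows would be
    // forwarded to their owners during reinit().
    for (const auto row : locally_owned_rows)
      {
        dsp.add(row, row);
        if (row + 1 < n)
          dsp.add(row, row + 1);
      }

    matrix.reinit(locally_owned_rows, dsp, communicator,
                  /*exchange_data=*/false);
  }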

◆ reinit() [7/9]

template<typename SparsityPatternType >
std::enable_if< !std::is_same< SparsityPatternType, ::SparseMatrix< double > >::value >::type SparseMatrix::reinit ( const IndexSet &  row_parallel_partitioning,
const IndexSet &  col_parallel_partitioning,
const SparsityPatternType &  sparsity_pattern,
const MPI_Comm &  communicator = MPI_COMM_WORLD,
const bool  exchange_data = false 
)
inline

This function is similar to the other initialization function above, but now also reassigns the matrix rows and columns according to two user-supplied index sets. To be used for rectangular matrices. The optional argument exchange_data can be used for reinitialization with a sparsity pattern that is not fully constructed. This feature is only implemented for input sparsity patterns of type DynamicSparsityPattern.

This is a collective operation that needs to be called on all processors in order to avoid a deadlock.

Definition at line 800 of file trilinos_sparse_matrix.cc.

◆ reinit() [8/9]

template<typename number >
void TrilinosWrappers::SparseMatrix::reinit ( const IndexSet &  parallel_partitioning,
const ::SparseMatrix< number > &  dealii_sparse_matrix,
const MPI_Comm &  communicator = MPI_COMM_WORLD,
const double  drop_tolerance = 1e-13,
const bool  copy_values = true,
const ::SparsityPattern *  use_this_sparsity = nullptr 
)

This function initializes the Trilinos matrix using the deal.II sparse matrix and the entries stored therein. It uses a threshold to copy only elements with modulus larger than the threshold (so zeros in the deal.II matrix can be filtered away). In contrast to the other reinit function with deal.II sparse matrix argument, this function takes a parallel partitioning specified by the user instead of internally generating it.

The optional parameter copy_values decides whether only the sparsity structure of the input matrix should be used or the matrix entries should be copied, too.

This is a collective operation that needs to be called on all processors in order to avoid a deadlock.

◆ reinit() [9/9]

template<typename number >
void SparseMatrix::reinit ( const IndexSet &  row_parallel_partitioning,
const IndexSet &  col_parallel_partitioning,
const ::SparseMatrix< number > &  dealii_sparse_matrix,
const MPI_Comm &  communicator = MPI_COMM_WORLD,
const double  drop_tolerance = 1e-13,
const bool  copy_values = true,
const ::SparsityPattern *  use_this_sparsity = nullptr 
)
inline

This function is similar to the other initialization function with deal.II sparse matrix input above, but now takes index sets for both the rows and the columns of the matrix. Chosen for rectangular matrices.

The optional parameter copy_values decides whether only the sparsity structure of the input matrix should be used or the matrix entries should be copied, too.

This is a collective operation that needs to be called on all processors in order to avoid a deadlock.

Definition at line 875 of file trilinos_sparse_matrix.cc.

◆ m()

size_type TrilinosWrappers::SparseMatrix::m ( ) const

Return the number of rows in this matrix.

◆ n()

size_type TrilinosWrappers::SparseMatrix::n ( ) const

Return the number of columns in this matrix.

◆ local_size()

unsigned int TrilinosWrappers::SparseMatrix::local_size ( ) const

Return the local dimension of the matrix, i.e. the number of rows stored on the present MPI process. For sequential matrices, this number is the same as m(), but for parallel matrices it may be smaller.

To figure out which elements exactly are stored locally, use local_range().

◆ local_range()

std::pair<size_type, size_type> TrilinosWrappers::SparseMatrix::local_range ( ) const

Return a pair of indices indicating which rows of this matrix are stored locally. The first number is the index of the first row stored, the second the index of the one past the last one that is stored locally. If this is a sequential matrix, then the result will be the pair (0,m()), otherwise it will be a pair (i,i+n), where n=local_size().
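
As a small illustration not in the original text, the following sketch uses local_range() to visit only the rows stored on the current process (the matrix is assumed to be compressed):

  #include <deal.II/lac/trilinos_sparse_matrix.h>

  #include <cstddef>

  using namespace dealii;

  // Count the entries of all locally stored rows.
  std::size_t count_local_entries(const TrilinosWrappers::SparseMatrix &matrix)
  {
    const auto range = matrix.local_range(); // [first stored row, one past last)
    std::size_t count = 0;
    for (auto row = range.first; row < range.second; ++row)
      count += matrix.row_length(row);       // entries in a locally stored row
    return count;
  }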

◆ in_local_range()

bool TrilinosWrappers::SparseMatrix::in_local_range ( const size_type  index) const

Return whether index is in the local range or not, see also local_range().

◆ n_nonzero_elements()

size_type TrilinosWrappers::SparseMatrix::n_nonzero_elements ( ) const

Return the total number of nonzero elements of this matrix (summed over all MPI processes).

◆ row_length()

unsigned int SparseMatrix::row_length ( const size_type  row) const

Number of entries in a specific row.

Definition at line 1340 of file trilinos_sparse_matrix.cc.

◆ is_compressed()

bool TrilinosWrappers::SparseMatrix::is_compressed ( ) const

Return the state of the matrix, i.e., whether compress() needs to be called after an operation requiring data exchange. A call to compress() is also needed when the method set() has been called (even when working in serial).

◆ memory_consumption()

SparseMatrix::size_type SparseMatrix::memory_consumption ( ) const

Determine an estimate for the memory consumption (in bytes) of this object. Note that only the memory reserved on the current processor is returned in case this is called in an MPI-based program.

Definition at line 2288 of file trilinos_sparse_matrix.cc.

◆ get_mpi_communicator()

MPI_Comm SparseMatrix::get_mpi_communicator ( ) const

Return the MPI communicator object in use with this matrix.

Definition at line 2301 of file trilinos_sparse_matrix.cc.

◆ operator=() [2/2]

SparseMatrix & SparseMatrix::operator= ( const double  d)

This operator assigns a scalar to a matrix. Since this does usually not make much sense (should we set all matrix entries to this value? Only the nonzero entries of the sparsity pattern?), this operation is only allowed if the actual value to be assigned is zero. This operator only exists to allow for the obvious notation matrix=0, which sets all elements of the matrix to zero, but keeps the sparsity pattern previously used.

Definition at line 1766 of file trilinos_sparse_matrix.cc.

◆ clear()

void SparseMatrix::clear ( )

Release all memory and return to a state just like after having called the default constructor.

This is a collective operation that needs to be called on all processors in order to avoid a deadlock.

Definition at line 1099 of file trilinos_sparse_matrix.cc.

◆ compress()

void SparseMatrix::compress ( ::VectorOperation::values  operation)

This command does two things:

  • If the matrix was initialized without a sparsity pattern, elements have been added manually using the set() command. When this process is completed, a call to compress() reorganizes the internal data structures (sparsity pattern) so that a fast access to data is possible in matrix-vector products.
  • If the matrix structure has already been fixed (either by initialization with a sparsity pattern or by calling compress() during the setup phase), this command does the parallel exchange of data. This is necessary when we perform assembly on more than one (MPI) process, because some non-local row data will then have accumulated on processes that do not own those rows and must be forwarded to the owning process. This command is usually called after all elements have been traversed.

In both cases, this function compresses the data structures and allows the resulting matrix to be used in all other operations like matrix-vector products. This is a collective operation, i.e., it needs to be run on all processors when used in parallel.

See Compressing distributed objects for more information.

Definition at line 1040 of file trilinos_sparse_matrix.cc.
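
A minimal sketch, not part of the original documentation, of the two usual call patterns; the matrix is assumed to have been initialized with a sparsity pattern containing entry (0,0), with row 0 owned by the calling process.

  #include <deal.II/lac/trilinos_sparse_matrix.h>

  using namespace dealii;

  void set_then_add(TrilinosWrappers::SparseMatrix &matrix)
  {
    matrix.set(0, 0, 1.0);                    // overwrite entries ...
    matrix.compress(VectorOperation::insert); // ... then finish the "set" phase

    matrix.add(0, 0, 1.0);                    // accumulate into entries ...
    matrix.compress(VectorOperation::add);    // ... then finish the "add" phase
  }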

◆ set() [1/6]

void TrilinosWrappers::SparseMatrix::set ( const size_type  i,
const size_type  j,
const TrilinosScalar  value 
)

Set the element (i,j) to value.

This function is able to insert new elements into the matrix as long as compress() has not been called, so the sparsity pattern will be extended. When compress() is called for the first time (or in case the matrix is initialized from a sparsity pattern), no new elements can be added and an insertion of elements at positions which have not been initialized will throw an exception.

For the case that the matrix is constructed without a sparsity pattern and new matrix entries are added on demand, please note the following behavior imposed by the underlying Epetra_FECrsMatrix data structure: If the same matrix entry is inserted more than once, the matrix entries will be added upon calling compress() (since Epetra does not track values to the same entry before the final compress() is called), even if VectorOperation::insert is specified as argument to compress(). In the case you cannot make sure that matrix entries are only set once, initialize the matrix with a sparsity pattern to fix the matrix structure before inserting elements.

◆ set() [2/6]

void TrilinosWrappers::SparseMatrix::set ( const std::vector< size_type > &  indices,
const FullMatrix< TrilinosScalar > &  full_matrix,
const bool  elide_zero_values = false 
)

Set all elements given in a FullMatrix<double> into the sparse matrix locations given by indices. In other words, this function writes the elements in full_matrix into the calling matrix, using the local-to-global indexing specified by indices for both the rows and the columns of the matrix. This function assumes a quadratic sparse matrix and a quadratic full_matrix, the usual situation in FE calculations.

This function is able to insert new elements into the matrix as long as compress() has not been called, so the sparsity pattern will be extended. After compress() has been called for the first time or the matrix has been initialized from a sparsity pattern, extending the sparsity pattern is no longer possible and an insertion of elements at positions which have not been initialized will throw an exception.

The optional parameter elide_zero_values can be used to specify whether zero values should be inserted anyway or they should be filtered away. The default value is false, i.e., even zero values are inserted/replaced.

For the case that the matrix is constructed without a sparsity pattern and new matrix entries are added on demand, please note the following behavior imposed by the underlying Epetra_FECrsMatrix data structure: If the same matrix entry is inserted more than once, the matrix entries will be added upon calling compress() (since Epetra does not track values to the same entry before the final compress() is called), even if VectorOperation::insert is specified as argument to compress(). In the case you cannot make sure that matrix entries are only set once, initialize the matrix with a sparsity pattern to fix the matrix structure before inserting elements.

◆ set() [3/6]

void SparseMatrix::set ( const std::vector< size_type > &  row_indices,
const std::vector< size_type > &  col_indices,
const FullMatrix< TrilinosScalar > &  full_matrix,
const bool  elide_zero_values = false 
)

Same function as before, but now including the possibility to use rectangular full_matrices and different local-to-global indexing on rows and columns, respectively.

Definition at line 1366 of file trilinos_sparse_matrix.cc.

◆ set() [4/6]

void SparseMatrix::set ( const size_type  row,
const std::vector< size_type > &  col_indices,
const std::vector< TrilinosScalar > &  values,
const bool  elide_zero_values = false 
)

Set several elements in the specified row of the matrix with column indices as given by col_indices to the respective value.

This function is able to insert new elements into the matrix as long as compress() has not been called, so the sparsity pattern will be extended. After compress() has been called for the first time or the matrix has been initialized from a sparsity pattern, extending the sparsity pattern is no longer possible and an insertion of elements at positions which have not been initialized will throw an exception.

The optional parameter elide_zero_values can be used to specify whether zero values should be inserted anyway or they should be filtered away. The default value is false, i.e., even zero values are inserted/replaced.

For the case that the matrix is constructed without a sparsity pattern and new matrix entries are added on demand, please note the following behavior imposed by the underlying Epetra_FECrsMatrix data structure: If the same matrix entry is inserted more than once, the matrix entries will be added upon calling compress() (since Epetra does not track values to the same entry before the final compress() is called), even if VectorOperation::insert is specified as argument to compress(). In the case you cannot make sure that matrix entries are only set once, initialize the matrix with a sparsity pattern to fix the matrix structure before inserting elements.

Definition at line 1387 of file trilinos_sparse_matrix.cc.

◆ set() [5/6]

template<typename Number >
void TrilinosWrappers::SparseMatrix::set ( const size_type  row,
const size_type  n_cols,
const size_type *  col_indices,
const Number *  values,
const bool  elide_zero_values = false 
)

Set several elements in the given row of the sparse matrix: the columns are given by col_indices and the new values by values.

This function is able to insert new elements into the matrix as long as compress() has not been called, so the sparsity pattern will be extended. After compress() has been called for the first time or the matrix has been initialized from a sparsity pattern, extending the sparsity pattern is no longer possible and an insertion of elements at positions which have not been initialized will throw an exception.

The optional parameter elide_zero_values can be used to specify whether zero values should be inserted anyway or they should be filtered away. The default value is false, i.e., even zero values are inserted/replaced.

For the case that the matrix is constructed without a sparsity pattern and new matrix entries are added on demand, please note the following behavior imposed by the underlying Epetra_FECrsMatrix data structure: If the same matrix entry is inserted more than once, the matrix entries will be added upon calling compress() (since Epetra does not track values to the same entry before the final compress() is called), even if VectorOperation::insert is specified as argument to compress(). In the case you cannot make sure that matrix entries are only set once, initialize the matrix with a sparsity pattern to fix the matrix structure before inserting elements.

◆ add() [1/6]

void TrilinosWrappers::SparseMatrix::add ( const size_type  i,
const size_type  j,
const TrilinosScalar  value 
)

Add value to the element (i,j).

Just as the respective call in deal.II SparseMatrix<Number> class (but in contrast to the situation for PETSc based matrices), this function throws an exception if an entry does not exist in the sparsity pattern. Moreover, if value is not a finite number an exception is thrown.

◆ add() [2/6]

void SparseMatrix::add ( const std::vector< size_type > &  indices,
const FullMatrix< TrilinosScalar > &  full_matrix,
const bool  elide_zero_values = true 
)

Add all elements given in a FullMatrix<double> into sparse matrix locations given by indices. In other words, this function adds the elements in full_matrix to the respective entries in calling matrix, using the local-to-global indexing specified by indices for both the rows and the columns of the matrix. This function assumes a quadratic sparse matrix and a quadratic full_matrix, the usual situation in FE calculations.

Just as the respective call in deal.II SparseMatrix<Number> class (but in contrast to the situation for PETSc based matrices), this function throws an exception if an entry does not exist in the sparsity pattern.

The optional parameter elide_zero_values can be used to specify whether zero values should be added anyway or these should be filtered away and only non-zero data is added. The default value is true, i.e., zero values won't be added into the matrix.

Definition at line 1540 of file trilinos_sparse_matrix.cc.
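
A minimal sketch, not part of the original documentation, of the typical finite element use: adding a local (cell) matrix into the global matrix through local-to-global indices. The index vector and the cell matrix are placeholders for what an assembly loop would provide.

  #include <deal.II/lac/full_matrix.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>

  #include <vector>

  using namespace dealii;

  void add_cell_contribution(
    TrilinosWrappers::SparseMatrix &system_matrix,
    const std::vector<TrilinosWrappers::SparseMatrix::size_type>
      &local_dof_indices,
    const FullMatrix<TrilinosScalar> &cell_matrix)
  {
    // All couplings (i,j) of local_dof_indices must already be contained in
    // the sparsity pattern, otherwise an exception is thrown.
    system_matrix.add(local_dof_indices, cell_matrix,
                      /*elide_zero_values=*/true);
  }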

◆ add() [3/6]

void SparseMatrix::add ( const std::vector< size_type > &  row_indices,
const std::vector< size_type > &  col_indices,
const FullMatrix< TrilinosScalar > &  full_matrix,
const bool  elide_zero_values = true 
)

Same function as before, but now including the possibility to use rectangular full_matrices and different local-to-global indexing on rows and columns, respectively.

Definition at line 1559 of file trilinos_sparse_matrix.cc.

◆ add() [4/6]

void SparseMatrix::add ( const size_type  row,
const std::vector< size_type > &  col_indices,
const std::vector< TrilinosScalar > &  values,
const bool  elide_zero_values = true 
)

Add the values given by values to several elements in the specified row of the matrix, at the column indices given by col_indices.

Just as the respective call in deal.II SparseMatrix<Number> class (but in contrast to the situation for PETSc based matrices), this function throws an exception if an entry does not exist in the sparsity pattern.

The optional parameter elide_zero_values can be used to specify whether zero values should be added anyway or these should be filtered away and only non-zero data is added. The default value is true, i.e., zero values won't be added into the matrix.

Definition at line 1580 of file trilinos_sparse_matrix.cc.

◆ add() [5/6]

void SparseMatrix::add ( const size_type  row,
const size_type  n_cols,
const size_type *  col_indices,
const TrilinosScalar *  values,
const bool  elide_zero_values = true,
const bool  col_indices_are_sorted = false 
)

Add an array of values given by values in the given global matrix row at columns specified by col_indices in the sparse matrix.

Just as the respective call in deal.II SparseMatrix<Number> class (but in contrast to the situation for PETSc based matrices), this function throws an exception if an entry does not exist in the sparsity pattern.

The optional parameter elide_zero_values can be used to specify whether zero values should be added anyway or these should be filtered away and only non-zero data is added. The default value is true, i.e., zero values won't be added into the matrix.

Definition at line 1598 of file trilinos_sparse_matrix.cc.

◆ operator*=()

SparseMatrix & SparseMatrix::operator*= ( const TrilinosScalar  factor)

Multiply the entire matrix by a fixed factor.

Definition at line 1889 of file trilinos_sparse_matrix.cc.

◆ operator/=()

SparseMatrix & SparseMatrix::operator/= ( const TrilinosScalar  factor)

Divide the entire matrix by a fixed factor.

Definition at line 1901 of file trilinos_sparse_matrix.cc.

◆ copy_from()

void SparseMatrix::copy_from ( const SparseMatrix &  source)

Copy the given (Trilinos) matrix (sparsity pattern and entries).

Definition at line 372 of file trilinos_sparse_matrix.cc.

◆ add() [6/6]

void SparseMatrix::add ( const TrilinosScalar  factor,
const SparseMatrix &  matrix 
)

Add matrix scaled by factor to this matrix, i.e. the matrix factor*matrix is added to this. If the sparsity pattern of the calling matrix does not contain all the elements in the sparsity pattern of the input matrix, this function will throw an exception.

Definition at line 1784 of file trilinos_sparse_matrix.cc.

◆ clear_row()

void SparseMatrix::clear_row ( const size_type  row,
const TrilinosScalar  new_diag_value = 0 
)

Remove all elements from this row by setting them to zero. The function does not modify the number of allocated nonzero entries, it only sets the entries to zero.

This operation is used in eliminating constraints (e.g. due to hanging nodes) and makes sure that we can write this modification to the matrix without having to read entries (such as the locations of non-zero elements) from it — without this operation, removing constraints on parallel matrices is a rather complicated procedure.

The second parameter can be used to set the diagonal entry of this row to a value different from zero. The default is to set it to zero.

Note
If the matrix is stored in parallel across multiple processors using MPI, this function only touches rows that are locally stored and simply ignores all other row indices. Further, in the context of parallel computations, you will get into trouble if you clear a row while other processors still have pending writes or additions into the same row. In other words, if another processor still wants to add something to an element of a row and you call this function to zero out the row, then the next time you call compress() may add the remote value to the zero you just created. Consequently, you will want to call compress() after you made the last modifications to a matrix and before starting to clear rows.

Definition at line 1121 of file trilinos_sparse_matrix.cc.
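
A small sketch, not part of the original documentation: zero out a set of locally owned rows (e.g., rows of constrained degrees of freedom) and put 1 on their diagonals, after all pending modifications have been compressed as explained above. The row list is a placeholder.

  #include <deal.II/lac/trilinos_sparse_matrix.h>

  #include <vector>

  using namespace dealii;

  void eliminate_rows(
    TrilinosWrappers::SparseMatrix &matrix,
    const std::vector<TrilinosWrappers::SparseMatrix::size_type> &rows)
  {
    matrix.clear_rows(rows, /*new_diag_value=*/1.0);
  }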

◆ clear_rows()

void SparseMatrix::clear_rows ( const std::vector< size_type > &  rows,
const TrilinosScalar  new_diag_value = 0 
)

Same as clear_row(), except that it works on a number of rows at once.

The second parameter can be used to set the diagonal entries of all cleared rows to something different from zero. Note that all of these diagonal entries get the same value – if you want different values for the diagonal entries, you have to set them by hand.

Note
If the matrix is stored in parallel across multiple processors using MPI, this function only touches rows that are locally stored and simply ignores all other row indices. Further, in the context of parallel computations, you will get into trouble if you clear a row while other processors still have pending writes or additions into the same row. In other words, if another processor still wants to add something to an element of a row and you call this function to zero out the row, then the next time you call compress() may add the remote value to the zero you just created. Consequently, you will want to call compress() after you made the last modifications to a matrix and before starting to clear rows.

Definition at line 1158 of file trilinos_sparse_matrix.cc.

◆ transpose()

void SparseMatrix::transpose ( )

Sets an internal flag so that all operations performed by the matrix, i.e., multiplications, are done in transposed order. However, this does not reshape the matrix to transposed form directly, so care should be taken when using this flag.

Note
Calling this function any even number of times in succession will return the object to its original state.

Definition at line 1865 of file trilinos_sparse_matrix.cc.

◆ operator()()

TrilinosScalar SparseMatrix::operator() ( const size_type  i,
const size_type  j 
) const

Return the value of the entry (i,j). This may be an expensive operation and you should always take care where to call this function. As in the deal.II sparse matrix class, we throw an exception if the respective entry doesn't exist in the sparsity pattern of this class, which is requested from Trilinos. Moreover, an exception will be thrown when the requested element is not saved on the calling process.

Definition at line 1168 of file trilinos_sparse_matrix.cc.

◆ el()

TrilinosScalar SparseMatrix::el ( const size_type  i,
const size_type  j 
) const

Return the value of the matrix entry (i,j). If this entry does not exist in the sparsity pattern, then zero is returned. While this may be convenient in some cases, note that it is simple to write algorithms that are slow compared to an optimal solution, since the sparsity of the matrix is not used. On the other hand, if you want to be sure the entry exists, you should use operator() instead.

The lack of error checking in this function can also yield surprising results if you have a parallel matrix. In that case, just because you get a zero result from this function does not mean that either the entry does not exist in the sparsity pattern or that it does but has a value of zero. Rather, it could also be that it simply isn't stored on the current processor; in that case, it may be stored on a different processor, and possibly so with a nonzero value.

Definition at line 1247 of file trilinos_sparse_matrix.cc.

◆ diag_element()

TrilinosScalar SparseMatrix::diag_element ( const size_type  i) const

Return the main diagonal element in the ith row. This function throws an error if the matrix is not quadratic and it also throws an error if (i,i) is not element of the local matrix. See also the comment in trilinos_sparse_matrix.cc.

Definition at line 1319 of file trilinos_sparse_matrix.cc.

◆ vmult() [1/2]

template<typename VectorType >
std::enable_if< std::is_same< typename VectorType::value_type, TrilinosScalar >::value >::type SparseMatrix::vmult ( VectorType &  dst,
const VectorType &  src 
) const

Matrix-vector multiplication: let dst = M*src with M being this matrix.

Source and destination must not be the same vector.

This function can be called with several types of vector objects; VectorType can be, for example, TrilinosWrappers::MPI::Vector or one of the deal.II vector classes holding elements of type double.

When using vectors of type TrilinosWrappers::MPI::Vector, the vector dst has to be initialized with the same IndexSet that was used for the row indices of the matrix and the vector src has to be initialized with the same IndexSet that was used for the column indices of the matrix.

This function will be called when the underlying number type for the matrix object and the one for the vector object are the same. Despite looking complicated, the return type is just void.

In case of a serial vector, this function will only work when running on one processor, since the matrix object is inherently distributed. Otherwise, an exception will be thrown.

Definition at line 1975 of file trilinos_sparse_matrix.cc.
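
A minimal sketch, not part of the original documentation: vectors sized from the matrix partitioners are guaranteed to match the requirements stated above. The communicator is assumed to be the one the matrix was created with, and the matrix is assumed to be compressed.

  #include <deal.II/lac/trilinos_sparse_matrix.h>
  #include <deal.II/lac/trilinos_vector.h>

  using namespace dealii;

  void multiply(const TrilinosWrappers::SparseMatrix &matrix,
                const MPI_Comm &communicator)
  {
    TrilinosWrappers::MPI::Vector src(matrix.locally_owned_domain_indices(),
                                      communicator);
    TrilinosWrappers::MPI::Vector dst(matrix.locally_owned_range_indices(),
                                      communicator);
    // ... fill src ...
    matrix.vmult(dst, src); // dst = M * src; src and dst must be different
  }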

◆ vmult() [2/2]

template<typename VectorType >
std::enable_if<!std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type TrilinosWrappers::SparseMatrix::vmult ( VectorType &  dst,
const VectorType &  src 
) const

Same as the function above for the case that the underlying number type for the matrix object and the one for the vector object do not coincide. This case is not implemented. Calling it will result in a runtime error. Despite looking complicated, the return type is just void.

◆ Tvmult() [1/2]

template<typename VectorType >
std::enable_if< std::is_same< typename VectorType::value_type, TrilinosScalar >::value >::type SparseMatrix::Tvmult ( VectorType &  dst,
const VectorType &  src 
) const

Matrix-vector multiplication: let dst = M^T*src with M being this matrix. This function does the same as vmult() but takes the transposed matrix.

Source and destination must not be the same vector.

This function can be called with several types of vector objects, see the discussion about VectorType in vmult().

This function will be called when the underlying number type for the matrix object and the one for the vector object are the same. Despite looking complicated, the return type is just void.

Definition at line 2019 of file trilinos_sparse_matrix.cc.

◆ Tvmult() [2/2]

template<typename VectorType >
std::enable_if<!std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type TrilinosWrappers::SparseMatrix::Tvmult ( VectorType &  dst,
const VectorType &  src 
) const

Same as the function above for the case that the underlying number type for the matrix object and the one for the vector object do not coincide. This case is not implemented. Calling it will result in a runtime error. Despite looking complicated, the return type is just void.

◆ vmult_add()

template<typename VectorType >
void SparseMatrix::vmult_add ( VectorType &  dst,
const VectorType &  src 
) const

Adding matrix-vector multiplication. Add M*src to dst, with M being this matrix.

Source and destination must not be the same vector.

This function can be called with several types of vector objects, see the discussion about VectorType in vmult().

Definition at line 2059 of file trilinos_sparse_matrix.cc.

◆ Tvmult_add()

template<typename VectorType >
void SparseMatrix::Tvmult_add ( VectorType &  dst,
const VectorType &  src 
) const

Adding matrix-vector multiplication. Add M^T*src to dst with M being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

Source and destination must not be the same vector.

This function can be called with several types of vector objects, see the discussion about VectorType in vmult().

Definition at line 2075 of file trilinos_sparse_matrix.cc.

◆ matrix_norm_square()

TrilinosScalar SparseMatrix::matrix_norm_square ( const MPI::Vector &  v) const

Return the square of the norm of the vector \(v\) with respect to the norm induced by this matrix, i.e., \(\left(v,Mv\right)\). This is useful, e.g. in the finite element context, where the \(L_2\) norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

Obviously, the matrix needs to be quadratic for this operation.

The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

The vector has to be initialized with the same IndexSet the matrix was initialized with.

In case of a localized Vector, this function will only work when running on one processor, since the matrix object is inherently distributed. Otherwise, an exception will be thrown.

Definition at line 2090 of file trilinos_sparse_matrix.cc.

◆ matrix_scalar_product()

TrilinosScalar SparseMatrix::matrix_scalar_product ( const MPI::Vector &  u,
const MPI::Vector &  v 
) const

Compute the matrix scalar product \(\left(u,Mv\right)\).

The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

The vector u has to be initialized with the same IndexSet that was used for the row indices of the matrix and the vector v has to be initialized with the same IndexSet that was used for the column indices of the matrix.

In case of a localized Vector, this function will only work when running on one processor, since the matrix object is inherently distributed. Otherwise, an exception will be thrown.

This function is only implemented for square matrices.

Definition at line 2104 of file trilinos_sparse_matrix.cc.

◆ residual()

TrilinosScalar SparseMatrix::residual ( MPI::Vector &  dst,
const MPI::Vector &  x,
const MPI::Vector &  b 
) const

Compute the residual of an equation Mx=b, where the residual is defined to be r=b-Mx. Write the residual into dst. The l2 norm of the residual vector is returned.

Source x and destination dst must not be the same vector.

The vectors dst and b have to be initialized with the same IndexSet that was used for the row indices of the matrix and the vector x has to be initialized with the same IndexSet that was used for the column indices of the matrix.

In case of a localized Vector, this function will only work when running on one processor, since the matrix object is inherently distributed. Otherwise, an exception will be thrown.

Definition at line 2119 of file trilinos_sparse_matrix.cc.

◆ mmult()

void SparseMatrix::mmult ( SparseMatrix &  C,
const SparseMatrix &  B,
const MPI::Vector &  V = MPI::Vector() 
) const

Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix A and B have compatible sizes. The size of C will be set within this function.

The content as well as the sparsity pattern of the matrix C will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

Definition at line 2225 of file trilinos_sparse_matrix.cc.
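
A minimal sketch, not part of the original documentation; A and B are assumed to be compressed and of compatible sizes, and C is rebuilt inside the call.

  #include <deal.II/lac/trilinos_sparse_matrix.h>

  using namespace dealii;

  void multiply_matrices(const TrilinosWrappers::SparseMatrix &A,
                         const TrilinosWrappers::SparseMatrix &B,
                         TrilinosWrappers::SparseMatrix &C)
  {
    A.mmult(C, B); // C = A * B
    // With a vector V: A.mmult(C, B, V) computes C = A * diag(V) * B instead.
  }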

◆ Tmmult()

void SparseMatrix::Tmmult ( SparseMatrix &  C,
const SparseMatrix &  B,
const MPI::Vector &  V = MPI::Vector() 
) const

Perform the matrix-matrix multiplication with the transpose of this, i.e., C = A^T * B, or, if an optional vector argument is given, C = A^T * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix A and B have compatible sizes. The size of C will be set within this function.

The content as well as the sparsity pattern of the matrix C will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

Definition at line 2235 of file trilinos_sparse_matrix.cc.

◆ l1_norm()

TrilinosScalar SparseMatrix::l1_norm ( ) const

Return the l1-norm of the matrix, that is \(|M|_1= \max_{\mathrm{all\ columns\ } j} \sum_{\mathrm{all\ rows\ } i} |M_{ij}|\), (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. \(|Mv|_1 \leq |M|_1 |v|_1\). (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

Definition at line 1917 of file trilinos_sparse_matrix.cc.

◆ linfty_norm()

TrilinosScalar SparseMatrix::linfty_norm ( ) const

Return the linfty-norm of the matrix, that is \(|M|_\infty=\max_{\mathrm{all\ rows\ } i}\sum_{\mathrm{all\ columns\ } j} |M_{ij}|\) (max. sum of rows). This is the natural matrix norm that is compatible with the linfty-norm of vectors, i.e. \(|Mv|_\infty \leq |M|_\infty |v|_\infty\). (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

Definition at line 1926 of file trilinos_sparse_matrix.cc.

◆ frobenius_norm()

TrilinosScalar SparseMatrix::frobenius_norm ( ) const

Return the Frobenius norm of the matrix, i.e. the square root of the sum of squares of all entries in the matrix.

Definition at line 1935 of file trilinos_sparse_matrix.cc.
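
For illustration, a minimal sketch that queries all three matrix norms of an assembled matrix A (the names are illustrative):

#include <deal.II/lac/trilinos_sparse_matrix.h>

using namespace dealii;

void
example_norms(const TrilinosWrappers::SparseMatrix &A)
{
  const TrilinosScalar n_l1   = A.l1_norm();        // max. column sum
  const TrilinosScalar n_linf = A.linfty_norm();    // max. row sum
  const TrilinosScalar n_fro  = A.frobenius_norm(); // sqrt of sum of squares
  (void)n_l1;
  (void)n_linf;
  (void)n_fro;
}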

◆ trilinos_matrix()

const Epetra_CrsMatrix& TrilinosWrappers::SparseMatrix::trilinos_matrix ( ) const

Return a const reference to the underlying Trilinos Epetra_CrsMatrix data.

◆ trilinos_sparsity_pattern()

const Epetra_CrsGraph& TrilinosWrappers::SparseMatrix::trilinos_sparsity_pattern ( ) const

Return a const reference to the underlying Trilinos Epetra_CrsGraph data that stores the sparsity pattern of the matrix.

◆ locally_owned_domain_indices()

IndexSet TrilinosWrappers::SparseMatrix::locally_owned_domain_indices ( ) const

Return the partitioning of the domain space of this matrix, i.e., the partitioning of the vectors this matrix has to be multiplied with.

◆ locally_owned_range_indices()

IndexSet TrilinosWrappers::SparseMatrix::locally_owned_range_indices ( ) const

Return the partitioning of the range space of this matrix, i.e., the partitioning of the vectors that result from matrix-vector products.
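
These two index sets are what one typically uses to create vectors that are compatible with the matrix; a minimal sketch (matrix and vector names are illustrative):

#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector.h>

using namespace dealii;

void
example_compatible_vectors(const TrilinosWrappers::SparseMatrix &A)
{
  // x lives in the domain space (matrix columns), y in the range space
  // (matrix rows), so the product y = A x below is well defined.
  TrilinosWrappers::MPI::Vector x(A.locally_owned_domain_indices(),
                                  A.get_mpi_communicator());
  TrilinosWrappers::MPI::Vector y(A.locally_owned_range_indices(),
                                  A.get_mpi_communicator());
  x = 1.;
  A.vmult(y, x);
}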

◆ begin() [1/4]

const_iterator TrilinosWrappers::SparseMatrix::begin ( ) const

Return an iterator pointing to the first element of the matrix.

The elements accessed by iterators within each row are ordered in the way in which Trilinos stores them, though the implementation guarantees that all elements of one row are accessed before the elements of the next row. If your algorithm relies on visiting elements within one row, you will need to consult the Trilinos documentation on the order in which it stores data. It is, however, generally not a good and long-term stable idea to rely on the order in which you receive elements if you iterate over them.

When you iterate over the elements of a parallel matrix, you will only be able to access the locally owned rows. (You can access the other rows as well, but they will look empty.) In that case, you probably want to call the begin() function that takes the row as an argument to limit the range of elements to loop over.

◆ begin() [2/4]

iterator TrilinosWrappers::SparseMatrix::begin ( )

Like the function above, but for non-const matrices.

◆ end() [1/4]

const_iterator TrilinosWrappers::SparseMatrix::end ( ) const

Return an iterator pointing to the element past the last one of this matrix.

◆ end() [2/4]

iterator TrilinosWrappers::SparseMatrix::end ( )

Like the function above, but for non-const matrices.

◆ begin() [3/4]

const_iterator TrilinosWrappers::SparseMatrix::begin ( const size_type  r) const

Return an iterator pointing to the first element of row r.

Note that if the given row is empty, i.e. does not contain any nonzero entries, then the iterator returned by this function equals end(r). The returned iterator may not be dereferenceable in that case if neither row r nor any of the following rows contain any nonzero entries.

The elements accessed by iterators within each row are ordered in the way in which Trilinos stores them, though the implementation guarantees that all elements of one row are accessed before the elements of the next row. If your algorithm relies on visiting elements within one row, you will need to consult the Trilinos documentation on the order in which it stores data. It is, however, generally not a good and long-term stable idea to rely on the order in which you receive elements if you iterate over them.

Note
When you access the elements of a parallel matrix, you can only access the elements of rows that are actually stored locally. (You can access the other rows as well, but they will look empty.) Even then, if another processor has since written into, or added to, an element of the matrix that is stored on the current processor, then you will still see the old value of this entry unless you have called compress() between modifying the matrix element on the remote processor and accessing it on the current processor. See the documentation of the compress() function for more information.
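
A minimal sketch of the pattern recommended above for a parallel matrix: loop over the locally owned rows and use the per-row begin()/end() pair (the matrix name A is illustrative):

#include <deal.II/lac/trilinos_sparse_matrix.h>

using namespace dealii;

void
example_iterate(const TrilinosWrappers::SparseMatrix &A)
{
  TrilinosScalar local_sum = 0.;
  for (const auto row : A.locally_owned_range_indices())
    for (auto entry = A.begin(row); entry != A.end(row); ++entry)
      local_sum += entry->value(); // entry->row() and entry->column() are
                                   // also available
  (void)local_sum;
}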

◆ begin() [4/4]

iterator TrilinosWrappers::SparseMatrix::begin ( const size_type  r)

Like the function above, but for non-const matrices.

◆ end() [3/4]

const_iterator TrilinosWrappers::SparseMatrix::end ( const size_type  r) const

Return an iterator pointing to the element past the last one of row r, or past the end of the entire sparsity pattern if none of the rows after r contain any entries at all.

Note that the end iterator is not necessarily dereferenceable. This is in particular the case if it is the end iterator for the last row of a matrix.

◆ end() [4/4]

iterator TrilinosWrappers::SparseMatrix::end ( const size_type  r)

Like the function above, but for non-const matrices.

◆ write_ascii()

void SparseMatrix::write_ascii ( )

Abstract Trilinos object that helps view other Trilinos objects in ASCII. This function is currently not implemented.

Definition at line 2245 of file trilinos_sparse_matrix.cc.

◆ print()

void SparseMatrix::print ( std::ostream &out, const bool write_extended_trilinos_info = false ) const

Print the matrix to the given stream, using the format (row,col) value, i.e., one nonzero entry of the matrix per line. The optional flag additionally outputs the sparsity pattern in Trilinos style, where the data is sorted by processor number when printed to the stream, as well as a summary of the matrix such as its global size.

Definition at line 2256 of file trilinos_sparse_matrix.cc.
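
A minimal usage sketch (the matrix name A is illustrative):

#include <deal.II/lac/trilinos_sparse_matrix.h>

#include <iostream>

using namespace dealii;

void
example_print(const TrilinosWrappers::SparseMatrix &A)
{
  // One "(row,col) value" entry per line:
  A.print(std::cout);

  // Additionally print the Trilinos-style sparsity pattern and a summary:
  A.print(std::cout, /*write_extended_trilinos_info=*/true);
}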

◆ prepare_add()

void TrilinosWrappers::SparseMatrix::prepare_add ( )
protected

For some matrix storage formats, in particular for the PETSc distributed block matrices, set and add operations on individual elements cannot be freely mixed. Rather, one has to synchronize operations when one wants to switch from setting elements to adding to elements. BlockMatrixBase automatically synchronizes the access by calling this helper function for each block. This function ensures that the matrix is in a state that allows adding elements; if it previously already was in this state, the function does nothing.

◆ prepare_set()

void TrilinosWrappers::SparseMatrix::prepare_set ( )
protected

Same as prepare_add() but prepare the matrix for setting elements if the representation of elements in this class requires such an operation.

◆ set() [6/6]

template<>
void TrilinosWrappers::SparseMatrix::set ( const size_type row, const size_type n_cols, const size_type *col_indices, const TrilinosScalar *values, const bool elide_zero_values )

Definition at line 1406 of file trilinos_sparse_matrix.cc.

Friends And Related Function Documentation

◆ BlockMatrixBase< SparseMatrix >

friend class BlockMatrixBase< SparseMatrix >
friend

Definition at line 1927 of file trilinos_sparse_matrix.h.

Member Data Documentation

◆ column_space_map

std::unique_ptr<Epetra_Map> TrilinosWrappers::SparseMatrix::column_space_map
private

Pointer to the user-supplied Epetra Trilinos mapping of the matrix columns that assigns parts of the matrix to the individual processes.

Definition at line 1886 of file trilinos_sparse_matrix.h.

◆ matrix

std::unique_ptr<Epetra_FECrsMatrix> TrilinosWrappers::SparseMatrix::matrix
private

A sparse matrix object in Trilinos to be used for finite element based problems, which allows for assembling into non-local elements. The actual type, a sparse matrix, is set in the constructor.

Definition at line 1893 of file trilinos_sparse_matrix.h.

◆ nonlocal_matrix

std::unique_ptr<Epetra_CrsMatrix> TrilinosWrappers::SparseMatrix::nonlocal_matrix
private

A sparse matrix object in Trilinos to be used for collecting the non-local elements if the matrix was constructed from a Trilinos sparsity pattern with the respective option.

Definition at line 1900 of file trilinos_sparse_matrix.h.

◆ nonlocal_matrix_exporter

std::unique_ptr<Epetra_Export> TrilinosWrappers::SparseMatrix::nonlocal_matrix_exporter
private

An export object used to communicate the nonlocal matrix.

Definition at line 1905 of file trilinos_sparse_matrix.h.

◆ last_action

Epetra_CombineMode TrilinosWrappers::SparseMatrix::last_action
private

Trilinos doesn't allow mixing additions to matrix entries and overwriting them (to make synchronization of parallel computations simpler). The way we handle this is to store, for each access operation, whether it is an insertion or an addition. If the previous one was of a different type, then we first have to flush the Trilinos buffers; otherwise, we can simply go on. Luckily, Trilinos has an object for this which already does all the parallel communication in such a case, so we simply use their model, which stores whether the last operation was an addition or an insertion.

Definition at line 1918 of file trilinos_sparse_matrix.h.
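
The user-visible consequence is that one has to call compress() with the matching VectorOperation when switching between setting and adding entries; a minimal sketch with illustrative names A, i and j:

#include <deal.II/lac/trilinos_sparse_matrix.h>

using namespace dealii;

void
example_set_then_add(TrilinosWrappers::SparseMatrix                 &A,
                     const TrilinosWrappers::SparseMatrix::size_type i,
                     const TrilinosWrappers::SparseMatrix::size_type j)
{
  A.set(i, j, 1.0);                    // insertion phase
  A.compress(VectorOperation::insert); // flush buffers before switching
  A.add(i, j, 0.5);                    // addition phase
  A.compress(VectorOperation::add);    // finalize before using the matrix
}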

◆ compressed

bool TrilinosWrappers::SparseMatrix::compressed
private

A boolean variable to hold information on whether the matrix is compressed or not.

Definition at line 1924 of file trilinos_sparse_matrix.h.


The documentation for this class was generated from the following files: