Reference documentation for deal.II version 9.0.0
PETScWrappers::MPI::SparseMatrix Class Reference

#include <deal.II/lac/petsc_parallel_sparse_matrix.h>

Inheritance diagram for PETScWrappers::MPI::SparseMatrix:

Classes

struct  Traits
 

Public Types

typedef types::global_dof_index size_type
 
- Public Types inherited from PETScWrappers::MatrixBase
typedef MatrixIterators::const_iterator const_iterator
 
typedef types::global_dof_index size_type
 
typedef PetscScalar value_type
 

Public Member Functions

 SparseMatrix ()
 
 ~SparseMatrix ()
 
 SparseMatrix (const MPI_Comm &communicator, const size_type m, const size_type n, const size_type local_rows, const size_type local_columns, const size_type n_nonzero_per_row, const bool is_symmetric=false, const size_type n_offdiag_nonzero_per_row=0)
 
 SparseMatrix (const MPI_Comm &communicator, const size_type m, const size_type n, const size_type local_rows, const size_type local_columns, const std::vector< size_type > &row_lengths, const bool is_symmetric=false, const std::vector< size_type > &offdiag_row_lengths=std::vector< size_type >())
 
template<typename SparsityPatternType >
 SparseMatrix (const MPI_Comm &communicator, const SparsityPatternType &sparsity_pattern, const std::vector< size_type > &local_rows_per_process, const std::vector< size_type > &local_columns_per_process, const unsigned int this_process, const bool preset_nonzero_locations=true)
 
SparseMatrix & operator= (const value_type d)
 
void copy_from (const SparseMatrix &other)
 
void reinit (const MPI_Comm &communicator, const size_type m, const size_type n, const size_type local_rows, const size_type local_columns, const size_type n_nonzero_per_row, const bool is_symmetric=false, const size_type n_offdiag_nonzero_per_row=0)
 
void reinit (const MPI_Comm &communicator, const size_type m, const size_type n, const size_type local_rows, const size_type local_columns, const std::vector< size_type > &row_lengths, const bool is_symmetric=false, const std::vector< size_type > &offdiag_row_lengths=std::vector< size_type >())
 
template<typename SparsityPatternType >
void reinit (const MPI_Comm &communicator, const SparsityPatternType &sparsity_pattern, const std::vector< size_type > &local_rows_per_process, const std::vector< size_type > &local_columns_per_process, const unsigned int this_process, const bool preset_nonzero_locations=true)
 
template<typename SparsityPatternType >
void reinit (const IndexSet &local_rows, const IndexSet &local_columns, const SparsityPatternType &sparsity_pattern, const MPI_Comm &communicator)
 
void reinit (const SparseMatrix &other)
 
virtual const MPI_Comm & get_mpi_communicator () const
 
PetscScalar matrix_norm_square (const Vector &v) const
 
PetscScalar matrix_scalar_product (const Vector &u, const Vector &v) const
 
IndexSet locally_owned_domain_indices () const
 
IndexSet locally_owned_range_indices () const
 
void mmult (SparseMatrix &C, const SparseMatrix &B, const MPI::Vector &V=MPI::Vector()) const
 
void Tmmult (SparseMatrix &C, const SparseMatrix &B, const MPI::Vector &V=MPI::Vector()) const
 
- Public Member Functions inherited from PETScWrappers::MatrixBase
 MatrixBase ()
 
 MatrixBase (const MatrixBase &)=delete
 
MatrixBase & operator= (const MatrixBase &)=delete
 
virtual ~MatrixBase ()
 
MatrixBase & operator= (const value_type d)
 
void clear ()
 
void set (const size_type i, const size_type j, const PetscScalar value)
 
void set (const std::vector< size_type > &indices, const FullMatrix< PetscScalar > &full_matrix, const bool elide_zero_values=false)
 
void set (const std::vector< size_type > &row_indices, const std::vector< size_type > &col_indices, const FullMatrix< PetscScalar > &full_matrix, const bool elide_zero_values=false)
 
void set (const size_type row, const std::vector< size_type > &col_indices, const std::vector< PetscScalar > &values, const bool elide_zero_values=false)
 
void set (const size_type row, const size_type n_cols, const size_type *col_indices, const PetscScalar *values, const bool elide_zero_values=false)
 
void add (const size_type i, const size_type j, const PetscScalar value)
 
void add (const std::vector< size_type > &indices, const FullMatrix< PetscScalar > &full_matrix, const bool elide_zero_values=true)
 
void add (const std::vector< size_type > &row_indices, const std::vector< size_type > &col_indices, const FullMatrix< PetscScalar > &full_matrix, const bool elide_zero_values=true)
 
void add (const size_type row, const std::vector< size_type > &col_indices, const std::vector< PetscScalar > &values, const bool elide_zero_values=true)
 
void add (const size_type row, const size_type n_cols, const size_type *col_indices, const PetscScalar *values, const bool elide_zero_values=true, const bool col_indices_are_sorted=false)
 
void clear_row (const size_type row, const PetscScalar new_diag_value=0)
 
void clear_rows (const std::vector< size_type > &rows, const PetscScalar new_diag_value=0)
 
void compress (const VectorOperation::values operation)
 
PetscScalar operator() (const size_type i, const size_type j) const
 
PetscScalar el (const size_type i, const size_type j) const
 
PetscScalar diag_element (const size_type i) const
 
size_type m () const
 
size_type n () const
 
size_type local_size () const
 
std::pair< size_type, size_type > local_range () const
 
bool in_local_range (const size_type index) const
 
size_type n_nonzero_elements () const
 
size_type row_length (const size_type row) const
 
PetscReal l1_norm () const
 
PetscReal linfty_norm () const
 
PetscReal frobenius_norm () const
 
PetscScalar matrix_norm_square (const VectorBase &v) const
 
PetscScalar matrix_scalar_product (const VectorBase &u, const VectorBase &v) const
 
PetscScalar trace () const
 
MatrixBase & operator*= (const PetscScalar factor)
 
MatrixBase & operator/= (const PetscScalar factor)
 
MatrixBase & add (const PetscScalar factor, const MatrixBase &other)
 
MatrixBase & add (const MatrixBase &other, const PetscScalar factor)
 
void vmult (VectorBase &dst, const VectorBase &src) const
 
void Tvmult (VectorBase &dst, const VectorBase &src) const
 
void vmult_add (VectorBase &dst, const VectorBase &src) const
 
void Tvmult_add (VectorBase &dst, const VectorBase &src) const
 
PetscScalar residual (VectorBase &dst, const VectorBase &x, const VectorBase &b) const
 
const_iterator begin () const
 
const_iterator end () const
 
const_iterator begin (const size_type r) const
 
const_iterator end (const size_type r) const
 
 operator Mat () const
 
Mat & petsc_matrix ()
 
void transpose ()
 
PetscBool is_symmetric (const double tolerance=1.e-12)
 
PetscBool is_hermitian (const double tolerance=1.e-12)
 
void write_ascii (const PetscViewerFormat format=PETSC_VIEWER_DEFAULT)
 
void print (std::ostream &out, const bool alternative_output=false) const
 
std::size_t memory_consumption () const
 
- Public Member Functions inherited from Subscriptor
 Subscriptor ()
 
 Subscriptor (const Subscriptor &)
 
 Subscriptor (Subscriptor &&) noexcept
 
virtual ~Subscriptor ()
 
Subscriptor & operator= (const Subscriptor &)
 
Subscriptor & operator= (Subscriptor &&) noexcept
 
void subscribe (const char *identifier=nullptr) const
 
void unsubscribe (const char *identifier=nullptr) const
 
unsigned int n_subscriptions () const
 
void list_subscribers () const
 
template<class Archive >
void serialize (Archive &ar, const unsigned int version)
 

Static Public Member Functions

static ::ExceptionBase & ExcLocalRowsTooLarge (int arg1, int arg2)
 
- Static Public Member Functions inherited from PETScWrappers::MatrixBase
static ::ExceptionBase & ExcSourceEqualsDestination ()
 
static ::ExceptionBase & ExcWrongMode (int arg1, int arg2)
 
- Static Public Member Functions inherited from Subscriptor
static ::ExceptionBase & ExcInUse (int arg1, std::string arg2, std::string arg3)
 
static ::ExceptionBase & ExcNoSubscriber (std::string arg1, std::string arg2)
 

Private Member Functions

void do_reinit (const size_type m, const size_type n, const size_type local_rows, const size_type local_columns, const size_type n_nonzero_per_row, const bool is_symmetric=false, const size_type n_offdiag_nonzero_per_row=0)
 
void do_reinit (const size_type m, const size_type n, const size_type local_rows, const size_type local_columns, const std::vector< size_type > &row_lengths, const bool is_symmetric=false, const std::vector< size_type > &offdiag_row_lengths=std::vector< size_type >())
 
template<typename SparsityPatternType >
void do_reinit (const SparsityPatternType &sparsity_pattern, const std::vector< size_type > &local_rows_per_process, const std::vector< size_type > &local_columns_per_process, const unsigned int this_process, const bool preset_nonzero_locations)
 
template<typename SparsityPatternType >
void do_reinit (const IndexSet &local_rows, const IndexSet &local_columns, const SparsityPatternType &sparsity_pattern)
 

Private Attributes

MPI_Comm communicator
 

Friends

class BlockMatrixBase< SparseMatrix >
 

Additional Inherited Members

- Protected Member Functions inherited from PETScWrappers::MatrixBase
void prepare_action (const VectorOperation::values new_action)
 
void assert_is_compressed ()
 
void prepare_add ()
 
void prepare_set ()
 
void mmult (MatrixBase &C, const MatrixBase &B, const VectorBase &V) const
 
void Tmmult (MatrixBase &C, const MatrixBase &B, const VectorBase &V) const
 
- Protected Attributes inherited from PETScWrappers::MatrixBase
Mat matrix
 
VectorOperation::values last_action
 

Detailed Description

Implementation of a parallel sparse matrix class based on PETSc, with rows of the matrix distributed across an MPI network. All the functionality is actually in the base class, except for the calls to generate a parallel sparse matrix. This is possible since PETSc only works on an abstract matrix type and internally dispatches to functions that do the actual work depending on the actual matrix type (much like using virtual functions). Only the functions creating a matrix of a specific type differ, and are implemented in this particular class.

There are a number of comments on the communication model as well as on access to individual elements in the documentation of the parallel vector class. These comments apply here as well.

Partitioning of matrices

PETSc partitions parallel matrices so that each MPI process "owns" a certain number of rows (i.e. only this process stores the respective entries in these rows). The number of rows each process owns has to be passed to the constructors and reinit() functions via the argument local_rows. The individual values passed as local_rows on all the MPI processes of course have to add up to the global number of rows of the matrix.

In addition to this, PETSc also partitions the rectangular chunk of the matrix it owns (i.e. the local_rows times n() elements in the matrix), so that matrix-vector multiplications can be performed efficiently. This column partitioning therefore has to match the partitioning of the vectors with which the matrix is multiplied, just as the row partitioning has to match the partitioning of destination vectors. This partitioning is passed to the constructors and reinit() functions through the local_columns variable, which again has to add up to the global number of columns in the matrix. The name local_columns may be misleading, since it does not mean that only these columns are stored locally, but rather that these are the columns for which the elements of incoming vectors are stored locally.

To make things even more complicated, PETSc needs a very good estimate of the number of elements to be stored in each row to be efficient. Otherwise it spends most of the time allocating small chunks of memory, a process that can slow down programs to a crawl if it happens too often. As if a good estimate of the number of entries per row were not hard enough to come by, PETSc even needs this estimate split up: for each row it owns, it needs an estimate for the number of elements in this row that fall into the columns set apart for this process (see above), and for the number of elements that fall into the remaining columns.

Since in general this information is not readily available, most of the initializing functions of this class assume that all of the entries you report through n_nonzero_per_row or row_lengths fall into the columns "owned" by this process, and none into the other ones. This is a fair guess for most of the rows, since in a good domain partitioning, nodes only interact with nodes that are within the same subdomain. It does not hold for nodes on the interfaces between subdomains, however, and for the rows corresponding to these nodes, PETSc will have to allocate additional memory, a costly process.

The only way to avoid this is to tell PETSc where the actual entries of the matrix will be. For this, there are constructors and reinit() functions of this class that take a DynamicSparsityPattern object containing all this information. While in the general case it is sufficient if the constructors and reinit() functions know the number of local rows and columns, the functions getting a sparsity pattern also need to know the number of local rows (local_rows_per_process) and columns (local_columns_per_process) of all other processes, in order to compute which parts of the matrix are stored where. Thus, it is not sufficient to just count the number of degrees of freedom that belong to a particular process; you have to have these numbers for all processes available on all processes. A minimal sketch of this approach is shown below.
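
The following is a minimal sketch of this route (the helper function initialize_matrix and its arguments are illustrative, not part of deal.II): every process reports how many rows it owns, the counts are gathered with MPI_Allgather, and the sparsity pattern is handed to reinit() so that PETSc can preallocate exactly the right entries.

#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>

#include <mpi.h>

#include <vector>

using namespace dealii;

// Illustrative helper: gather the per-process row counts, then initialize
// the matrix from a sparsity pattern describing the whole matrix.
void initialize_matrix(const MPI_Comm &                   mpi_communicator,
                       const DynamicSparsityPattern &     sparsity_pattern,
                       const unsigned int                 n_local_rows,
                       PETScWrappers::MPI::SparseMatrix & matrix)
{
  int n_processes = 0, this_process = 0;
  MPI_Comm_size(mpi_communicator, &n_processes);
  MPI_Comm_rank(mpi_communicator, &this_process);

  // Every process needs the row counts of *all* processes; these values
  // have to add up to the global number of rows of the matrix.
  std::vector<unsigned int> rows_per_process(n_processes);
  MPI_Allgather(&n_local_rows, 1, MPI_UNSIGNED,
                rows_per_process.data(), 1, MPI_UNSIGNED,
                mpi_communicator);

  // Use the same partitioning for rows and columns (a square matrix) and
  // let PETSc preallocate the nonzero entries listed in the pattern.
  const std::vector<PETScWrappers::MPI::SparseMatrix::size_type>
    local_rows_per_process(rows_per_process.begin(), rows_per_process.end());

  matrix.reinit(mpi_communicator,
                sparsity_pattern,
                local_rows_per_process,
                local_rows_per_process,
                static_cast<unsigned int>(this_process),
                /*preset_nonzero_locations=*/true);
}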

Author
Wolfgang Bangerth, 2004

Definition at line 119 of file petsc_parallel_sparse_matrix.h.

Member Typedef Documentation

◆ size_type

Declare type for container size.

Definition at line 125 of file petsc_parallel_sparse_matrix.h.

Constructor & Destructor Documentation

◆ SparseMatrix() [1/4]

SparseMatrix::SparseMatrix ( )

Default constructor. Create an empty matrix.

Definition at line 33 of file petsc_parallel_sparse_matrix.cc.

◆ ~SparseMatrix()

SparseMatrix::~SparseMatrix ( )

Destructor to free the PETSc object.

Definition at line 47 of file petsc_parallel_sparse_matrix.cc.

◆ SparseMatrix() [2/4]

SparseMatrix::SparseMatrix ( const MPI_Comm &  communicator,
const size_type  m,
const size_type  n,
const size_type  local_rows,
const size_type  local_columns,
const size_type  n_nonzero_per_row,
const bool  is_symmetric = false,
const size_type  n_offdiag_nonzero_per_row = 0 
)

Create a sparse matrix of dimensions m times n, with an initial guess of n_nonzero_per_row and n_offdiag_nonzero_per_row nonzero elements per row (see documentation of the MatCreateAIJ PETSc function for more information about these parameters). PETSc is able to cope with the situation that more than this number of elements are later allocated for a row, but this involves copying data, and is thus expensive.

For the meaning of the local_rows and local_columns parameters, see the class documentation.

The is_symmetric flag determines whether we should tell PETSc that the matrix is going to be symmetric (as indicated by the call MatSetOption(mat, MAT_SYMMETRIC)). Note that the PETSc documentation states that one cannot form an ILU decomposition of a matrix for which this flag has been set to true, only an ICC. The default value of this flag is false.

Deprecated:
This constructor is deprecated: please use the constructor with a sparsity pattern argument instead.

Definition at line 52 of file petsc_parallel_sparse_matrix.cc.

◆ SparseMatrix() [3/4]

SparseMatrix::SparseMatrix ( const MPI_Comm &  communicator,
const size_type  m,
const size_type  n,
const size_type  local_rows,
const size_type  local_columns,
const std::vector< size_type > &  row_lengths,
const bool  is_symmetric = false,
const std::vector< size_type > &  offdiag_row_lengths = std::vector<size_type>() 
)

Initialize a rectangular matrix with m rows and n columns. The maximal number of nonzero entries for diagonal and off-diagonal blocks of each row is given by the row_lengths and offdiag_row_lengths arrays.

For the meaning of the local_rows and local_columns parameters, see the class documentation.

Just as for the other constructors: PETSc is able to cope with the situation that more than this number of elements are later allocated for a row, but this involves copying data, and is thus expensive.

The is_symmetric flag determines whether we should tell PETSc that the matrix is going to be symmetric (as indicated by the call MatSetOption(mat, MAT_SYMMETRIC)). Note that the PETSc documentation states that one cannot form an ILU decomposition of a matrix for which this flag has been set to true, only an ICC. The default value of this flag is false.

Deprecated:
This constructor is deprecated: please use the constructor with a sparsity pattern argument instead.

Definition at line 70 of file petsc_parallel_sparse_matrix.cc.

◆ SparseMatrix() [4/4]

template<typename SparsityPatternType >
SparseMatrix::SparseMatrix ( const MPI_Comm &  communicator,
const SparsityPatternType &  sparsity_pattern,
const std::vector< size_type > &  local_rows_per_process,
const std::vector< size_type > &  local_columns_per_process,
const unsigned int  this_process,
const bool  preset_nonzero_locations = true 
)

Initialize using the given sparsity pattern with communication happening over the provided communicator.

For the meaning of the local_rows_per_process and local_columns_per_process parameters, see the class documentation.

Note that PETSc can be very slow if you do not provide it with a good estimate of the lengths of rows. Using the present function is a very efficient way to do this, as it takes the exact number of nonzero entries in each row of the matrix from the given sparsity pattern argument. If the preset_nonzero_locations flag is true, this function not only sets the correct row sizes up front, but also pre-allocates the corresponding nonzero entries in the matrix.

PETSc allows you to add further nonzero entries to a matrix later, by simply writing to those elements. However, this will then lead to additional memory allocations which are very inefficient and will greatly slow down your program. It is therefore significantly more efficient to get memory allocation right from the start.

Definition at line 89 of file petsc_parallel_sparse_matrix.cc.

Member Function Documentation

◆ operator=()

SparseMatrix & SparseMatrix::operator= ( const value_type  d)

This operator assigns a scalar to a matrix. Since this usually does not make much sense (should we set all matrix entries to this value? Only the nonzero entries of the sparsity pattern?), this operation is only allowed if the actual value to be assigned is zero. This operator only exists to allow for the obvious notation matrix=0, which sets all elements of the matrix to zero but keeps the sparsity pattern previously used.

Definition at line 122 of file petsc_parallel_sparse_matrix.cc.
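
In a typical program this is used to re-assemble a matrix in every time step or nonlinear iteration without reallocating memory. A short sketch (assuming system_matrix is an already initialized matrix of this class):

  // Reset all stored entries to zero but keep the sparsity pattern.
  system_matrix = 0;

  // ... loop over cells and accumulate local contributions, e.g.
  // system_matrix.add(local_dof_indices, cell_matrix); ...

  // Exchange data between processes before the matrix can be used.
  system_matrix.compress(VectorOperation::add);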

◆ copy_from()

void SparseMatrix::copy_from ( const SparseMatrix &  other)

Make a copy of the PETSc matrix other. It is assumed that both matrices have the same SparsityPattern.

Definition at line 129 of file petsc_parallel_sparse_matrix.cc.

◆ reinit() [1/5]

void SparseMatrix::reinit ( const MPI_Comm &  communicator,
const size_type  m,
const size_type  n,
const size_type  local_rows,
const size_type  local_columns,
const size_type  n_nonzero_per_row,
const bool  is_symmetric = false,
const size_type  n_offdiag_nonzero_per_row = 0 
)

Throw away the present matrix and generate one that has the same properties as if it were created by the constructor of this class with the same argument list as the present function.

Deprecated:
This overload of reinit is deprecated: please use the overload with a sparsity pattern argument instead.

Definition at line 142 of file petsc_parallel_sparse_matrix.cc.

◆ reinit() [2/5]

void SparseMatrix::reinit ( const MPI_Comm &  communicator,
const size_type  m,
const size_type  n,
const size_type  local_rows,
const size_type  local_columns,
const std::vector< size_type > &  row_lengths,
const bool  is_symmetric = false,
const std::vector< size_type > &  offdiag_row_lengths = std::vector<size_type>() 
)

Throw away the present matrix and generate one that has the same properties as if it were created by the constructor of this class with the same argument list as the present function.

Deprecated:
This overload of reinit is deprecated: please use the overload with a sparsity pattern argument instead.

Definition at line 165 of file petsc_parallel_sparse_matrix.cc.

◆ reinit() [3/5]

template<typename SparsityPatternType >
void SparseMatrix::reinit ( const MPI_Comm &  communicator,
const SparsityPatternType &  sparsity_pattern,
const std::vector< size_type > &  local_rows_per_process,
const std::vector< size_type > &  local_columns_per_process,
const unsigned int  this_process,
const bool  preset_nonzero_locations = true 
)

Initialize using the given sparsity pattern with communication happening over the provided communicator.

Note that PETSc can be very slow if you do not provide it with a good estimate of the lengths of rows. Using the present function is a very efficient way to do this, as it takes the exact number of nonzero entries in each row of the matrix from the given sparsity pattern argument. If the preset_nonzero_locations flag is true, this function not only sets the correct row sizes up front, but also pre-allocates the corresponding nonzero entries in the matrix.

PETSc allows you to add further nonzero entries to a matrix later, by simply writing to those elements. However, this will then lead to additional memory allocations which are very inefficient and will greatly slow down your program. It is therefore significantly more efficient to get memory allocation right from the start.

Definition at line 190 of file petsc_parallel_sparse_matrix.cc.

◆ reinit() [4/5]

template<typename SparsityPatternType >
void SparseMatrix::reinit ( const IndexSet &  local_rows,
const IndexSet &  local_columns,
const SparsityPatternType &  sparsity_pattern,
const MPI_Comm &  communicator 
)

Create a matrix where the size() of the IndexSets determines the global number of rows and columns, and where the entries of the IndexSets give the rows and columns owned by the calling processor. Note that only ascending, 1:1 IndexSets are supported. A minimal usage sketch is shown below.

Definition at line 212 of file petsc_parallel_sparse_matrix.cc.
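
A minimal sketch (the function name setup_system_matrix is purely illustrative; the DoFHandler, here fixed to 2d, is assumed to be built on a distributed triangulation, and a complete program would typically also incorporate constraints and call SparsityTools::distribute_sparsity_pattern()):

#include <deal.II/base/index_set.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>

using namespace dealii;

void setup_system_matrix(const DoFHandler<2> &              dof_handler,
                         const MPI_Comm &                   mpi_communicator,
                         PETScWrappers::MPI::SparseMatrix & system_matrix)
{
  // The locally owned DoFs form exactly the ascending, 1:1 IndexSet that
  // this reinit() overload expects.
  const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();

  // Record which entries of the matrix may become nonzero.
  DynamicSparsityPattern dsp(dof_handler.n_dofs(),
                             dof_handler.n_dofs(),
                             locally_owned_dofs);
  DoFTools::make_sparsity_pattern(dof_handler, dsp);

  // Rows and columns are partitioned identically: the matrix is square.
  system_matrix.reinit(locally_owned_dofs,
                       locally_owned_dofs,
                       dsp,
                       mpi_communicator);
}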

◆ reinit() [5/5]

void SparseMatrix::reinit ( const SparseMatrix &  other)

Initialize this matrix to have the same structure as other. This will not copy the values of the other matrix, but you can use copy_from() for this.

Definition at line 106 of file petsc_parallel_sparse_matrix.cc.

◆ get_mpi_communicator()

const MPI_Comm & SparseMatrix::get_mpi_communicator ( ) const
inline virtual

Return a reference to the MPI communicator object in use with this matrix.

Implements PETScWrappers::MatrixBase.

Definition at line 500 of file petsc_parallel_sparse_matrix.h.

◆ matrix_norm_square()

PetscScalar SparseMatrix::matrix_norm_square ( const Vector &  v) const

Return the square of the norm of the vector \(v\) with respect to the norm induced by this matrix, i.e. \(\left(v^\ast,Mv\right)\). This is useful, e.g. in the finite element context, where the \(L_2\) norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

Obviously, the matrix needs to be quadratic for this operation.

The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

Definition at line 644 of file petsc_parallel_sparse_matrix.cc.
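
As a short sketch, the \(L_2\) norm of a finite element function can be obtained from its nodal vector and an assembled mass matrix (mass_matrix and solution are assumed here to be a matrix of this class and a compatible PETScWrappers::MPI::Vector):

  const PetscScalar norm_square = mass_matrix.matrix_norm_square(solution);
  const double      l2_norm     = std::sqrt(PetscRealPart(norm_square));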

◆ matrix_scalar_product()

PetscScalar SparseMatrix::matrix_scalar_product ( const Vector &  u,
const Vector &  v 
) const

Compute the matrix scalar product \(\left(u^\ast,Mv\right)\).

The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

Definition at line 653 of file petsc_parallel_sparse_matrix.cc.

◆ locally_owned_domain_indices()

IndexSet SparseMatrix::locally_owned_domain_indices ( ) const

Return the partitioning of the domain space of this matrix, i.e., the partitioning of the vectors this matrix has to be multiplied with.

Definition at line 663 of file petsc_parallel_sparse_matrix.cc.

◆ locally_owned_range_indices()

IndexSet SparseMatrix::locally_owned_range_indices ( ) const

Return the partitioning of the range space of this matrix, i.e., the partitioning of the vectors that result from matrix-vector products.

Definition at line 687 of file petsc_parallel_sparse_matrix.cc.

◆ mmult()

void SparseMatrix::mmult ( SparseMatrix &  C,
const SparseMatrix &  B,
const MPI::Vector &  V = MPI::Vector() 
) const

Perform the matrix-matrix multiplication \(C = AB\), or, \(C = A \text{diag}(V) B\) given a compatible vector \(V\).

This function calls MatrixBase::mmult() to do the actual work.

Definition at line 711 of file petsc_parallel_sparse_matrix.cc.
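
A short usage sketch (A and B are assumed to be assembled, compatible matrices of this class living on the same communicator; the result matrix need not be initialized beforehand):

  PETScWrappers::MPI::SparseMatrix C;
  A.mmult(C, B);           // C = A * B
  // A.mmult(C, B, V);     // C = A * diag(V) * B for a compatible vector V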

◆ Tmmult()

void SparseMatrix::Tmmult ( SparseMatrix &  C,
const SparseMatrix &  B,
const MPI::Vector &  V = MPI::Vector() 
) const

Perform the matrix-matrix multiplication with the transpose of this, i.e., \(C = A^T B\), or, \(C = A^T \text{diag}(V) B\) given a compatible vector \(V\).

This function calls MatrixBase::Tmmult() to do the actual work.

Definition at line 722 of file petsc_parallel_sparse_matrix.cc.

◆ do_reinit() [1/4]

void SparseMatrix::do_reinit ( const size_type  m,
const size_type  n,
const size_type  local_rows,
const size_type  local_columns,
const size_type  n_nonzero_per_row,
const bool  is_symmetric = false,
const size_type  n_offdiag_nonzero_per_row = 0 
)
private

Do the actual work for the respective reinit() function and the matching constructor, i.e. create a matrix. Getting rid of the previous matrix is left to the caller.

Deprecated:
This overload of do_reinit is deprecated: please use the overload with a sparsity pattern argument instead.

Definition at line 227 of file petsc_parallel_sparse_matrix.cc.

◆ do_reinit() [2/4]

void SparseMatrix::do_reinit ( const size_type  m,
const size_type  n,
const size_type  local_rows,
const size_type  local_columns,
const std::vector< size_type > &  row_lengths,
const bool  is_symmetric = false,
const std::vector< size_type > &  offdiag_row_lengths = std::vector<size_type>() 
)
private

Same as previous function.

Deprecated:
This overload of do_reinit is deprecated: please use the overload with a sparsity pattern argument instead.

Definition at line 260 of file petsc_parallel_sparse_matrix.cc.

◆ do_reinit() [3/4]

template<typename SparsityPatternType >
void SparseMatrix::do_reinit ( const SparsityPatternType &  sparsity_pattern,
const std::vector< size_type > &  local_rows_per_process,
const std::vector< size_type > &  local_columns_per_process,
const unsigned int  this_process,
const bool  preset_nonzero_locations 
)
private

Same as previous functions.

Definition at line 458 of file petsc_parallel_sparse_matrix.cc.

◆ do_reinit() [4/4]

template<typename SparsityPatternType >
void SparseMatrix::do_reinit ( const IndexSet &  local_rows,
const IndexSet &  local_columns,
const SparsityPatternType &  sparsity_pattern 
)
private

Same as previous functions.

Definition at line 321 of file petsc_parallel_sparse_matrix.cc.

Friends And Related Function Documentation

◆ BlockMatrixBase< SparseMatrix >

friend class BlockMatrixBase< SparseMatrix >
friend

To allow calling protected prepare_add() and prepare_set().

Definition at line 491 of file petsc_parallel_sparse_matrix.h.

Member Data Documentation

◆ communicator

MPI_Comm PETScWrappers::MPI::SparseMatrix::communicator
private

Copy of the communicator object to be used for this parallel matrix.

Definition at line 436 of file petsc_parallel_sparse_matrix.h.


The documentation for this class was generated from the following files: