Reference documentation for deal.II version 9.2.0

The classes in this module are wrappers around functionality provided by the PETSc library. They provide a modern object-oriented interface that is compatible with the interfaces of the other linear algebra classes in deal.II. All classes and functions in this group reside in the namespace PETScWrappers.

Modules

 SLEPcWrappers
 The classes in this module are wrappers around functionality provided by the SLEPc library. All classes and functions in this group reside in the namespace SLEPcWrappers.
 

Namespaces

 internal::LinearOperatorImplementation
 
 PETScWrappers
 
 PETScWrappers::MPI
 
 internal
 

Classes

class  PETScWrappers::MPI::BlockSparseMatrix
 
class  PETScWrappers::MPI::BlockVector
 
class  internal::LinearOperatorImplementation::ReinitHelper< Vector >
 
class  internal::LinearOperatorImplementation::ReinitHelper< PETScWrappers::MPI::BlockVector >
 
class  PETScWrappers::FullMatrix
 
class  PETScWrappers::MatrixIterators::const_iterator
 
class  PETScWrappers::MatrixBase
 
class  PETScWrappers::MatrixFree
 
class  PETScWrappers::PreconditionerBase
 
class  PETScWrappers::PreconditionJacobi
 
class  PETScWrappers::PreconditionBlockJacobi
 
class  PETScWrappers::PreconditionSOR
 
class  PETScWrappers::PreconditionSSOR
 
class  PETScWrappers::PreconditionEisenstat
 
class  PETScWrappers::PreconditionICC
 
class  PETScWrappers::PreconditionILU
 
class  PETScWrappers::PreconditionLU
 
class  PETScWrappers::PreconditionBoomerAMG
 
class  PETScWrappers::PreconditionParaSails
 
class  PETScWrappers::PreconditionNone
 
class  PETScWrappers::SolverBase
 
class  PETScWrappers::SolverRichardson
 
class  PETScWrappers::SolverChebychev
 
class  PETScWrappers::SolverCG
 
class  PETScWrappers::SolverBiCG
 
class  PETScWrappers::SolverGMRES
 
class  PETScWrappers::SolverBicgstab
 
class  PETScWrappers::SolverCGS
 
class  PETScWrappers::SolverTFQMR
 
class  PETScWrappers::SolverTCQMR
 
class  PETScWrappers::SolverCR
 
class  PETScWrappers::SolverLSQR
 
class  PETScWrappers::SolverPreOnly
 
class  PETScWrappers::SparseDirectMUMPS
 
class  PETScWrappers::SparseMatrix
 
class  PETScWrappers::MPI::SparseMatrix
 
class  PETScWrappers::MPI::Vector
 
class  PETScWrappers::VectorBase
 

Typedefs

using PETScWrappers::MPI::BlockSparseMatrix::BaseClass = BlockMatrixBase< SparseMatrix >
 
using PETScWrappers::MPI::BlockSparseMatrix::BlockType = BaseClass::BlockType
 
using PETScWrappers::MPI::BlockSparseMatrix::value_type = BaseClass::value_type
 
using PETScWrappers::MPI::BlockSparseMatrix::pointer = BaseClass::pointer
 
using PETScWrappers::MPI::BlockSparseMatrix::const_pointer = BaseClass::const_pointer
 
using PETScWrappers::MPI::BlockSparseMatrix::reference = BaseClass::reference
 
using PETScWrappers::MPI::BlockSparseMatrix::const_reference = BaseClass::const_reference
 
using PETScWrappers::MPI::BlockSparseMatrix::size_type = BaseClass::size_type
 
using PETScWrappers::MPI::BlockSparseMatrix::iterator = BaseClass::iterator
 
using PETScWrappers::MPI::BlockSparseMatrix::const_iterator = BaseClass::const_iterator
 
using PETScWrappers::MPI::BlockVector::BaseClass = BlockVectorBase< Vector >
 
using PETScWrappers::MPI::BlockVector::BlockType = BaseClass::BlockType
 
using PETScWrappers::MPI::BlockVector::value_type = BaseClass::value_type
 
using PETScWrappers::MPI::BlockVector::pointer = BaseClass::pointer
 
using PETScWrappers::MPI::BlockVector::const_pointer = BaseClass::const_pointer
 
using PETScWrappers::MPI::BlockVector::reference = BaseClass::reference
 
using PETScWrappers::MPI::BlockVector::const_reference = BaseClass::const_reference
 
using PETScWrappers::MPI::BlockVector::size_type = BaseClass::size_type
 
using PETScWrappers::MPI::BlockVector::iterator = BaseClass::iterator
 
using PETScWrappers::MPI::BlockVector::const_iterator = BaseClass::const_iterator
 
using PETScWrappers::FullMatrix::size_type = types::global_dof_index
 

Functions

 PETScWrappers::MPI::BlockSparseMatrix::BlockSparseMatrix ()=default
 
 PETScWrappers::MPI::BlockSparseMatrix::~BlockSparseMatrix () override=default
 
BlockSparseMatrix & PETScWrappers::MPI::BlockSparseMatrix::operator= (const BlockSparseMatrix &)
 
BlockSparseMatrix & PETScWrappers::MPI::BlockSparseMatrix::operator= (const double d)
 
void PETScWrappers::MPI::BlockSparseMatrix::reinit (const size_type n_block_rows, const size_type n_block_columns)
 
void PETScWrappers::MPI::BlockSparseMatrix::reinit (const std::vector< IndexSet > &rows, const std::vector< IndexSet > &cols, const BlockDynamicSparsityPattern &bdsp, const MPI_Comm &com)
 
void PETScWrappers::MPI::BlockSparseMatrix::reinit (const std::vector< IndexSet > &sizes, const BlockDynamicSparsityPattern &bdsp, const MPI_Comm &com)
 
void PETScWrappers::MPI::BlockSparseMatrix::vmult (BlockVector &dst, const BlockVector &src) const
 
void PETScWrappers::MPI::BlockSparseMatrix::vmult (BlockVector &dst, const Vector &src) const
 
void PETScWrappers::MPI::BlockSparseMatrix::vmult (Vector &dst, const BlockVector &src) const
 
void PETScWrappers::MPI::BlockSparseMatrix::vmult (Vector &dst, const Vector &src) const
 
void PETScWrappers::MPI::BlockSparseMatrix::Tvmult (BlockVector &dst, const BlockVector &src) const
 
void PETScWrappers::MPI::BlockSparseMatrix::Tvmult (BlockVector &dst, const Vector &src) const
 
void PETScWrappers::MPI::BlockSparseMatrix::Tvmult (Vector &dst, const BlockVector &src) const
 
void PETScWrappers::MPI::BlockSparseMatrix::Tvmult (Vector &dst, const Vector &src) const
 
void PETScWrappers::MPI::BlockSparseMatrix::collect_sizes ()
 
std::vector< IndexSet > PETScWrappers::MPI::BlockSparseMatrix::locally_owned_domain_indices () const

std::vector< IndexSet > PETScWrappers::MPI::BlockSparseMatrix::locally_owned_range_indices () const

const MPI_Comm & PETScWrappers::MPI::BlockSparseMatrix::get_mpi_communicator () const
 
 PETScWrappers::MPI::BlockVector::BlockVector ()=default
 
 PETScWrappers::MPI::BlockVector::BlockVector (const unsigned int n_blocks, const MPI_Comm &communicator, const size_type block_size, const size_type local_size)
 
 PETScWrappers::MPI::BlockVector::BlockVector (const BlockVector &V)
 
 PETScWrappers::MPI::BlockVector::BlockVector (const std::vector< size_type > &block_sizes, const MPI_Comm &communicator, const std::vector< size_type > &local_elements)
 
 PETScWrappers::MPI::BlockVector::BlockVector (const std::vector< IndexSet > &parallel_partitioning, const MPI_Comm &communicator=MPI_COMM_WORLD)
 
 PETScWrappers::MPI::BlockVector::BlockVector (const std::vector< IndexSet > &parallel_partitioning, const std::vector< IndexSet > &ghost_indices, const MPI_Comm &communicator)
 
 PETScWrappers::MPI::BlockVector::~BlockVector () override=default
 
BlockVector & PETScWrappers::MPI::BlockVector::operator= (const value_type s)

BlockVector & PETScWrappers::MPI::BlockVector::operator= (const BlockVector &V)
 
void PETScWrappers::MPI::BlockVector::reinit (const unsigned int n_blocks, const MPI_Comm &communicator, const size_type block_size, const size_type local_size, const bool omit_zeroing_entries=false)
 
void PETScWrappers::MPI::BlockVector::reinit (const std::vector< size_type > &block_sizes, const MPI_Comm &communicator, const std::vector< size_type > &local_sizes, const bool omit_zeroing_entries=false)
 
void PETScWrappers::MPI::BlockVector::reinit (const BlockVector &V, const bool omit_zeroing_entries=false)
 
void PETScWrappers::MPI::BlockVector::reinit (const std::vector< IndexSet > &parallel_partitioning, const MPI_Comm &communicator)
 
void PETScWrappers::MPI::BlockVector::reinit (const std::vector< IndexSet > &parallel_partitioning, const std::vector< IndexSet > &ghost_entries, const MPI_Comm &communicator)
 
void PETScWrappers::MPI::BlockVector::reinit (const unsigned int num_blocks)
 
bool PETScWrappers::MPI::BlockVector::has_ghost_elements () const
 
const MPI_Comm & PETScWrappers::MPI::BlockVector::get_mpi_communicator () const
 
void PETScWrappers::MPI::BlockVector::swap (BlockVector &v)
 
void PETScWrappers::MPI::BlockVector::print (std::ostream &out, const unsigned int precision=3, const bool scientific=true, const bool across=true) const
 
void PETScWrappers::MPI::swap (BlockVector &u, BlockVector &v)
 
template<typename Matrix >
static void internal::LinearOperatorImplementation::ReinitHelper< PETScWrappers::MPI::BlockVector >::reinit_range_vector (const Matrix &matrix, PETScWrappers::MPI::BlockVector &v, bool)
 
template<typename Matrix >
static void internal::LinearOperatorImplementation::ReinitHelper< PETScWrappers::MPI::BlockVector >::reinit_domain_vector (const Matrix &matrix, PETScWrappers::MPI::BlockVector &v, bool)
 
 PETScWrappers::FullMatrix::FullMatrix ()
 
 PETScWrappers::FullMatrix::FullMatrix (const size_type m, const size_type n)
 
void PETScWrappers::FullMatrix::reinit (const size_type m, const size_type n)
 
virtual const MPI_Comm & PETScWrappers::FullMatrix::get_mpi_communicator () const override
 
void PETScWrappers::FullMatrix::do_reinit (const size_type m, const size_type n)
 

Detailed Description

The classes in this module are wrappers around functionality provided by the PETSc library. They provide a modern object-oriented interface that is compatible with the interfaces of the other linear algebra classes in deal.II. All classes and functions in this group reside in the namespace PETScWrappers.

These classes are only available if a PETSc installation was detected during configuration of deal.II. Refer to the README file for more details about this.

Author
Wolfgang Bangerth, 2004
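
As a quick illustration of how these wrappers interact, the following is a minimal sketch that solves a linear system with PETSc's CG solver and a Jacobi preconditioner. The matrix A and the vectors x and b are assumed to have been assembled elsewhere with compatible parallel layouts; the function name solve_system is hypothetical.

#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_control.h>

using namespace dealii;

// Solve A x = b with CG, preconditioned by Jacobi.
void solve_system(PETScWrappers::MPI::SparseMatrix &A,
                  PETScWrappers::MPI::Vector &      x,
                  const PETScWrappers::MPI::Vector &b)
{
  SolverControl                     control(1000, 1e-12 * b.l2_norm());
  PETScWrappers::SolverCG           cg(control, A.get_mpi_communicator());
  PETScWrappers::PreconditionJacobi preconditioner(A);
  cg.solve(A, x, b, preconditioner);
}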

Typedef Documentation

◆ BaseClass [1/2]

Typedef the base class for simpler access to its own alias.

Definition at line 73 of file petsc_block_sparse_matrix.h.

◆ BlockType [1/2]

Typedef the type of the underlying matrix.

Definition at line 78 of file petsc_block_sparse_matrix.h.

◆ value_type [1/2]

Import the alias from the base class.

Definition at line 83 of file petsc_block_sparse_matrix.h.

◆ pointer [1/2]

Definition at line 84 of file petsc_block_sparse_matrix.h.

◆ const_pointer [1/2]

Definition at line 85 of file petsc_block_sparse_matrix.h.

◆ reference [1/2]

Definition at line 86 of file petsc_block_sparse_matrix.h.

◆ const_reference [1/2]

Definition at line 87 of file petsc_block_sparse_matrix.h.

◆ size_type [1/3]

Definition at line 88 of file petsc_block_sparse_matrix.h.

◆ iterator [1/2]

Definition at line 89 of file petsc_block_sparse_matrix.h.

◆ const_iterator [1/2]

Definition at line 90 of file petsc_block_sparse_matrix.h.

◆ BaseClass [2/2]

Typedef the base class for simpler access to its own alias.

Definition at line 67 of file petsc_block_vector.h.

◆ BlockType [2/2]

Typedef the type of the underlying vector.

Definition at line 72 of file petsc_block_vector.h.

◆ value_type [2/2]

Import the alias from the base class.

Definition at line 77 of file petsc_block_vector.h.

◆ pointer [2/2]

Definition at line 78 of file petsc_block_vector.h.

◆ const_pointer [2/2]

Definition at line 79 of file petsc_block_vector.h.

◆ reference [2/2]

Definition at line 80 of file petsc_block_vector.h.

◆ const_reference [2/2]

Definition at line 81 of file petsc_block_vector.h.

◆ size_type [2/3]

Definition at line 82 of file petsc_block_vector.h.

◆ iterator [2/2]

Definition at line 83 of file petsc_block_vector.h.

◆ const_iterator [2/2]

Definition at line 84 of file petsc_block_vector.h.

◆ size_type [3/3]

Declare type for container size.

Definition at line 55 of file petsc_full_matrix.h.

Function Documentation

◆ BlockSparseMatrix()

PETScWrappers::MPI::BlockSparseMatrix::BlockSparseMatrix () = default

Constructor; initializes the matrix to be empty, without any structure, i.e. the matrix is not usable at all. This constructor is therefore only useful for matrices which are members of a class. All other matrices should be created at a point in the data flow where all necessary information is available.

You have to initialize the matrix before usage with reinit(BlockSparsityPattern). The number of blocks per row and column is then determined by that function.

◆ ~BlockSparseMatrix()

PETScWrappers::MPI::BlockSparseMatrix::~BlockSparseMatrix () override = default

Destructor.

◆ operator=() [1/4]

BlockSparseMatrix & PETScWrappers::MPI::BlockSparseMatrix::operator= (const BlockSparseMatrix &m)

Pseudo copy operator that only copies empty objects. The sizes of the two block matrices need to be the same.

Definition at line 27 of file petsc_parallel_block_sparse_matrix.cc.

◆ operator=() [2/4]

BlockSparseMatrix & PETScWrappers::MPI::BlockSparseMatrix::operator= (const double d) [inline]

This operator assigns a scalar to a matrix. Since this usually does not make much sense (should we set all matrix entries to this value? only the nonzero entries of the sparsity pattern?), this operation is only allowed if the value to be assigned is actually zero. This operator only exists to allow for the obvious notation matrix=0, which sets all elements of the matrix to zero but keeps the sparsity pattern previously used.

Definition at line 275 of file petsc_block_sparse_matrix.h.
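
A minimal usage sketch, assuming matrix is an already initialized block matrix:

matrix = 0;    // allowed: zeroes all stored entries, keeps the sparsity pattern
// matrix = 1.; // not allowed: any nonzero scalar triggers an assertion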

◆ reinit() [1/10]

void PETScWrappers::MPI::BlockSparseMatrix::reinit (const size_type n_block_rows, const size_type n_block_columns)

Resize the matrix by setting the number of block rows and columns. This deletes all blocks and replaces them with uninitialized ones, i.e., ones whose sizes are not yet set either. You have to set those by calling the reinit() functions of the blocks themselves. Do not forget to call collect_sizes() on this object afterwards.

The reason that you have to set the sizes of the blocks yourself is that the sizes may vary, the maximum number of elements per row may vary, etc. It is simpler not to reproduce the interface of the SparsityPattern class here, but rather to let the user call whatever function they desire, as shown in the sketch below.
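
A sketch of this workflow for a hypothetical 2x2 block system; owned_rows and owned_cols (std::vector<IndexSet>) and the BlockDynamicSparsityPattern dsp are placeholders for whatever the application provides:

PETScWrappers::MPI::BlockSparseMatrix matrix;
matrix.reinit(2, 2); // the blocks now exist, but have no sizes yet

for (unsigned int i = 0; i < 2; ++i)
  for (unsigned int j = 0; j < 2; ++j)
    matrix.block(i, j).reinit(owned_rows[i],
                              owned_cols[j],
                              dsp.block(i, j),
                              MPI_COMM_WORLD);

matrix.collect_sizes(); // mandatory after resizing the blocks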

◆ reinit() [2/10]

void PETScWrappers::MPI::BlockSparseMatrix::reinit (const std::vector< IndexSet > &rows, const std::vector< IndexSet > &cols, const BlockDynamicSparsityPattern &bdsp, const MPI_Comm &com)

Efficiently reinit the block matrix for a parallel computation. Only the BlockSparsityPattern of the Simple type can efficiently store large sparsity patterns in parallel, so this is the only supported argument. The IndexSets describe the locally owned range of DoFs for each block. Note that the IndexSets need to be ascending and 1:1. For a symmetric structure, hand in the same vector for the first two arguments.

Definition at line 60 of file petsc_parallel_block_sparse_matrix.cc.
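
For example, a sketch in which owned_partitioning is a std::vector<IndexSet> of ascending, 1:1 index sets and bdsp is a matching BlockDynamicSparsityPattern:

PETScWrappers::MPI::BlockSparseMatrix matrix;
matrix.reinit(owned_partitioning, // locally owned rows of each block
              owned_partitioning, // same vector twice: symmetric structure
              bdsp,
              MPI_COMM_WORLD);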

◆ reinit() [3/10]

void PETScWrappers::MPI::BlockSparseMatrix::reinit (const std::vector< IndexSet > &sizes, const BlockDynamicSparsityPattern &bdsp, const MPI_Comm &com)

Same as above but for a symmetric structure only.

Definition at line 99 of file petsc_parallel_block_sparse_matrix.cc.

◆ vmult() [1/4]

void BlockSparseMatrix< number >::vmult ( BlockVector dst,
const BlockVector src 
) const
inline

Matrix-vector multiplication: let \(dst = M*src\) with \(M\) being this matrix.

Definition at line 289 of file petsc_block_sparse_matrix.h.
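
A usage sketch, assuming matrix is an initialized block matrix; the vectors are created with partitionings that match the matrix:

PETScWrappers::MPI::BlockVector src(matrix.locally_owned_domain_indices(),
                                    MPI_COMM_WORLD);
PETScWrappers::MPI::BlockVector dst(matrix.locally_owned_range_indices(),
                                    MPI_COMM_WORLD);
src = 1.;
matrix.vmult(dst, src); // dst = M * src; Tvmult() works analogously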

◆ vmult() [2/4]

void PETScWrappers::MPI::BlockSparseMatrix::vmult (BlockVector &dst, const Vector &src) const [inline]

Matrix-vector multiplication. Just like the previous function, but only applicable if the matrix has only one block column.

Definition at line 297 of file petsc_block_sparse_matrix.h.

◆ vmult() [3/4]

void PETScWrappers::MPI::BlockSparseMatrix::vmult (Vector &dst, const BlockVector &src) const [inline]

Matrix-vector multiplication. Just like the previous function, but only applicable if the matrix has only one block row.

Definition at line 305 of file petsc_block_sparse_matrix.h.

◆ vmult() [4/4]

void PETScWrappers::MPI::BlockSparseMatrix::vmult (Vector &dst, const Vector &src) const [inline]

Matrix-vector multiplication. Just like the previous function, but only applicable if the matrix has only one block.

Definition at line 313 of file petsc_block_sparse_matrix.h.

◆ Tvmult() [1/4]

void PETScWrappers::MPI::BlockSparseMatrix::Tvmult (BlockVector &dst, const BlockVector &src) const [inline]

Matrix-vector multiplication: let \(dst = M^T*src\) with \(M\) being this matrix. This function does the same as vmult() but takes the transposed matrix.

Definition at line 320 of file petsc_block_sparse_matrix.h.

◆ Tvmult() [2/4]

void PETScWrappers::MPI::BlockSparseMatrix::Tvmult (BlockVector &dst, const Vector &src) const [inline]

Matrix-vector multiplication. Just like the previous function, but only applicable if the matrix has only one block row.

Definition at line 328 of file petsc_block_sparse_matrix.h.

◆ Tvmult() [3/4]

void PETScWrappers::MPI::BlockSparseMatrix::Tvmult (Vector &dst, const BlockVector &src) const [inline]

Matrix-vector multiplication. Just like the previous function, but only applicable if the matrix has only one block column.

Definition at line 336 of file petsc_block_sparse_matrix.h.

◆ Tvmult() [4/4]

void PETScWrappers::MPI::BlockSparseMatrix::Tvmult (Vector &dst, const Vector &src) const [inline]

Matrix-vector multiplication. Just like the previous function, but only applicable if the matrix has only one block.

Definition at line 344 of file petsc_block_sparse_matrix.h.

◆ collect_sizes()

void PETScWrappers::MPI::BlockSparseMatrix::collect_sizes ()

This function collects the sizes of the sub-objects and stores them in internal arrays, in order to be able to relay global indices into the matrix to indices into the sub-objects. You must call this function each time after you have changed the size of the sub-objects.

Definition at line 109 of file petsc_parallel_block_sparse_matrix.cc.

◆ locally_owned_domain_indices()

std::vector< IndexSet > PETScWrappers::MPI::BlockSparseMatrix::locally_owned_domain_indices () const

Return the partitioning of the domain space of this matrix, i.e., the partitioning of the vectors this matrix has to be multiplied with.

Definition at line 115 of file petsc_parallel_block_sparse_matrix.cc.

◆ locally_owned_range_indices()

std::vector< IndexSet > PETScWrappers::MPI::BlockSparseMatrix::locally_owned_range_indices () const

Return the partitioning of the range space of this matrix, i.e., the partitioning of the vectors that result from matrix-vector products.

Definition at line 126 of file petsc_parallel_block_sparse_matrix.cc.

◆ get_mpi_communicator() [1/3]

const MPI_Comm & PETScWrappers::MPI::BlockSparseMatrix::get_mpi_communicator () const

Return a reference to the MPI communicator object in use with this matrix.

Definition at line 137 of file petsc_parallel_block_sparse_matrix.cc.

◆ BlockVector() [1/6]

PETScWrappers::MPI::BlockVector::BlockVector () = default

Default constructor. Generate an empty vector without any blocks.

◆ BlockVector() [2/6]

PETScWrappers::MPI::BlockVector::BlockVector (const unsigned int n_blocks, const MPI_Comm &communicator, const size_type block_size, const size_type local_size) [inline, explicit]

Constructor. Generate a block vector with n_blocks blocks, each of which is a parallel vector across communicator with block_size elements, of which local_size elements are stored on the present process.

Definition at line 293 of file petsc_block_vector.h.

◆ BlockVector() [3/6]

PETScWrappers::MPI::BlockVector::BlockVector (const BlockVector &V) [inline]

Copy constructor. Set all the properties of the parallel vector to those of the given argument and copy the elements.

Definition at line 312 of file petsc_block_vector.h.

◆ BlockVector() [4/6]

PETScWrappers::MPI::BlockVector::BlockVector (const std::vector< size_type > &block_sizes, const MPI_Comm &communicator, const std::vector< size_type > &local_elements) [inline]

Constructor. Set the number of blocks to block_sizes.size() and initialize each block with block_sizes[i] zero elements. The individual blocks are distributed across the given communicator, and each stores local_elements[i] elements on the present process.

Definition at line 303 of file petsc_block_vector.h.

◆ BlockVector() [5/6]

PETScWrappers::MPI::BlockVector::BlockVector (const std::vector< IndexSet > &parallel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD) [inline, explicit]

Create a BlockVector with parallel_partitioning.size() blocks, each initialized with the given IndexSet.

Definition at line 322 of file petsc_block_vector.h.

◆ BlockVector() [6/6]

PETScWrappers::MPI::BlockVector::BlockVector (const std::vector< IndexSet > &parallel_partitioning, const std::vector< IndexSet > &ghost_indices, const MPI_Comm &communicator) [inline]

Same as above, but includes ghost elements.

Definition at line 329 of file petsc_block_vector.h.
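
A sketch of both variants, assuming owned and relevant are std::vector<IndexSet> objects describing the locally owned and the locally relevant elements of each block:

PETScWrappers::MPI::BlockVector distributed(owned, MPI_COMM_WORLD);
PETScWrappers::MPI::BlockVector ghosted(owned, relevant, MPI_COMM_WORLD);

// ... write into 'distributed', then make the result readable on each
// process (including ghost entries) by copying it over:
ghosted = distributed;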

◆ ~BlockVector()

PETScWrappers::MPI::BlockVector::~BlockVector () override = default

Destructor. Clears memory.

◆ operator=() [3/4]

BlockVector & PETScWrappers::MPI::BlockVector::operator= (const value_type s) [inline]

Copy operator: fill all locally stored components of the vector with the given scalar value.

Definition at line 338 of file petsc_block_vector.h.

◆ operator=() [4/4]

BlockVector & PETScWrappers::MPI::BlockVector::operator= (const BlockVector &V) [inline]

Copy operator for arguments of the same type.

Definition at line 345 of file petsc_block_vector.h.

◆ reinit() [4/10]

void PETScWrappers::MPI::BlockVector::reinit (const unsigned int n_blocks, const MPI_Comm &communicator, const size_type block_size, const size_type local_size, const bool omit_zeroing_entries = false) [inline]

Reinitialize the BlockVector to contain n_blocks blocks of size block_size, each of which stores local_size elements locally. The communicator argument denotes over which MPI communicator each of these blocks shall communicate.

If omit_zeroing_entries==false, the vector is filled with zeros.

Definition at line 366 of file petsc_block_vector.h.

◆ reinit() [5/10]

void PETScWrappers::MPI::BlockVector::reinit (const std::vector< size_type > &block_sizes, const MPI_Comm &communicator, const std::vector< size_type > &local_sizes, const bool omit_zeroing_entries = false) [inline]

Reinitialize the BlockVector such that it contains block_sizes.size() blocks. Each block is reinitialized to dimension block_sizes[i]. Each of them stores local_sizes[i] elements on the present process.

If the number of blocks is the same as before this function was called, all vectors remain the same and reinit() is called for each vector.

If omit_zeroing_entries==false, the vector is filled with zeros.

Note that you must call this function (or one of the other reinit() functions), rather than the reinit() function of an individual block, to allow the block vector to update its caches of vector sizes. If you call reinit() on one of the blocks, then subsequent actions on this object may yield unpredictable results since they may be routed to the wrong block.

Definition at line 381 of file petsc_block_vector.h.

◆ reinit() [6/10]

void PETScWrappers::MPI::BlockVector::reinit (const BlockVector &V, const bool omit_zeroing_entries = false) [inline]

Change the dimension to that of the vector V. The same applies as for the other reinit() function.

The elements of V are not copied, i.e. this function is the same as calling reinit (V.size(), omit_zeroing_entries).

Note that you must call this function (or one of the other reinit() functions), rather than the reinit() function of an individual block, to allow the block vector to update its caches of vector sizes. If you call reinit() on one of the blocks, then subsequent actions on this object may yield unpredictable results since they may be routed to the wrong block.

Definition at line 399 of file petsc_block_vector.h.

◆ reinit() [7/10]

void PETScWrappers::MPI::BlockVector::reinit (const std::vector< IndexSet > &parallel_partitioning, const MPI_Comm &communicator) [inline]

Reinitialize the BlockVector using IndexSets. See the constructor with the same arguments for details.

Definition at line 410 of file petsc_block_vector.h.

◆ reinit() [8/10]

void PETScWrappers::MPI::BlockVector::reinit (const std::vector< IndexSet > &parallel_partitioning, const std::vector< IndexSet > &ghost_entries, const MPI_Comm &communicator) [inline]

Same as above, but includes ghost entries.

Definition at line 426 of file petsc_block_vector.h.

◆ reinit() [9/10]

void PETScWrappers::MPI::BlockVector::reinit (const unsigned int num_blocks)

Change the number of blocks to num_blocks. The individual blocks will get initialized with zero size, so it is assumed that the user resizes the individual blocks in an appropriate way and calls collect_sizes() afterwards.

Definition at line 29 of file petsc_parallel_block_vector.cc.
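
A sketch of that workflow, with owned_per_block (std::vector<IndexSet>) assumed to be given:

PETScWrappers::MPI::BlockVector v;
v.reinit(owned_per_block.size()); // blocks exist, but have zero size
for (unsigned int b = 0; b < v.n_blocks(); ++b)
  v.block(b).reinit(MPI_COMM_WORLD,
                    owned_per_block[b].size(),        // global size
                    owned_per_block[b].n_elements()); // locally owned size
v.collect_sizes();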

◆ has_ghost_elements()

bool PETScWrappers::MPI::BlockVector::has_ghost_elements () const [inline]

Return whether this vector is a ghosted vector (and thus read-only).

Definition at line 453 of file petsc_block_vector.h.

◆ get_mpi_communicator() [2/3]

const MPI_Comm & PETScWrappers::MPI::BlockVector::get_mpi_communicator () const [inline]

Return a reference to the MPI communicator object in use with this vector.

Definition at line 447 of file petsc_block_vector.h.

◆ swap() [1/2]

void PETScWrappers::MPI::BlockVector::swap (BlockVector &v) [inline]

Swap the contents of this vector and the other vector v. One could do this operation with a temporary variable and copying over the data elements, but this function is significantly more efficient since it only swaps the pointers to the data of the two vectors and therefore does not need to allocate temporary storage and move data around.

Limitation: right now this function only works if both vectors have the same number of blocks. If needed, the numbers of blocks should be exchanged, too.

This function is analogous to the swap() function of all C++ standard containers. Also, there is a global function swap(u,v) that simply calls u.swap(v), again in analogy to standard functions.

Definition at line 465 of file petsc_block_vector.h.
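
A minimal sketch, for two block vectors u and v with the same number of blocks:

u.swap(v);  // constant time: only internal pointers are exchanged
swap(u, v); // equivalent, via the global overload documented below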

◆ print()

void PETScWrappers::MPI::BlockVector::print (std::ostream &out, const unsigned int precision = 3, const bool scientific = true, const bool across = true) const [inline]

Print to a stream.

Definition at line 475 of file petsc_block_vector.h.

◆ swap() [2/2]

void PETScWrappers::MPI::swap (BlockVector &u, BlockVector &v) [inline]

Global function which overloads the default implementation of the C++ standard library, which uses a temporary object. The function simply exchanges the data of the two vectors.

Author
Wolfgang Bangerth, 2000

Definition at line 501 of file petsc_block_vector.h.

◆ reinit_range_vector()

template <typename Matrix>
static void internal::LinearOperatorImplementation::ReinitHelper< PETScWrappers::MPI::BlockVector >::reinit_range_vector (const Matrix &matrix, PETScWrappers::MPI::BlockVector &v, bool) [inline, static]

Definition at line 527 of file petsc_block_vector.h.

◆ reinit_domain_vector()

template <typename Matrix>
static void internal::LinearOperatorImplementation::ReinitHelper< PETScWrappers::MPI::BlockVector >::reinit_domain_vector (const Matrix &matrix, PETScWrappers::MPI::BlockVector &v, bool) [inline, static]

Definition at line 537 of file petsc_block_vector.h.

◆ FullMatrix() [1/2]

PETScWrappers::FullMatrix::FullMatrix ()

Default constructor. Create an empty matrix.

Definition at line 27 of file petsc_full_matrix.cc.

◆ FullMatrix() [2/2]

PETScWrappers::FullMatrix::FullMatrix (const size_type m, const size_type n)

Create a full matrix of dimensions m times n.

Definition at line 33 of file petsc_full_matrix.cc.

◆ reinit() [10/10]

void PETScWrappers::FullMatrix::reinit (const size_type m, const size_type n)

Throw away the present matrix and generate one that has the same properties as if it were created by the constructor of this class with the same argument list as the present function.

Definition at line 39 of file petsc_full_matrix.cc.
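
A small sketch of creating and filling such a matrix (the entry values are arbitrary):

PETScWrappers::FullMatrix M;
M.reinit(3, 3); // same effect as constructing FullMatrix(3, 3) directly
for (unsigned int i = 0; i < 3; ++i)
  for (unsigned int j = 0; j < 3; ++j)
    M.set(i, j, 1. / (i + j + 1.));
M.compress(VectorOperation::insert); // finalize before using the matrix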

◆ get_mpi_communicator() [3/3]

const MPI_Comm & PETScWrappers::FullMatrix::get_mpi_communicator () const [override, virtual]

Return a reference to the MPI communicator object in use with this matrix. Since this is a sequential matrix, it returns the MPI_COMM_SELF communicator.

Implements PETScWrappers::MatrixBase.

Definition at line 61 of file petsc_full_matrix.cc.

◆ do_reinit()

void PETScWrappers::FullMatrix::do_reinit (const size_type m, const size_type n) [private]

Do the actual work for the respective reinit() function and the matching constructor, i.e. create a matrix. Getting rid of the previous matrix is left to the caller.

Definition at line 50 of file petsc_full_matrix.cc.
