Reference documentation for deal.II version 9.2.0
PETScWrappers::MPI::Vector Class Reference

#include <deal.II/lac/petsc_vector.h>

Inheritance diagram for PETScWrappers::MPI::Vector:

Public Types

using size_type = types::global_dof_index
 
- Public Types inherited from PETScWrappers::VectorBase
using value_type = PetscScalar
 
using real_type = PetscReal
 
using size_type = types::global_dof_index
 
using reference = internal::VectorReference
 
using const_reference = const internal::VectorReference
 

Public Member Functions

 Vector ()
 
 Vector (const MPI_Comm &communicator, const size_type n, const size_type local_size)
 
template<typename Number >
 Vector (const MPI_Comm &communicator, const ::Vector< Number > &v, const size_type local_size)
 
 Vector (const MPI_Comm &communicator, const VectorBase &v, const size_type local_size)
 
 Vector (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator)
 
 Vector (const IndexSet &local, const MPI_Comm &communicator)
 
 Vector (const Vector &v)
 
virtual void clear () override
 
Vector & operator= (const Vector &v)
 
Vector & operator= (const PetscScalar s)
 
template<typename number >
Vector & operator= (const ::Vector< number > &v)
 
void reinit (const MPI_Comm &communicator, const size_type N, const size_type local_size, const bool omit_zeroing_entries=false)
 
void reinit (const Vector &v, const bool omit_zeroing_entries=false)
 
void reinit (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator)
 
void reinit (const IndexSet &local, const MPI_Comm &communicator)
 
const MPI_Comm & get_mpi_communicator () const override
 
void print (std::ostream &out, const unsigned int precision=3, const bool scientific=true, const bool across=true) const
 
bool all_zero () const
 
- Public Member Functions inherited from PETScWrappers::VectorBase
 VectorBase ()
 
 VectorBase (const VectorBase &v)
 
 VectorBase (const Vec &v)
 
VectorBase & operator= (const VectorBase &)=delete
 
virtual ~VectorBase () override
 
void compress (const VectorOperation::values operation)
 
VectorBase & operator= (const PetscScalar s)
 
bool operator== (const VectorBase &v) const
 
bool operator!= (const VectorBase &v) const
 
size_type size () const
 
size_type local_size () const
 
std::pair< size_type, size_type > local_range () const
 
bool in_local_range (const size_type index) const
 
IndexSet locally_owned_elements () const
 
bool has_ghost_elements () const
 
void update_ghost_values () const
 
reference operator() (const size_type index)
 
PetscScalar operator() (const size_type index) const
 
reference operator[] (const size_type index)
 
PetscScalar operator[] (const size_type index) const
 
void set (const std::vector< size_type > &indices, const std::vector< PetscScalar > &values)
 
void extract_subvector_to (const std::vector< size_type > &indices, std::vector< PetscScalar > &values) const
 
template<typename ForwardIterator , typename OutputIterator >
void extract_subvector_to (const ForwardIterator indices_begin, const ForwardIterator indices_end, OutputIterator values_begin) const
 
void add (const std::vector< size_type > &indices, const std::vector< PetscScalar > &values)
 
void add (const std::vector< size_type > &indices, const ::Vector< PetscScalar > &values)
 
void add (const size_type n_elements, const size_type *indices, const PetscScalar *values)
 
PetscScalar operator* (const VectorBase &vec) const
 
real_type norm_sqr () const
 
PetscScalar mean_value () const
 
real_type l1_norm () const
 
real_type l2_norm () const
 
real_type lp_norm (const real_type p) const
 
real_type linfty_norm () const
 
PetscScalar add_and_dot (const PetscScalar a, const VectorBase &V, const VectorBase &W)
 
real_type min () const
 
real_type max () const
 
bool all_zero () const
 
bool is_non_negative () const
 
VectorBase & operator*= (const PetscScalar factor)
 
VectorBase & operator/= (const PetscScalar factor)
 
VectorBase & operator+= (const VectorBase &V)
 
VectorBase & operator-= (const VectorBase &V)
 
void add (const PetscScalar s)
 
void add (const PetscScalar a, const VectorBase &V)
 
void add (const PetscScalar a, const VectorBase &V, const PetscScalar b, const VectorBase &W)
 
void sadd (const PetscScalar s, const VectorBase &V)
 
void sadd (const PetscScalar s, const PetscScalar a, const VectorBase &V)
 
void scale (const VectorBase &scaling_factors)
 
void equ (const PetscScalar a, const VectorBase &V)
 
void write_ascii (const PetscViewerFormat format=PETSC_VIEWER_DEFAULT)
 
void print (std::ostream &out, const unsigned int precision=3, const bool scientific=true, const bool across=true) const
 
void swap (VectorBase &v)
 
 operator const Vec & () const
 
std::size_t memory_consumption () const
 
- Public Member Functions inherited from Subscriptor
 Subscriptor ()
 
 Subscriptor (const Subscriptor &)
 
 Subscriptor (Subscriptor &&) noexcept
 
virtual ~Subscriptor ()
 
Subscriptor & operator= (const Subscriptor &)
 
Subscriptor & operator= (Subscriptor &&) noexcept
 
void subscribe (std::atomic< bool > *const validity, const std::string &identifier="") const
 
void unsubscribe (std::atomic< bool > *const validity, const std::string &identifier="") const
 
unsigned int n_subscriptions () const
 
template<typename StreamType >
void list_subscribers (StreamType &stream) const
 
void list_subscribers () const
 
template<class Archive >
void serialize (Archive &ar, const unsigned int version)
 

Protected Member Functions

virtual void create_vector (const size_type n, const size_type local_size)
 
virtual void create_vector (const size_type n, const size_type local_size, const IndexSet &ghostnodes)
 
- Protected Member Functions inherited from PETScWrappers::VectorBase
void do_set_add_operation (const size_type n_elements, const size_type *indices, const PetscScalar *values, const bool add_values)
 

Private Attributes

MPI_Comm communicator
 

Related Functions

(Note that these are not member functions.)

void swap (Vector &u, Vector &v)
 

Additional Inherited Members

- Static Public Member Functions inherited from Subscriptor
static ::ExceptionBase & ExcInUse (int arg1, std::string arg2, std::string arg3)
 
static ::ExceptionBase & ExcNoSubscriber (std::string arg1, std::string arg2)
 
- Protected Attributes inherited from PETScWrappers::VectorBase
Vec vector
 
bool ghosted
 
IndexSet ghost_indices
 
VectorOperation::values last_action
 
bool obtained_ownership
 

Detailed Description

Implementation of a parallel vector class based on PETSc and using MPI communication to synchronize distributed operations. All the functionality is actually in the base class, except for the calls to generate a parallel vector. This is possible since PETSc works only on an abstract vector type and internally dispatches to functions that do the actual work depending on the actual vector type (much like using virtual functions). Only the functions creating a vector of a specific type differ, and are implemented in this particular class.
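
As a minimal sketch (not part of the class interface; the global size of 100 and the even, contiguous partitioning are arbitrary choices for illustration), such a vector might be created and used as follows:

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/petsc_vector.h>
#include <iostream>

int main(int argc, char **argv)
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  const unsigned int rank    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  // Split 100 global entries into contiguous chunks, one per process.
  const types::global_dof_index n     = 100;
  const types::global_dof_index begin = (n * rank) / n_procs;
  const types::global_dof_index end   = (n * (rank + 1)) / n_procs;

  IndexSet locally_owned(n);
  locally_owned.add_range(begin, end);

  PETScWrappers::MPI::Vector v(locally_owned, MPI_COMM_WORLD);

  // Each process writes its own range of elements ...
  for (types::global_dof_index i = begin; i < end; ++i)
    v(i) = 1.0;
  v.compress(VectorOperation::insert);

  // ... while reductions such as l2_norm() are collective and must be
  // called on all processes.
  const double norm = v.l2_norm();
  if (rank == 0)
    std::cout << "||v||_2 = " << norm << std::endl;
}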

Parallel communication model

The parallel functionality of PETSc is built on top of the Message Passing Interface (MPI). MPI's communication model is built on collective communications: if one process wants something from another, that other process has to be willing to accept this communication. A process cannot query data from another process by calling a remote function without that other process expecting such a transaction. The consequence is that most of the operations in the base class of this class have to be called collectively. For example, if you want to compute the l2 norm of a parallel vector, all processes across which this vector is shared have to call the l2_norm function. If you don't do this, but instead only call the l2_norm function on one process, then the following happens: this one process will call one of the collective MPI functions and wait for all the other processes to join in. Since the other processes don't call this function, you will either get a time-out on the first process, or, worse, by the time the next call to a PETSc function generates an MPI message on the other processes, you will get a cryptic message that only a subset of processes attempted a communication. These bugs can be very hard to figure out, unless you are well acquainted with the communication model of MPI and know which functions may generate MPI messages.

One particular case, where an MPI message may be generated unexpectedly is discussed below.

Accessing individual elements of a vector

PETSc does allow read access to individual elements of a vector, but in the distributed case only to elements that are stored locally. We implement this through calls like d=vec(i). However, if you access an element outside the locally stored range, an exception is generated.
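
As an illustration (a code sketch, not part of the interface description; it assumes a vector v set up as in the sketch above), read access can be restricted to the locally stored range like this:

// The half-open range [range.first, range.second) of elements stored locally:
const std::pair<PETScWrappers::MPI::Vector::size_type,
                PETScWrappers::MPI::Vector::size_type> range = v.local_range();

PetscScalar local_sum = 0;
for (auto i = range.first; i < range.second; ++i)
  local_sum += v(i);                 // safe: element i is stored locally

// Alternatively, check an individual index first:
//   if (v.in_local_range(j))
//     local_sum += v(j);
// Reading an element outside the locally stored range raises an exception.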

In contrast to read access, PETSc (and the respective deal.II wrapper classes) allows one to write (or add) to individual elements of vectors, even if they are stored on a different process. You can do this by writing, for example, vec(i)=d or vec(i)+=d, or similar operations. There is one catch, however, that may lead to very confusing error messages: PETSc requires application programs to call the compress() function when they switch from adding to elements to writing to elements. The reasoning is that all processes might accumulate addition operations to elements, even if multiple processes write to the same elements. By the time we call compress() the next time, all these additions are executed. However, if one process adds to an element, and another overwrites it, the order of execution would yield non-deterministic behavior if we don't make sure that a synchronization with compress() happens in between.

In order to make sure these calls to compress() happen at the appropriate time, the deal.II wrappers keep a state variable that stores which operation is presently allowed: additions or writes. If the wrapper encounters an operation of the opposite kind, it calls compress() and flips the state. This can sometimes lead to very confusing behavior, in code that may for example look like this:

...
// do some write operations on the vector
for (unsigned int i=0; i<vector.size(); ++i)
  vector(i) = i;

// do some additions to vector elements, but only for some elements
for (unsigned int i=0; i<vector.size(); ++i)
  if (some_condition(i) == true)
    vector(i) += 1;

// do another collective operation
const double norm = vector.l2_norm();

This code can run into trouble: by the time we see the first addition operation, we need to flush the overwrite buffers for the vector, and the deal.II library will do so by calling compress(). However, it will only do so for all processes that actually do an addition – if the condition is never true for one of the processes, then this one will not get to the actual compress() call, whereas all the other ones do. This gets us into trouble, since all the other processes hang in the call to flush the write buffers, while the one remaining process advances to the call to compute the l2 norm. At this point, you will get an error that some operation was attempted by only a subset of processes. This behavior may seem surprising, unless you know that write/addition operations on single elements may trigger this kind of communication.

The problem described here may be avoided by placing additional calls to compress(), or making sure that all processes do the same type of operations at the same time, for example by placing zero additions if necessary.
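
A possible fix along these lines (a sketch, not taken from the library; some_condition is the same placeholder used above) separates the two phases by explicit, collective calls to compress():

// write phase; all processes participate
for (unsigned int i=0; i<vector.size(); ++i)
  vector(i) = i;
vector.compress(VectorOperation::insert);   // collective: called by every process

// add phase; even processes for which some_condition() never holds
// reach the collective compress() call below
for (unsigned int i=0; i<vector.size(); ++i)
  if (some_condition(i) == true)
    vector(i) += 1;
vector.compress(VectorOperation::add);      // collective: called by every process

// now the collective reduction is safe
const double norm = vector.l2_norm();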

See also
vectors with ghost elements
Author
Wolfgang Bangerth, 2004

Definition at line 158 of file petsc_vector.h.

Member Typedef Documentation

◆ size_type

Declare type for container size.

Definition at line 164 of file petsc_vector.h.

Constructor & Destructor Documentation

◆ Vector() [1/7]

PETScWrappers::MPI::Vector::Vector ()

Default constructor. Initialize the vector as empty.

Definition at line 31 of file petsc_parallel_vector.cc.

◆ Vector() [2/7]

PETScWrappers::MPI::Vector::Vector (const MPI_Comm &communicator, const size_type n, const size_type local_size)
explicit

Constructor. Set dimension to n and initialize all elements with zero.

  • local_size denotes the size of the chunk that shall be stored on the present process.
  • communicator denotes the MPI communicator over which the different parts of the vector shall communicate

The constructor is made explicit to avoid accidents like this: v=0;. Presumably, the user wants to set every element of the vector to zero, but instead, what happens is this call: v=Vector<number>(0);, i.e. the vector is replaced by one of length zero.

Definition at line 42 of file petsc_parallel_vector.cc.

◆ Vector() [3/7]

template<typename Number >
PETScWrappers::MPI::Vector::Vector (const MPI_Comm &communicator, const ::Vector< Number > &v, const size_type local_size)
explicit

Copy-constructor from deal.II vectors. Sets the dimension to that of the given vector, and copies all elements.

  • local_size denotes the size of the chunk that shall be stored on the present process.
  • communicator denotes the MPI communicator over which the different parts of the vector shall communicate

◆ Vector() [4/7]

PETScWrappers::MPI::Vector::Vector (const MPI_Comm &communicator, const VectorBase &v, const size_type local_size)
explicit

Copy constructor that copies the values from a PETSc wrapper vector class.

  • local_size denotes the size of the chunk that shall be stored on the present process.
  • communicator denotes the MPI communicator over which the different parts of the vector shall communicate

Definition at line 52 of file petsc_parallel_vector.cc.

◆ Vector() [5/7]

PETScWrappers::MPI::Vector::Vector (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator)

Construct a new parallel ghosted PETSc vector from IndexSets.

Note that local must be ascending and 1:1, see IndexSet::is_ascending_and_one_to_one(). In particular, the DoFs in local need to be contiguous, meaning you can only create vectors from a DoFHandler with several finite element components if they are not reordered by component (use a PETScWrappers::BlockVector otherwise). The global size of the vector is determined by local.size(). The global indices in ghost are supplied as ghost indices so that they can be read locally.

Note that the ghost IndexSet may be empty and that any indices already contained in local are ignored during construction. That way, the ghost parameter can equal the set of locally relevant degrees of freedom, see step-32.

Note
This operation always creates a ghosted vector, which is considered read-only.
See also
vectors with ghost elements

Definition at line 72 of file petsc_parallel_vector.cc.
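
As an illustration (a hypothetical setup, assuming the deal.II headers and namespace from the earlier sketch; the chunk size of 10 and the choice of ghost index are arbitrary), a ghosted vector can be filled by assembling into a non-ghosted vector first and then copying:

const unsigned int rank    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
const types::global_dof_index n = 10 * n_procs;

IndexSet locally_owned(n);
locally_owned.add_range(10 * rank, 10 * (rank + 1));

// additionally request read access to the first entry of the next process
IndexSet ghost(n);
if (rank + 1 < n_procs)
  ghost.add_index(10 * (rank + 1));

// assemble into a vector without ghost elements ...
PETScWrappers::MPI::Vector distributed(locally_owned, MPI_COMM_WORLD);
for (const auto i : locally_owned)
  distributed(i) = i;
distributed.compress(VectorOperation::insert);

// ... and copy into the (read-only) ghosted vector, which also makes the
// ghost entries available for local read access:
PETScWrappers::MPI::Vector ghosted(locally_owned, ghost, MPI_COMM_WORLD);
ghosted = distributed;

if (rank + 1 < n_procs)
  std::cout << "ghost entry on rank " << rank << ": "
            << ghosted(10 * (rank + 1)) << std::endl;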

◆ Vector() [6/7]

PETScWrappers::MPI::Vector::Vector (const IndexSet &local, const MPI_Comm &communicator)
explicit

Construct a new parallel PETSc vector without ghost elements from an IndexSet.

Note that local must be ascending and 1:1, see IndexSet::is_ascending_and_one_to_one(). In particular, the DoFs in local need to be contiguous, meaning you can only create vectors from a DoFHandler with several finite element components if they are not reordered by component (use a PETScWrappers::BlockVector otherwise).

Definition at line 102 of file petsc_parallel_vector.cc.

◆ Vector() [7/7]

PETScWrappers::MPI::Vector::Vector (const Vector &v)

Copy constructor.

Definition at line 88 of file petsc_parallel_vector.cc.

Member Function Documentation

◆ clear()

void PETScWrappers::MPI::Vector::clear ()
override virtual

Release all memory and return to a state just like after having called the default constructor.

Reimplemented from PETScWrappers::VectorBase.

Definition at line 150 of file petsc_parallel_vector.cc.

◆ operator=() [1/3]

Vector & PETScWrappers::MPI::Vector::operator= (const Vector &v)

Copy the given vector. Resize the present vector if necessary. Also take over the MPI communicator of v.

Definition at line 113 of file petsc_parallel_vector.cc.

◆ operator=() [2/3]

Vector& PETScWrappers::MPI::Vector::operator= ( const PetscScalar  s)

Set all components of the vector to the given number s. Simply pass this down to the base class, but we still need to declare this function to make the example given in the discussion about making the constructor explicit work.

◆ operator=() [3/3]

template<typename number >
Vector& PETScWrappers::MPI::Vector::operator= ( const ::Vector< number > &  v)

Copy the values of a deal.II vector (as opposed to those of the PETSc vector wrapper class) into this object.

Contrary to the case of sequential vectors, this operator requires that the present vector already has the correct size, since we need to have a partition and a communicator present which we otherwise can't get from the source vector.
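
For example (a sketch; locally_owned and the communicator are assumed to be set up as in the earlier sketches, and the serial deal.II vector requires #include <deal.II/lac/vector.h>):

Vector<double> serial(100);                       // a deal.II vector, replicated on every process
// ... fill 'serial' ...

PETScWrappers::MPI::Vector parallel;
parallel.reinit(locally_owned, MPI_COMM_WORLD);   // establish size and partition first
parallel = serial;                                // now the element-wise copy is well defined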

◆ reinit() [1/4]

void PETScWrappers::MPI::Vector::reinit (const MPI_Comm &communicator, const size_type N, const size_type local_size, const bool omit_zeroing_entries = false)

Change the dimension of the vector to N. It is unspecified how resizing the vector affects the memory allocation of this object; i.e., it is not guaranteed that resizing it to a smaller size actually also reduces memory consumption, or if for efficiency the same amount of memory is used for less data.

local_size denotes how many of the N values shall be stored locally on the present process.

communicator denotes the MPI communicator henceforth to be used for this vector.

If omit_zeroing_entries is false, the vector is filled with zeros. Otherwise, the elements are left in an unspecified state.

Definition at line 161 of file petsc_parallel_vector.cc.
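
A hypothetical usage sketch (the global size of 1000 is arbitrary; the local sizes chosen by the individual processes must add up to N):

const unsigned int rank    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

const types::global_dof_index N = 1000;
// distribute N as evenly as possible; the remainders go to the first ranks
const types::global_dof_index local_size =
  N / n_procs + (rank < N % n_procs ? 1 : 0);

PETScWrappers::MPI::Vector v;
v.reinit(MPI_COMM_WORLD, N, local_size);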

◆ reinit() [2/4]

void PETScWrappers::MPI::Vector::reinit (const Vector &v, const bool omit_zeroing_entries = false)

Change the dimension to that of the vector v, and also take over the partitioning into local sizes as well as the MPI communicator. The same applies as for the other reinit function.

The elements of v are not copied, i.e. this function is the same as calling reinit(v.size(), v.local_size(), omit_zeroing_entries).

Definition at line 204 of file petsc_parallel_vector.cc.

◆ reinit() [3/4]

void PETScWrappers::MPI::Vector::reinit (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator)

Reinit as a vector with ghost elements. See the constructor with same signature for more details.

See also
vectors with ghost elements

Definition at line 222 of file petsc_parallel_vector.cc.

◆ reinit() [4/4]

void PETScWrappers::MPI::Vector::reinit (const IndexSet &local, const MPI_Comm &communicator)

Reinit as a vector without ghost elements. See constructor with same signature for more details.

See also
vectors with ghost elements

Definition at line 240 of file petsc_parallel_vector.cc.

◆ get_mpi_communicator()

const MPI_Comm & PETScWrappers::MPI::Vector::get_mpi_communicator () const
override virtual

Return a reference to the MPI communicator object in use with this vector.

Reimplemented from PETScWrappers::VectorBase.

◆ print()

void PETScWrappers::MPI::Vector::print (std::ostream &out, const unsigned int precision = 3, const bool scientific = true, const bool across = true) const

Print to a stream. precision denotes the desired precision with which values shall be printed, scientific whether scientific notation shall be used. If across is true then the vector is printed in a line, while if false then the elements are printed on a separate line each.

Note
This function overloads the one in the base class to ensure that the right thing happens for parallel vectors that are distributed across processors.

Definition at line 355 of file petsc_parallel_vector.cc.

◆ all_zero()

bool PETScWrappers::MPI::Vector::all_zero () const

Return whether the vector contains only elements with value zero. This is a collective operation. This function is expensive, because potentially all elements have to be checked.

Note
This function overloads the one in the base class to make this a collective operation.

Definition at line 340 of file petsc_parallel_vector.cc.

◆ create_vector() [1/2]

void PETScWrappers::MPI::Vector::create_vector (const size_type n, const size_type local_size)
protected virtual

Create a vector of length n. For this class, we create a parallel vector. n denotes the total size of the vector to be created. local_size denotes how many of these elements shall be stored locally.

Definition at line 254 of file petsc_parallel_vector.cc.

◆ create_vector() [2/2]

void PETScWrappers::MPI::Vector::create_vector (const size_type n, const size_type local_size, const IndexSet &ghostnodes)
protected virtual

Create a vector of global length n, local size local_size and with the specified ghost indices. Note that you need to call update_ghost_values() before accessing those.

Definition at line 270 of file petsc_parallel_vector.cc.

Friends And Related Function Documentation

◆ swap()

void swap (Vector &u, Vector &v)
related

Global swap() function that overloads the default implementation of the C++ standard library, which uses a temporary object. This function simply exchanges the data of the two vectors.

Author
Wolfgang Bangerth, 2004

Definition at line 438 of file petsc_vector.h.
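
A brief usage sketch (u and v are assumed to be already initialized PETScWrappers::MPI::Vector objects):

// found via argument-dependent lookup; exchanges the data of the two
// vectors instead of copying elements through a temporary
swap(u, v);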

Member Data Documentation

◆ communicator

MPI_Comm PETScWrappers::MPI::Vector::communicator
private

Copy of the communicator object to be used for this parallel vector.

Definition at line 422 of file petsc_vector.h.


The documentation for this class was generated from the following files:
petsc_vector.h
petsc_parallel_vector.cc