Reference documentation for deal.II version 9.1.1
LinearAlgebra::distributed::Vector< Number, MemorySpace > Class Template Reference

#include <deal.II/lac/la_parallel_vector.h>

Inheritance diagram for LinearAlgebra::distributed::Vector< Number, MemorySpace >:

Public Member Functions

1: Basic Object-handling
 Vector ()
 
 Vector (const Vector< Number, MemorySpace > &in_vector)
 
 Vector (const size_type size)
 
 Vector (const IndexSet &local_range, const IndexSet &ghost_indices, const MPI_Comm communicator)
 
 Vector (const IndexSet &local_range, const MPI_Comm communicator)
 
 Vector (const std::shared_ptr< const Utilities::MPI::Partitioner > &partitioner)
 
virtual ~Vector () override
 
void reinit (const size_type size, const bool omit_zeroing_entries=false)
 
template<typename Number2 >
void reinit (const Vector< Number2, MemorySpace > &in_vector, const bool omit_zeroing_entries=false)
 
void reinit (const IndexSet &local_range, const IndexSet &ghost_indices, const MPI_Comm communicator)
 
void reinit (const IndexSet &local_range, const MPI_Comm communicator)
 
void reinit (const std::shared_ptr< const Utilities::MPI::Partitioner > &partitioner)
 
void swap (Vector< Number, MemorySpace > &v)
 
Vector< Number, MemorySpace > & operator= (const Vector< Number, MemorySpace > &in_vector)
 
template<typename Number2 >
Vector< Number, MemorySpace > & operator= (const Vector< Number2, MemorySpace > &in_vector)
 
Vector< Number, MemorySpace > & operator= (const PETScWrappers::MPI::Vector &petsc_vec)
 
Vector< Number, MemorySpace > & operator= (const TrilinosWrappers::MPI::Vector &trilinos_vec)
 
2: Parallel data exchange
virtual void compress (::VectorOperation::values operation) override
 
void update_ghost_values () const
 
void compress_start (const unsigned int communication_channel=0, ::VectorOperation::values operation=VectorOperation::add)
 
void compress_finish (::VectorOperation::values operation)
 
void update_ghost_values_start (const unsigned int communication_channel=0) const
 
void update_ghost_values_finish () const
 
void zero_out_ghosts () const
 
bool has_ghost_elements () const
 
template<typename Number2 >
void copy_locally_owned_data_from (const Vector< Number2, MemorySpace > &src)
 
3: Implementation of VectorSpaceVector
virtual void reinit (const VectorSpaceVector< Number > &V, const bool omit_zeroing_entries=false) override
 
virtual Vector< Number, MemorySpace > & operator*= (const Number factor) override
 
virtual Vector< Number, MemorySpace > & operator/= (const Number factor) override
 
virtual Vector< Number, MemorySpace > & operator+= (const VectorSpaceVector< Number > &V) override
 
virtual Vector< Number, MemorySpace > & operator-= (const VectorSpaceVector< Number > &V) override
 
virtual void import (const LinearAlgebra::ReadWriteVector< Number > &V, VectorOperation::values operation, std::shared_ptr< const CommunicationPatternBase > communication_pattern=std::shared_ptr< const CommunicationPatternBase >()) override
 
virtual Number operator* (const VectorSpaceVector< Number > &V) const override
 
virtual void add (const Number a) override
 
virtual void add (const Number a, const VectorSpaceVector< Number > &V) override
 
virtual void add (const Number a, const VectorSpaceVector< Number > &V, const Number b, const VectorSpaceVector< Number > &W) override
 
virtual void add (const std::vector< size_type > &indices, const std::vector< Number > &values)
 
virtual void sadd (const Number s, const Number a, const VectorSpaceVector< Number > &V) override
 
virtual void scale (const VectorSpaceVector< Number > &scaling_factors) override
 
virtual void equ (const Number a, const VectorSpaceVector< Number > &V) override
 
virtual real_type l1_norm () const override
 
virtual real_type l2_norm () const override
 
real_type norm_sqr () const
 
virtual real_type linfty_norm () const override
 
virtual Number add_and_dot (const Number a, const VectorSpaceVector< Number > &V, const VectorSpaceVector< Number > &W) override
 
virtual size_type size () const override
 
virtual ::IndexSet locally_owned_elements () const override
 
virtual void print (std::ostream &out, const unsigned int precision=3, const bool scientific=true, const bool across=true) const override
 
virtual std::size_t memory_consumption () const override
 
4: Other vector operations not included in VectorSpaceVector
virtual Vector< Number, MemorySpace > & operator= (const Number s) override
 
template<typename OtherNumber >
void add (const std::vector< size_type > &indices, const ::Vector< OtherNumber > &values)
 
template<typename OtherNumber >
void add (const size_type n_elements, const size_type *indices, const OtherNumber *values)
 
void sadd (const Number s, const Vector< Number, MemorySpace > &V)
 
void sadd (const Number s, const Number a, const Vector< Number, MemorySpace > &V, const Number b, const Vector< Number, MemorySpace > &W)
 
void equ (const Number a, const Vector< Number, MemorySpace > &u, const Number b, const Vector< Number, MemorySpace > &v)
 
5: Entry access and local data representation
size_type local_size () const
 
std::pair< size_type, size_type > local_range () const
 
bool in_local_range (const size_type global_index) const
 
size_type n_ghost_entries () const
 
const IndexSet & ghost_elements () const
 
bool is_ghost_entry (const types::global_dof_index global_index) const
 
iterator begin ()
 
const_iterator begin () const
 
iterator end ()
 
const_iterator end () const
 
Number operator() (const size_type global_index) const
 
Number & operator() (const size_type global_index)
 
Number operator[] (const size_type global_index) const
 
Number & operator[] (const size_type global_index)
 
Number local_element (const size_type local_index) const
 
Number & local_element (const size_type local_index)
 
Number * get_values () const
 
template<typename OtherNumber >
void extract_subvector_to (const std::vector< size_type > &indices, std::vector< OtherNumber > &values) const
 
template<typename ForwardIterator , typename OutputIterator >
void extract_subvector_to (ForwardIterator indices_begin, const ForwardIterator indices_end, OutputIterator values_begin) const
 
virtual bool all_zero () const override
 
virtual Number mean_value () const override
 
real_type lp_norm (const real_type p) const
 
6: Mixed stuff
const MPI_Comm & get_mpi_communicator () const
 
const std::shared_ptr< const Utilities::MPI::Partitioner > & get_partitioner () const
 
bool partitioners_are_compatible (const Utilities::MPI::Partitioner &part) const
 
bool partitioners_are_globally_compatible (const Utilities::MPI::Partitioner &part) const
 
void set_ghost_state (const bool ghosted) const
 
- Public Member Functions inherited from LinearAlgebra::VectorSpaceVector< Number >
virtual void compress (VectorOperation::values)
 
virtual ~VectorSpaceVector ()=default
 
- Public Member Functions inherited from Subscriptor
 Subscriptor ()
 
 Subscriptor (const Subscriptor &)
 
 Subscriptor (Subscriptor &&) noexcept
 
virtual ~Subscriptor ()
 
Subscriptor & operator= (const Subscriptor &)
 
Subscriptor & operator= (Subscriptor &&) noexcept
 
void subscribe (std::atomic< bool > *const validity, const std::string &identifier="") const
 
void unsubscribe (std::atomic< bool > *const validity, const std::string &identifier="") const
 
unsigned int n_subscriptions () const
 
template<typename StreamType >
void list_subscribers (StreamType &stream) const
 
void list_subscribers () const
 
template<class Archive >
void serialize (Archive &ar, const unsigned int version)
 

Static Public Member Functions

static ::ExceptionBase & ExcVectorTypeNotCompatible ()
 
static ::ExceptionBase & ExcNotAllowedForCuda ()
 
static ::ExceptionBase & ExcNonMatchingElements (Number arg1, Number arg2, unsigned int arg3)
 
static ::ExceptionBase & ExcAccessToNonLocalElement (size_type arg1, size_type arg2, size_type arg3, size_type arg4)
 
- Static Public Member Functions inherited from Subscriptor
static ::ExceptionBase & ExcInUse (int arg1, std::string arg2, std::string arg3)
 
static ::ExceptionBase & ExcNoSubscriber (std::string arg1, std::string arg2)
 

Private Member Functions

void add_local (const Number a, const VectorSpaceVector< Number > &V)
 
void sadd_local (const Number s, const Number a, const VectorSpaceVector< Number > &V)
 
template<typename Number2 >
Number inner_product_local (const Vector< Number2, MemorySpace > &V) const
 
real_type norm_sqr_local () const
 
Number mean_value_local () const
 
real_type l1_norm_local () const
 
real_type lp_norm_local (const real_type p) const
 
real_type linfty_norm_local () const
 
Number add_and_dot_local (const Number a, const Vector< Number, MemorySpace > &V, const Vector< Number, MemorySpace > &W)
 
void clear_mpi_requests ()
 
void resize_val (const size_type new_allocated_size)
 

Private Attributes

std::shared_ptr< const Utilities::MPI::Partitioner > partitioner
 
size_type allocated_size
 
mutable ::MemorySpace::MemorySpaceData< Number, MemorySpace > data
 
std::shared_ptr< ::parallel::internal::TBBPartitioner > thread_loop_partitioner
 
mutable ::MemorySpace::MemorySpaceData< Number, MemorySpace > import_data
 
bool vector_is_ghosted
 
std::vector< MPI_Request > compress_requests
 
std::vector< MPI_Request > update_ghost_values_requests
 
Threads::Mutex mutex
 

Friends

template<typename Number2 , typename MemorySpace2 >
class Vector
 
template<typename Number2 >
class BlockVector
 

Detailed Description

template<typename Number, typename MemorySpace = MemorySpace::Host>
class LinearAlgebra::distributed::Vector< Number, MemorySpace >

Implementation of a parallel vector class. The design of this class is similar to the standard Vector class in deal.II, with the exception that storage is distributed with MPI.

The vector is designed for the following scheme of parallel partitioning:

  • The indices held by individual processes (locally owned part) in the MPI parallelization form a contiguous range [my_first_index,my_last_index).
  • Ghost indices residing on arbitrary positions of other processors are allowed. It is in general more efficient if ghost indices are clustered, since they are stored as a set of intervals. The communication pattern of the ghost indices is determined when calling the function reinit (locally_owned, ghost_indices, communicator), and retained until the partitioning is changed. This allows for efficient parallel communication of indices. In particular, it stores the communication pattern, rather than having to compute it again for every communication. For more information on ghost vectors, see also the glossary entry on vectors with ghost elements.
  • Besides the usual global access operator() it is also possible to access vector entries in the local index space with the function local_element(). Locally owned indices are placed first, [0, local_size()), and then all ghost indices follow after them contiguously, [local_size(), local_size()+n_ghost_entries()).

Functions related to parallel functionality:

  • The function compress() goes through the data associated with ghost indices and communicates it to the owner process, which can then add it to the correct position. This can be used e.g. after having run an assembly routine involving ghosts that fill this vector. Note that the insert mode of compress() does not set the elements included in ghost entries but simply discards them, assuming that the owning processor has set them to the desired value already (see also the glossary entry on compress).
  • The update_ghost_values() function imports the data from the owning processor to the ghost indices in order to provide read access to the data associated with ghosts.
  • It is possible to split the above functions into two phases, where the first initiates the communication and the second one finishes it. These functions can be used to overlap communication with computations in other parts of the code.
  • Of course, reduction operations (like norms) make use of collective all-to-all MPI communications.

This vector can take two different states with respect to ghost elements:

  • After creation and whenever zero_out_ghosts() is called (or operator= (0.)), the vector only allows writing into ghost elements but not reading from ghost elements.
  • After a call to update_ghost_values(), the vector does not allow writing into ghost elements but only reading from them. This is to avoid undesired ghost data artifacts when calling compress() after modifying some vector entries. The current status of the ghost entries (read mode or write mode) can be queried by the method has_ghost_elements(), which returns true exactly when ghost elements have been updated and false otherwise, irrespective of the actual number of ghost entries in the vector layout (for that information, use n_ghost_entries() instead).
This vector uses the facilities of the class Vector<Number> for implementing the operations on the local range of the vector. In particular, it also inherits thread parallelism that splits most vector-vector operations into smaller chunks if the program uses multiple threads. This may or may not be desired when also working with MPI.
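
The following sketch (not part of the original documentation) shows how the write and read states typically alternate in application code; the IndexSets locally_owned_dofs and ghost_dofs, the communicator mpi_communicator, and the index some_ghost_index are placeholders for objects provided by the surrounding program:

LinearAlgebra::distributed::Vector<double> v(locally_owned_dofs, ghost_dofs, mpi_communicator);

v = 0.;                           // write mode: ghost entries may be written to
// ... accumulate contributions into owned and ghost entries of v ...

v.compress(VectorOperation::add); // send ghost contributions to their owners and add them

v.update_ghost_values();          // make owner values visible in the ghost entries
// const double g = v(some_ghost_index); // read-only access to ghosts is now allowed

v.zero_out_ghosts();              // back to write mode for the next assembly pass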

Limitations regarding the vector size

This vector class is based on two different number types for indexing. The so-called global index type encodes the overall size of the vector. Its type is types::global_dof_index. The largest possible value is 2^32-1 (approximately 4 billion) if 64-bit integers are disabled at configuration of deal.II (the default case), or 2^64-1 (approximately 10^19) if 64-bit integers are enabled (see the glossary entry on When to use types::global_dof_index instead of unsigned int for further information).

The second relevant index type is the local index used within one MPI rank. As opposed to the global index, the implementation assumes 32-bit unsigned integers unconditionally. In other words, to actually use a vector with more than four billion entries, you need to use MPI with more than one rank (which in general is a safe assumption since four billion entries consume at least 16 GB of memory for floats or 32 GB of memory for doubles) and enable 64-bit indices. If more than 4 billion local elements are present, the implementation tries to detect that, which triggers an exception and aborts the code. Note, however, that the detection of overflow is tricky and the detection mechanism might fail in some circumstances. Therefore, it is strongly recommended to not rely on this class to automatically detect the unsupported case.

CUDA support

This vector class supports two different memory spaces: Host and CUDA. By default, the memory space is Host and all the data is allocated on the CPU. When the memory space is CUDA, all the data is allocated on the GPU. The operations on the vector are performed on the chosen memory space. From the host, there are two methods to access the elements of the Vector when using the CUDA memory space:

  • use get_values() to obtain a pointer to the underlying array (which lives in device memory), or
  • use import() together with a LinearAlgebra::ReadWriteVector to copy elements between the host and the device.

The import method is a lot safer and will perform an MPI communication if necessary. Since an MPI communication may be performed, import needs to be called on all the processors.
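
The lines below give a rough sketch (not part of the original documentation) of the import-based access; they assume deal.II was configured with CUDA support, that the IndexSet locally_owned and the communicator mpi_communicator are provided by the surrounding code, and that the matching ReadWriteVector::import() overload for a CUDA vector is available:

LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> device_vector(locally_owned, mpi_communicator);

// Fill values on the host in a ReadWriteVector ...
LinearAlgebra::ReadWriteVector<double> host_values(locally_owned);
// ... and move them into the device vector.
device_vector.import(host_values, VectorOperation::insert);

// Later, copy values back from the device into the host-side view for inspection.
host_values.import(device_vector, VectorOperation::insert);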

Note
By default, all the ranks will try to access device 0. This is fine if you have one rank per node and one GPU per node. If you have multiple GPUs on one node, each process needs to access a different GPU. If each node has the same number of GPUs, this can be done as follows:

int n_devices = 0;
cudaGetDeviceCount(&n_devices);
int device_id = my_rank % n_devices;
cudaSetDevice(device_id);
See also
CUDAWrappers
Author
Katharina Kormann, Martin Kronbichler, Bruno Turcksin 2010, 2011, 2016, 2018

Definition at line 223 of file la_parallel_vector.h.

Constructor & Destructor Documentation

◆ Vector() [1/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
LinearAlgebra::distributed::Vector< Number, MemorySpace >::Vector ( )

Empty constructor.

◆ Vector() [2/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
LinearAlgebra::distributed::Vector< Number, MemorySpace >::Vector ( const Vector< Number, MemorySpace > &  in_vector)

Copy constructor. Uses the parallel partitioning of in_vector.

◆ Vector() [3/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
LinearAlgebra::distributed::Vector< Number, MemorySpace >::Vector ( const size_type  size)

Construct a parallel vector of the given global size without any actual parallel distribution.

◆ Vector() [4/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
LinearAlgebra::distributed::Vector< Number, MemorySpace >::Vector ( const IndexSet local_range,
const IndexSet ghost_indices,
const MPI_Comm  communicator 
)

Construct a parallel vector. The local range is specified by local_range (note that this must be a contiguous interval; multiple intervals are not possible). The IndexSet ghost_indices specifies ghost indices, i.e., indices which one might need to read data from or accumulate data from. It is allowed that the set of ghost indices also contains the local range, but it does not need to.

This function involves global communication, so it should only be called once for a given layout. Use the constructor with Vector<Number> argument to create additional vectors with the same parallel layout.

See also
vectors with ghost elements

◆ Vector() [5/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
LinearAlgebra::distributed::Vector< Number, MemorySpace >::Vector ( const IndexSet local_range,
const MPI_Comm  communicator 
)

Same constructor as above but without any ghost indices.

◆ Vector() [6/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
LinearAlgebra::distributed::Vector< Number, MemorySpace >::Vector ( const std::shared_ptr< const Utilities::MPI::Partitioner > &  partitioner)

Create the vector based on the parallel partitioning described in partitioner. The input argument is a shared pointer, which stores the partitioner data only once and can be shared between several vectors with the same layout.

◆ ~Vector()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual LinearAlgebra::distributed::Vector< Number, MemorySpace >::~Vector ( )
overridevirtual

Destructor.

Member Function Documentation

◆ reinit() [1/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::reinit ( const size_type  size,
const bool  omit_zeroing_entries = false 
)

Set the global size of the vector to size without any actual parallel distribution.

◆ reinit() [2/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename Number2 >
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::reinit ( const Vector< Number2, MemorySpace > &  in_vector,
const bool  omit_zeroing_entries = false 
)

Uses the parallel layout of the input vector in_vector and allocates memory for this vector. This is the recommended initialization function when several vectors with the same layout should be created.

If the flag omit_zeroing_entries is set to false, the memory will be initialized with zero, otherwise the memory will be untouched (and the user must make sure to fill it with reasonable data before using it).
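
As a small sketch (not from the original text), one vector can be set up with the expensive IndexSet-based reinit() and further vectors can then reuse its layout; locally_owned_dofs, ghost_dofs, and mpi_communicator are placeholders provided by the application:

LinearAlgebra::distributed::Vector<double> v1;
v1.reinit(locally_owned_dofs, ghost_dofs, mpi_communicator); // involves global communication

LinearAlgebra::distributed::Vector<double> v2, v3;
v2.reinit(v1);       // reuses the partitioner of v1, no global communication
v3.reinit(v1, true); // same layout, memory left uninitialized (omit_zeroing_entries)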

◆ reinit() [3/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::reinit ( const IndexSet local_range,
const IndexSet ghost_indices,
const MPI_Comm  communicator 
)

Initialize the vector. The local range is specified by local_range (note that this must be a contiguous interval; multiple intervals are not possible). The IndexSet ghost_indices specifies ghost indices, i.e., indices which one might need to read data from or accumulate data from. It is allowed that the set of ghost indices also contains the local range, but it does not need to.

This function involves global communication, so it should only be called once for a given layout. Use the reinit function with Vector<Number> argument to create additional vectors with the same parallel layout.

See also
vectors with ghost elements

◆ reinit() [4/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::reinit ( const IndexSet local_range,
const MPI_Comm  communicator 
)

Same as above, but without ghost entries.

◆ reinit() [5/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::reinit ( const std::shared_ptr< const Utilities::MPI::Partitioner > &  partitioner)

Initialize the vector to the parallel partitioning described in partitioner. The input argument is a shared pointer, which stores the partitioner data only once and can be shared between several vectors with the same layout.

◆ swap()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::swap ( Vector< Number, MemorySpace > &  v)

Swap the contents of this vector and the other vector v. One could do this operation with a temporary variable and copying over the data elements, but this function is significantly more efficient since it only swaps the pointers to the data of the two vectors and therefore does not need to allocate temporary storage and move data around.

This function is analogous to the swap function of all C++ standard containers. Also, there is a global function swap(u,v) that simply calls u.swap(v), again in analogy to standard functions.

This function is virtual in order to allow for derived classes to handle memory separately.

◆ operator=() [1/5]

template<typename Number, typename MemorySpace = MemorySpace::Host>
Vector<Number, MemorySpace>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator= ( const Vector< Number, MemorySpace > &  in_vector)

Assigns the vector to the parallel partitioning of the input vector in_vector, and copies all the data.

If either the input vector or the calling vector (to the left of the assignment operator) had ghost elements set before this operation, the calling vector will have ghost values set. Otherwise, it will be in write mode. If the input vector does not have any ghost elements at all, the vector will also update its ghost values in analogy to the respective setting of the Trilinos and PETSc vectors.

◆ operator=() [2/5]

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename Number2 >
Vector<Number, MemorySpace>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator= ( const Vector< Number2, MemorySpace > &  in_vector)

Assigns the vector to the parallel partitioning of the input vector in_vector, and copies all the data.

If either the input vector or the calling vector (to the left of the assignment operator) had ghost elements set before this operation, the calling vector will have ghost values set. Otherwise, it will be in write mode. If the input vector does not have any ghost elements at all, the vector will also update its ghost values in analogy to the respective setting of the Trilinos and PETSc vectors.

◆ operator=() [3/5]

template<typename Number, typename MemorySpace = MemorySpace::Host>
Vector<Number, MemorySpace>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator= ( const PETScWrappers::MPI::Vector &  petsc_vec)

Copy the content of a PETSc vector into the calling vector. This function assumes that the vector layouts have already been initialized to match.

This operator is only available if deal.II was configured with PETSc.

This function is deprecated. Use the interface through ReadWriteVector instead.

◆ operator=() [4/5]

template<typename Number, typename MemorySpace = MemorySpace::Host>
Vector<Number, MemorySpace>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator= ( const TrilinosWrappers::MPI::Vector &  trilinos_vec)

Copy the content of a Trilinos vector into the calling vector. This function assumes that the vector layouts have already been initialized to match.

This operator is only available if deal.II was configured with Trilinos.

This function is deprecated. Use the interface through ReadWriteVector instead.

◆ compress()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::compress ( ::VectorOperation::values  operation)
overridevirtual

This function copies the data that has accumulated in the data buffer for ghost indices to the owning processor. For the meaning of the argument operation, see the entry on Compressing distributed vectors and matrices in the glossary.

There are four variants for this function. If called with argument VectorOperation::add, it adds all the data accumulated in ghost elements to the respective elements on the owning processor and clears the ghost array afterwards. If called with argument VectorOperation::insert, a set operation is performed. Since setting elements in a vector with ghost elements is ambiguous (as one can set both the element on the ghost site as well as the owning site), this operation makes the assumption that all data is set correctly on the owning processor. Upon call of compress(VectorOperation::insert), all ghost entries are thus simply zeroed out (using zero_out_ghosts()). In debug mode, a check is performed for whether the data set is actually consistent between processors, i.e., whenever a non-zero ghost element is found, it is compared to the value on the owning processor and an exception is thrown if these elements do not agree. If called with VectorOperation::min or VectorOperation::max, the minimum or maximum over all elements across the processors is set.

Note
This vector class has a fixed set of ghost entries attached to the local representation. As a consequence, all ghost entries are assumed to be valid and will be exchanged unconditionally according to the given VectorOperation. Make sure to initialize all ghost entries with the neutral element of the given VectorOperation or touch all ghost entries. The neutral element is zero for VectorOperation::add and VectorOperation::insert, +inf for VectorOperation::min, and -inf for VectorOperation::max. If all values are initialized with values below zero and compress is called with VectorOperation::max two times subsequently, the maximal value after the second calculation will be zero.

◆ update_ghost_values()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::update_ghost_values ( ) const

Fills the data field for ghost indices with the values stored in the respective positions of the owning processor. This function is needed before reading from ghosts. The function is const even though ghost data is changed. This is needed to allow functions with a const vector to perform the data exchange without creating temporaries.

After calling this method, write access to ghost elements of the vector is forbidden and an exception is thrown. Only read access to ghost elements is allowed in this state. Note that all subsequent operations on this vector, like global vector addition, etc., will also update the ghost values by a call to this method after the operation. However, global reduction operations like norms or the inner product will always ignore ghost elements in order to avoid counting the ghost data more than once. To allow writing to ghost elements again, call zero_out_ghosts().

See also
vectors with ghost elements

◆ compress_start()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::compress_start ( const unsigned int  communication_channel = 0,
::VectorOperation::values  operation = VectorOperation::add 
)

Initiates communication for the compress() function with non-blocking communication. This function does not wait for the transfer to finish, in order to allow for other computations during the time it takes until all data arrives.

Before the data is actually exchanged, the function must be followed by a call to compress_finish().

In case this function is called for more than one vector before compress_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation.
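
A minimal sketch (not from the original documentation) of compressing two vectors u and v, assumed to have been filled including ghost contributions, while overlapping the communication with other work; the channel numbers are arbitrary but must differ:

u.compress_start(0, VectorOperation::add);
v.compress_start(1, VectorOperation::add);

// ... unrelated local computations while the messages are in flight ...

u.compress_finish(VectorOperation::add);
v.compress_finish(VectorOperation::add);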

◆ compress_finish()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::compress_finish ( ::VectorOperation::values  operation)

For all requests that have been initiated in compress_start, wait for the communication to finish. Once it is finished, add or set the data (depending on the flag operation) to the respective positions in the owning processor, and clear the contents in the ghost data fields. The meaning of this argument is the same as in compress().

This function should be called exactly once per vector after calling compress_start, otherwise the result is undefined. In particular, it is not well-defined to call compress_start on the same vector again before compress_finish() has been called. However, there is no warning to prevent this situation.

Must follow a call to the compress_start function.

◆ update_ghost_values_start()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::update_ghost_values_start ( const unsigned int  communication_channel = 0) const

Initiates communication for the update_ghost_values() function with non-blocking communication. This function does not wait for the transfer to finish, in order to allow for other computations during the time it takes until all data arrives.

Before the data is actually exchanged, the function must be followed by a call to update_ghost_values_finish().

In case this function is called for more than one vector before update_ghost_values_finish() is invoked, it is mandatory to specify a unique communication channel to each such call, in order to avoid several messages with the same ID that will corrupt this operation.

◆ update_ghost_values_finish()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::update_ghost_values_finish ( ) const

For all requests that have been started in update_ghost_values_start, wait for the communication to finish.

Must follow a call to the update_ghost_values_start function before reading data from ghost indices.
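
For illustration (a sketch, not from the original text), the two-phase update can hide the ghost exchange of a vector v behind work that only touches locally owned data:

v.update_ghost_values_start();
// ... computations that do not read ghost entries of v ...
v.update_ghost_values_finish();
// Ghost entries of v may now be read.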

◆ zero_out_ghosts()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::zero_out_ghosts ( ) const

This method zeros the entries on ghost DoFs, but does not touch locally owned DoFs.

After calling this method, read access to ghost elements of the vector is forbidden and an exception is thrown. Only write access to ghost elements is allowed in this state.

◆ has_ghost_elements()

template<typename Number, typename MemorySpace = MemorySpace::Host>
bool LinearAlgebra::distributed::Vector< Number, MemorySpace >::has_ghost_elements ( ) const

Return whether the vector currently is in a state where ghost values can be read or not. This is the same functionality as other parallel vectors have. If this method returns false, this only means that read-access to ghost elements is prohibited whereas write access is still possible (to those entries specified as ghosts during initialization), not that there are no ghost elements at all.

See also
vectors with ghost elements

◆ copy_locally_owned_data_from()

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename Number2 >
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::copy_locally_owned_data_from ( const Vector< Number2, MemorySpace > &  src)

This method copies the data in the locally owned range from another distributed vector src into the calling vector. As opposed to operator= that also includes ghost entries, this operation ignores the ghost range. The only prerequisite is that the local range on the calling vector and the given vector src are the same on all processors. It is explicitly allowed that the two vectors have different ghost elements that might or might not be related to each other.

Since no data exchange is performed, make sure that neither src nor the calling vector have pending communications in order to obtain correct results.
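
A short sketch, under the assumption that v_plain and v_ghosted are two vectors that share the same locally owned range but were set up with different (or no) ghost index sets:

v_plain.copy_locally_owned_data_from(v_ghosted); // only the locally owned range is copied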

◆ reinit() [6/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::reinit ( const VectorSpaceVector< Number > &  V,
const bool  omit_zeroing_entries = false 
)
overridevirtual

Change the dimension to that of the vector V. The elements of V are not copied.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ operator*=()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual Vector<Number, MemorySpace>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator*= ( const Number  factor)
overridevirtual

Multiply the entire vector by a fixed factor.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ operator/=()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual Vector<Number, MemorySpace>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator/= ( const Number  factor)
overridevirtual

Divide the entire vector by a fixed factor.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ operator+=()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual Vector<Number, MemorySpace>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator+= ( const VectorSpaceVector< Number > &  V)
overridevirtual

Add the vector V to the present one.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ operator-=()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual Vector<Number, MemorySpace>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator-= ( const VectorSpaceVector< Number > &  V)
overridevirtual

Subtract the vector V from the present one.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ import()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::import ( const LinearAlgebra::ReadWriteVector< Number > &  V,
VectorOperation::values  operation,
std::shared_ptr< const CommunicationPatternBase communication_pattern = std::shared_ptr< const CommunicationPatternBase >() 
)
overridevirtual

Import all the elements present in the vector's IndexSet from the input vector V. VectorOperation::values operation is used to decide if the elements in V should be added to the current vector or replace the current elements. The last parameter can be used if the same communication pattern is used multiple times. This can be used to improve performance.

Note
If the MemorySpace is CUDA, the data in the ReadWriteVector will be moved to the device.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ operator*()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual Number LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator* ( const VectorSpaceVector< Number > &  V) const
overridevirtual

Return the scalar product of two vectors.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ add() [1/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::add ( const Number  a)
overridevirtual

Add a to all components. Note that a is a scalar, not a vector.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ add() [2/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::add ( const Number  a,
const VectorSpaceVector< Number > &  V 
)
overridevirtual

Simple addition of a multiple of a vector, i.e. *this += a*V.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ add() [3/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::add ( const Number  a,
const VectorSpaceVector< Number > &  V,
const Number  b,
const VectorSpaceVector< Number > &  W 
)
overridevirtual

Multiple addition of scaled vectors, i.e. *this += a*V+b*W.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ add() [4/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::add ( const std::vector< size_type > &  indices,
const std::vector< Number > &  values 
)
virtual

A collective add operation: This function adds a whole set of values stored in values to the vector components specified by indices.

◆ sadd() [1/3]

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::sadd ( const Number  s,
const Number  a,
const VectorSpaceVector< Number > &  V 
)
overridevirtual

Scaling and simple addition of a multiple of a vector, i.e. *this = s*(*this)+a*V.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ scale()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::scale ( const VectorSpaceVector< Number > &  scaling_factors)
overridevirtual

Scale each element of this vector by the corresponding element in the argument. This function is mostly meant to simulate multiplication (and immediate re-assignment) by a diagonal scaling matrix.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ equ() [1/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::equ ( const Number  a,
const VectorSpaceVector< Number > &  V 
)
overridevirtual

Assignment *this = a*V.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ l1_norm()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual real_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::l1_norm ( ) const
overridevirtual

Return the \(l_1\) norm of the vector (i.e., the sum of the absolute values of all entries among all processors).

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ l2_norm()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual real_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::l2_norm ( ) const
overridevirtual

Return the \(l_2\) norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ norm_sqr()

template<typename Number, typename MemorySpace = MemorySpace::Host>
real_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::norm_sqr ( ) const

Return the square of the \(l_2\) norm of the vector.

◆ linfty_norm()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual real_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::linfty_norm ( ) const
overridevirtual

Return the maximum norm of the vector (i.e., the maximum absolute value among all entries and among all processors).

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ add_and_dot()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual Number LinearAlgebra::distributed::Vector< Number, MemorySpace >::add_and_dot ( const Number  a,
const VectorSpaceVector< Number > &  V,
const VectorSpaceVector< Number > &  W 
)
overridevirtual

Perform a combined operation of a vector addition and a subsequent inner product, returning the value of the inner product. In other words, the result of this function is the same as if the user called

this->add(a, V);
return_value = *this * W;

The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

For complex-valued vectors, the scalar product in the second step is implemented as \(\left<v,w\right>=\sum_i v_i \bar{w_i}\).

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ size()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual size_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::size ( ) const
overridevirtual

Return the global size of the vector, equal to the sum of the number of locally owned indices among all processors.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ locally_owned_elements()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual ::IndexSet LinearAlgebra::distributed::Vector< Number, MemorySpace >::locally_owned_elements ( ) const
overridevirtual

Return an index set that describes which elements of this vector are owned by the current processor. As a consequence, if this is a distributed vector, the index sets returned on different processors will form disjoint sets that add up to the complete index set. Obviously, if a vector is created on only one processor, then the result would satisfy

vec.locally_owned_elements() == complete_index_set(vec.size())

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ print()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual void LinearAlgebra::distributed::Vector< Number, MemorySpace >::print ( std::ostream &  out,
const unsigned int  precision = 3,
const bool  scientific = true,
const bool  across = true 
) const
overridevirtual

Print the vector to the output stream out.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ memory_consumption()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual std::size_t LinearAlgebra::distributed::Vector< Number, MemorySpace >::memory_consumption ( ) const
overridevirtual

Return the memory consumption of this class in bytes.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ operator=() [5/5]

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual Vector<Number, MemorySpace>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator= ( const Number  s)
overridevirtual

Sets all elements of the vector to the scalar s. If the scalar is zero, the ghost elements are set to zero as well; otherwise they remain unchanged.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ add() [5/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename OtherNumber >
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::add ( const std::vector< size_type > &  indices,
const ::Vector< OtherNumber > &  values 
)

This is a collective add operation that adds a whole set of values stored in values to the vector components specified by indices.

◆ add() [6/6]

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename OtherNumber >
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::add ( const size_type  n_elements,
const size_type *  indices,
const OtherNumber *  values 
)

Take an address where n_elements are stored contiguously and add them into the vector.

◆ sadd() [2/3]

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::sadd ( const Number  s,
const Vector< Number, MemorySpace > &  V 
)

Scaling and simple vector addition, i.e. *this = s*(*this)+V.

◆ sadd() [3/3]

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::sadd ( const Number  s,
const Number  a,
const Vector< Number, MemorySpace > &  V,
const Number  b,
const Vector< Number, MemorySpace > &  W 
)

Scaling and multiple addition.

This function is deprecated.

◆ equ() [2/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::equ ( const Number  a,
const Vector< Number, MemorySpace > &  u,
const Number  b,
const Vector< Number, MemorySpace > &  v 
)

Assignment *this = a*u + b*v.

This function is deprecated.

◆ local_size()

template<typename Number, typename MemorySpace = MemorySpace::Host>
size_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::local_size ( ) const

Return the local size of the vector, i.e., the number of indices owned locally.

◆ local_range()

template<typename Number, typename MemorySpace = MemorySpace::Host>
std::pair<size_type, size_type> LinearAlgebra::distributed::Vector< Number, MemorySpace >::local_range ( ) const

Return the half-open interval that specifies the locally owned range of the vector. Note that local_size() == local_range().second - local_range().first.

This function is deprecated.

◆ in_local_range()

template<typename Number, typename MemorySpace = MemorySpace::Host>
bool LinearAlgebra::distributed::Vector< Number, MemorySpace >::in_local_range ( const size_type  global_index) const

Return true if the given global index is in the local range of this processor.

This function is deprecated.

◆ n_ghost_entries()

template<typename Number, typename MemorySpace = MemorySpace::Host>
size_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::n_ghost_entries ( ) const

Return the number of ghost elements present on the vector.

This function is deprecated.

◆ ghost_elements()

template<typename Number, typename MemorySpace = MemorySpace::Host>
const IndexSet& LinearAlgebra::distributed::Vector< Number, MemorySpace >::ghost_elements ( ) const

Return an index set that describes which elements of this vector are not owned by the current processor but can be written into or read from locally (ghost elements).

This function is deprecated.

◆ is_ghost_entry()

template<typename Number, typename MemorySpace = MemorySpace::Host>
bool LinearAlgebra::distributed::Vector< Number, MemorySpace >::is_ghost_entry ( const types::global_dof_index  global_index) const

Return whether the given global index is a ghost index on the present processor. Returns false for indices that are owned locally and for indices not present at all.

This function is deprecated.

◆ begin() [1/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
iterator LinearAlgebra::distributed::Vector< Number, MemorySpace >::begin ( )

Make the Vector class a bit like the vector<> class of the C++ standard library by returning iterators to the start and end of the locally owned elements of this vector.

It holds that end() - begin() == local_size().

Note
For the CUDA memory space, the iterator points to memory on the device.

◆ begin() [2/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
const_iterator LinearAlgebra::distributed::Vector< Number, MemorySpace >::begin ( ) const

Return constant iterator to the start of the locally owned elements of the vector.

Note
For the CUDA memory space, the iterator points to memory on the device.

◆ end() [1/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
iterator LinearAlgebra::distributed::Vector< Number, MemorySpace >::end ( )

Return an iterator pointing to the element past the end of the array of locally owned entries.

Note
For the CUDA memory space, the iterator points to memory on the device.

◆ end() [2/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
const_iterator LinearAlgebra::distributed::Vector< Number, MemorySpace >::end ( ) const

Return a constant iterator pointing to the element past the end of the array of the locally owned entries.

Note
For the CUDA memory space, the iterator points to memory on the device.

◆ operator()() [1/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
Number LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator() ( const size_type  global_index) const

Read access to the data in the position corresponding to global_index. The index must be either in the local range of the vector or be specified as a ghost index at construction.

Performance: O(1) for locally owned elements that represent a contiguous range and O(log(n_ranges)) for ghost elements (quite fast, but slower than local_element()).

◆ operator()() [2/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
Number& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator() ( const size_type  global_index)

Read and write access to the data in the position corresponding to global_index. The index must be either in the local range of the vector or be specified as a ghost index at construction.

Performance: O(1) for locally owned elements that represent a contiguous range and O(log(n_ranges)) for ghost elements (quite fast, but slower than local_element()).

◆ operator[]() [1/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
Number LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator[] ( const size_type  global_index) const

Read access to the data in the position corresponding to global_index. The index must be either in the local range of the vector or be specified as a ghost index at construction.

This function does the same thing as operator().

◆ operator[]() [2/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
Number& LinearAlgebra::distributed::Vector< Number, MemorySpace >::operator[] ( const size_type  global_index)

Read and write access to the data in the position corresponding to global_index. The index must be either in the local range of the vector or be specified as a ghost index at construction.

This function does the same thing as operator().

◆ local_element() [1/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
Number LinearAlgebra::distributed::Vector< Number, MemorySpace >::local_element ( const size_type  local_index) const

Read access to the data field specified by local_index. Locally owned indices can be accessed with indices [0, local_size()), and ghost indices with indices [local_size(), local_size()+n_ghost_entries()).

Performance: Direct array access (fast).

◆ local_element() [2/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
Number& LinearAlgebra::distributed::Vector< Number, MemorySpace >::local_element ( const size_type  local_index)

Read and write access to the data field specified by local_index. Locally owned indices can be accessed with indices [0, local_size()), and ghost indices with indices [local_size(), local_size()+n_ghost_entries()).

Performance: Direct array access (fast).
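
A brief sketch (not part of the original documentation) of a purely local loop over the owned entries of a vector v using local indices:

for (unsigned int i = 0; i < v.local_size(); ++i)
  v.local_element(i) *= 2.; // touches only locally owned entries, no communication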

◆ get_values()

template<typename Number, typename MemorySpace = MemorySpace::Host>
Number* LinearAlgebra::distributed::Vector< Number, MemorySpace >::get_values ( ) const

Return the pointer to the underlying raw array.

Note
For the CUDA memory space, the pointer points to memory on the device.

◆ extract_subvector_to() [1/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename OtherNumber >
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::extract_subvector_to ( const std::vector< size_type > &  indices,
std::vector< OtherNumber > &  values 
) const

Instead of getting individual elements of a vector via operator(), this function allows getting a whole set of elements at once. The indices of the elements to be read are stated in the first argument, the corresponding values are returned in the second.

If the current vector is called v, then this function is the equivalent to the code

for (unsigned int i=0; i<indices.size(); ++i)
values[i] = v[indices[i]];
Precondition
The sizes of the indices and values arrays must be identical.
Note
This function is not implemented for CUDA memory space.

◆ extract_subvector_to() [2/2]

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename ForwardIterator , typename OutputIterator >
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::extract_subvector_to ( ForwardIterator  indices_begin,
const ForwardIterator  indices_end,
OutputIterator  values_begin 
) const

Instead of getting individual elements of a vector via operator(), this function allows getting a whole set of elements at once. In contrast to the previous function, this function obtains the indices of the elements by dereferencing all elements of the iterator range provided by the first two arguments, and puts the vector values into memory locations obtained by dereferencing a range of iterators starting at the location pointed to by the third argument.

If the current vector is called v, then this function is the equivalent to the code

ForwardIterator indices_p = indices_begin;
OutputIterator values_p = values_begin;
while (indices_p != indices_end)
{
*values_p = v[*indices_p];
++indices_p;
++values_p;
}
Precondition
It must be possible to write into as many memory locations starting at values_begin as there are iterators between indices_begin and indices_end.

◆ all_zero()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual bool LinearAlgebra::distributed::Vector< Number, MemorySpace >::all_zero ( ) const
overridevirtual

Return whether the vector contains only elements with value zero. This is a collective operation. This function is expensive, because potentially all elements have to be checked.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ mean_value()

template<typename Number, typename MemorySpace = MemorySpace::Host>
virtual Number LinearAlgebra::distributed::Vector< Number, MemorySpace >::mean_value ( ) const
overridevirtual

Compute the mean value of all the entries in the vector.

Implements LinearAlgebra::VectorSpaceVector< Number >.

◆ lp_norm()

template<typename Number, typename MemorySpace = MemorySpace::Host>
real_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::lp_norm ( const real_type  p) const

Return the \(l_p\) norm of the vector, i.e., the pth root of the sum of the pth powers of the absolute values of the elements.

◆ get_mpi_communicator()

template<typename Number, typename MemorySpace = MemorySpace::Host>
const MPI_Comm& LinearAlgebra::distributed::Vector< Number, MemorySpace >::get_mpi_communicator ( ) const

Return a reference to the MPI communicator object in use with this vector.

◆ get_partitioner()

template<typename Number, typename MemorySpace = MemorySpace::Host>
const std::shared_ptr<const Utilities::MPI::Partitioner>& LinearAlgebra::distributed::Vector< Number, MemorySpace >::get_partitioner ( ) const

Return the MPI partitioner that describes the parallel layout of the vector. This object can be used to initialize another vector with the respective reinit() call, for additional queries regarding the parallel communication, or the compatibility of partitioners.
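
For example (a sketch, assuming v1 is an already initialized vector), the partitioner can be handed directly to reinit() to give a second vector the same layout:

LinearAlgebra::distributed::Vector<double> v2;
v2.reinit(v1.get_partitioner()); // v2 now shares the parallel layout of v1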

◆ partitioners_are_compatible()

template<typename Number, typename MemorySpace = MemorySpace::Host>
bool LinearAlgebra::distributed::Vector< Number, MemorySpace >::partitioners_are_compatible ( const Utilities::MPI::Partitioner part) const

Check whether the given partitioner is compatible with the partitioner used for this vector. Two partitioners are compatible if they have the same local size and the same ghost indices. They do not necessarily need to be the same data field of the shared pointer. This is a local operation only, i.e., if only some processors decide that the partitioning is not compatible, only these processors will return false, whereas the other processors will return true.
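
A minimal sketch, assuming v and w are initialized vectors, of a local compatibility check before combining them entry-wise:

Assert(v.partitioners_are_compatible(*w.get_partitioner()),
       ExcMessage("v and w do not share a compatible parallel layout."));
v.add(1., w);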

◆ partitioners_are_globally_compatible()

template<typename Number, typename MemorySpace = MemorySpace::Host>
bool LinearAlgebra::distributed::Vector< Number, MemorySpace >::partitioners_are_globally_compatible ( const Utilities::MPI::Partitioner part) const

Check whether the given partitioner is compatible with the partitioner used for this vector. Two partitioners are compatible if they have the same local size and the same ghost indices. They do not necessarily need to be the same data field. As opposed to partitioners_are_compatible(), this method checks for compatibility among all processors and the method only returns true if the partitioner is the same on all processors.

This method performs global communication, so make sure to use it only in a context where all processors call it the same number of times.

◆ set_ghost_state()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::set_ghost_state ( const bool  ghosted) const

Change the ghost state of this vector to ghosted.

◆ add_local()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::add_local ( const Number  a,
const VectorSpaceVector< Number > &  V 
)
private

Simple addition of a multiple of a vector, i.e. *this += a*V without MPI communication.

◆ sadd_local()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::sadd_local ( const Number  s,
const Number  a,
const VectorSpaceVector< Number > &  V 
)
private

Scaling and simple addition of a multiple of a vector, i.e. *this = s*(*this)+a*V without MPI communication.

◆ inner_product_local()

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename Number2 >
Number LinearAlgebra::distributed::Vector< Number, MemorySpace >::inner_product_local ( const Vector< Number2, MemorySpace > &  V) const
private

Local part of the inner product of two vectors.

◆ norm_sqr_local()

template<typename Number, typename MemorySpace = MemorySpace::Host>
real_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::norm_sqr_local ( ) const
private

Local part of norm_sqr().

◆ mean_value_local()

template<typename Number, typename MemorySpace = MemorySpace::Host>
Number LinearAlgebra::distributed::Vector< Number, MemorySpace >::mean_value_local ( ) const
private

Local part of mean_value().

◆ l1_norm_local()

template<typename Number, typename MemorySpace = MemorySpace::Host>
real_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::l1_norm_local ( ) const
private

Local part of l1_norm().

◆ lp_norm_local()

template<typename Number, typename MemorySpace = MemorySpace::Host>
real_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::lp_norm_local ( const real_type  p) const
private

Local part of lp_norm().

◆ linfty_norm_local()

template<typename Number, typename MemorySpace = MemorySpace::Host>
real_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::linfty_norm_local ( ) const
private

Local part of linfty_norm().

◆ add_and_dot_local()

template<typename Number, typename MemorySpace = MemorySpace::Host>
Number LinearAlgebra::distributed::Vector< Number, MemorySpace >::add_and_dot_local ( const Number  a,
const Vector< Number, MemorySpace > &  V,
const Vector< Number, MemorySpace > &  W 
)
private

Local part of the addition followed by an inner product of two vectors. The same applies for complex-valued vectors as for the add_and_dot() function.

◆ clear_mpi_requests()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::clear_mpi_requests ( )
private

A helper function that clears the compress_requests and update_ghost_values_requests fields. Used in reinit() functions.

◆ resize_val()

template<typename Number, typename MemorySpace = MemorySpace::Host>
void LinearAlgebra::distributed::Vector< Number, MemorySpace >::resize_val ( const size_type  new_allocated_size)
private

A helper function that is used to resize the val array.

Friends And Related Function Documentation

◆ Vector

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename Number2 , typename MemorySpace2 >
friend class Vector
friend

Typedef for the vector type used.

Implementation of a parallel vector class. The design of this class is similar to the standard Vector class in deal.II, with the exception that storage is distributed with MPI.

The vector is designed for the following scheme of parallel partitioning:

  • The indices held by individual processes (locally owned part) in the MPI parallelization form a contiguous range [my_first_index,my_last_index).
  • Ghost indices residing on arbitrary positions of other processors are allowed. It is in general more efficient if ghost indices are clustered, since they are stored as a set of intervals. The communication pattern of the ghost indices is determined when calling the function reinit (locally_owned, ghost_indices, communicator), and retained until the partitioning is changed. This allows for efficient parallel communication of indices. In particular, it stores the communication pattern, rather than having to compute it again for every communication. For more information on ghost vectors, see also the glossary entry on vectors with ghost elements.
  • Besides the usual global access operator() it is also possible to access vector entries in the local index space with the function local_element(). Locally owned indices are placed first, [0, local_size()), and then all ghost indices follow after them contiguously, [local_size(), local_size()+n_ghost_entries()); see the sketch after this list.
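A minimal sketch of this setup, assuming MPI has already been initialized and that the index sets and communicator are provided by the application (all names are illustrative):

    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/la_parallel_vector.h>

    using namespace dealii;

    void make_ghosted_vector(const IndexSet &locally_owned,
                             const IndexSet &ghost_indices,
                             const MPI_Comm  mpi_communicator)
    {
      LinearAlgebra::distributed::Vector<double> v;
      v.reinit(locally_owned, ghost_indices, mpi_communicator);

      // Locally owned entries occupy [0, local_size()) in the local index space:
      for (unsigned int i = 0; i < v.local_size(); ++i)
        v.local_element(i) = 1.0;

      // Ghost entries follow at [local_size(), local_size()+n_ghost_entries());
      // global access through operator() works for owned and ghost indices alike.
    }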

Functions related to parallel functionality:

  • The function compress() goes through the data associated with ghost indices and communicates it to the owner process, which can then add it to the correct position. This can be used e.g. after having run an assembly routine involving ghosts that fill this vector. Note that the insert mode of compress() does not set the elements included in ghost entries but simply discards them, assuming that the owning processor has set them to the desired value already (See also the glossary entry on compress).
  • The update_ghost_values() function imports the data from the owning processor to the ghost indices in order to provide read access to the data associated with ghosts.
  • It is possible to split the above functions into two phases, where the first initiates the communication and the second one finishes it. These functions can be used to overlap communication with computations in other parts of the code; see the sketch after this list.
  • Of course, reduction operations (like norms) make use of collective all-to-all MPI communications.
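The following sketch illustrates the typical sequence after an assembly loop that has written into both locally owned and ghost entries; the vector names are illustrative:

    // Accumulate the contributions written into ghost entries on the owning
    // processes, then make the owned values readable through the ghost entries:
    rhs.compress(VectorOperation::add);
    rhs.update_ghost_values();

    // The split variants allow overlapping communication with local work,
    // shown here for the ghost update of a second vector 'src':
    src.update_ghost_values_start();
    // ... computations that do not touch the ghost entries of 'src' ...
    src.update_ghost_values_finish();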

This vector can take two different states with respect to ghost elements:

  • After creation and whenever zero_out_ghosts() is called (or operator= (0.)), the vector only allows writing into ghost elements, not reading from them.
  • After a call to update_ghost_values(), the vector only allows reading from ghost elements, not writing into them. This avoids undesired ghost-data artifacts when calling compress() after modifying some vector entries. The current status of the ghost entries (read mode or write mode) can be queried by the method has_ghost_elements(), which returns true exactly when ghost elements have been updated and false otherwise, irrespective of the actual number of ghost entries in the vector layout (for that information, use n_ghost_entries() instead). A sketch of these two states follows this list.
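A sketch of the two states, with an illustrative vector name:

    v.zero_out_ghosts();      // write mode: has_ghost_elements() returns false;
                              // ghost entries may be written but not read
    v.update_ghost_values();  // read mode: has_ghost_elements() returns true;
                              // ghost entries may be read but not written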

This vector uses the facilities of the class Vector<Number> for implementing the operations on the local range of the vector. In particular, it also inherits thread parallelism that splits most vector-vector operations into smaller chunks if the program uses multiple threads. This may or may not be desired when also working with MPI.

Limitations regarding the vector size

This vector class is based on two different number types for indexing. The so-called global index type encodes the overall size of the vector. Its type is types::global_dof_index. The largest possible value is 2^32-1, or approximately 4 billion, if 64-bit integers are disabled when configuring deal.II (the default case), and 2^64-1, or approximately 10^19, if 64-bit integers are enabled (see the glossary entry on When to use types::global_dof_index instead of unsigned int for further information).

The second relevant index type is the local index used within one MPI rank. As opposed to the global index, the implementation assumes 32-bit unsigned integers unconditionally. In other words, to actually use a vector with more than four billion entries, you need to use MPI with more than one rank (which in general is a safe assumption since four billion entries consume at least 16 GB of memory for floats or 32 GB of memory for doubles) and enable 64-bit indices. If more than 4 billion local elements are present, the implementation tries to detect that, which triggers an exception and aborts the code. Note, however, that the detection of overflow is tricky and the detection mechanism might fail in some circumstances. Therefore, it is strongly recommended to not rely on this class to automatically detect the unsupported case.

Author
Katharina Kormann, Martin Kronbichler, 2010, 2011
Deprecated:
Use LinearAlgebra::distributed::Vector instead.

Definition at line 1392 of file la_parallel_vector.h.

◆ BlockVector

template<typename Number, typename MemorySpace = MemorySpace::Host>
template<typename Number2 >
friend class BlockVector
friend

Make BlockVector type friends.

Typedef for the type used to describe vectors that consist of multiple blocks.

An implementation of block vectors based on distributed deal.II vectors. While the base class provides for most of the interface, this class handles the actual allocation of vectors and provides functions that are specific to the underlying vector type.

Note
Instantiations for this template are provided for <float> and <double>; others can be generated in application programs (see the section on Template instantiations in the manual).
See also
Block (linear algebra)
Author
Katharina Kormann, Martin Kronbichler, 2011
Deprecated:
Use LinearAlgebra::distributed::BlockVector instead.

Definition at line 1398 of file la_parallel_vector.h.

Member Data Documentation

◆ partitioner

template<typename Number, typename MemorySpace = MemorySpace::Host>
std::shared_ptr<const Utilities::MPI::Partitioner> LinearAlgebra::distributed::Vector< Number, MemorySpace >::partitioner
private

Shared pointer to store the parallel partitioning information. This information can be shared between several vectors that have the same partitioning.

Definition at line 1314 of file la_parallel_vector.h.

◆ allocated_size

template<typename Number, typename MemorySpace = MemorySpace::Host>
size_type LinearAlgebra::distributed::Vector< Number, MemorySpace >::allocated_size
private

The size that is currently allocated in the val array.

Definition at line 1319 of file la_parallel_vector.h.

◆ data

template<typename Number, typename MemorySpace = MemorySpace::Host>
mutable ::MemorySpace::MemorySpaceData<Number, MemorySpace> LinearAlgebra::distributed::Vector< Number, MemorySpace >::data
private

Underlying data structure storing the local elements of this vector.

Definition at line 1324 of file la_parallel_vector.h.

◆ thread_loop_partitioner

template<typename Number, typename MemorySpace = MemorySpace::Host>
std::shared_ptr<::parallel::internal::TBBPartitioner> LinearAlgebra::distributed::Vector< Number, MemorySpace >::thread_loop_partitioner
mutable private

For parallel loops with TBB, this member variable stores the affinity information of loops.

Definition at line 1331 of file la_parallel_vector.h.

◆ import_data

template<typename Number, typename MemorySpace = MemorySpace::Host>
mutable ::MemorySpace::MemorySpaceData<Number, MemorySpace> LinearAlgebra::distributed::Vector< Number, MemorySpace >::import_data
private

Temporary storage that holds the data that is sent to this processor in compress() or sent from this processor in update_ghost_values().

Definition at line 1339 of file la_parallel_vector.h.

◆ vector_is_ghosted

template<typename Number, typename MemorySpace = MemorySpace::Host>
bool LinearAlgebra::distributed::Vector< Number, MemorySpace >::vector_is_ghosted
mutable private

Stores whether the vector currently allows for reading ghost elements or not. Note that this is to ensure consistent ghost data and does not indicate whether the vector actually can store ghost elements. In particular, when assembling a vector we do not allow reading elements, only writing them.

Definition at line 1348 of file la_parallel_vector.h.

◆ compress_requests

template<typename Number, typename MemorySpace = MemorySpace::Host>
std::vector<MPI_Request> LinearAlgebra::distributed::Vector< Number, MemorySpace >::compress_requests
private

A vector that collects all requests from compress() operations. This class uses persistent MPI communicators, i.e., the communication channels are stored during successive calls to a given function. This reduces the overhead involved with setting up the MPI machinery, but it does not remove the need for a receive operation to be posted before the data can actually be sent.

Definition at line 1359 of file la_parallel_vector.h.

◆ update_ghost_values_requests

template<typename Number, typename MemorySpace = MemorySpace::Host>
std::vector<MPI_Request> LinearAlgebra::distributed::Vector< Number, MemorySpace >::update_ghost_values_requests
mutable private

A vector that collects all requests from update_ghost_values() operations. This class uses persistent MPI communicators.

Definition at line 1365 of file la_parallel_vector.h.

◆ mutex

template<typename Number, typename MemorySpace = MemorySpace::Host>
Threads::Mutex LinearAlgebra::distributed::Vector< Number, MemorySpace >::mutex
mutable private

A lock that makes sure that the compress and update_ghost_values functions give reasonable results also when used with several threads.

Definition at line 1373 of file la_parallel_vector.h.


The documentation for this class was generated from the following files: