Reference documentation for deal.II version 9.1.1
TrilinosWrappers::MPI::Vector Class Reference

#include <deal.II/lac/trilinos_vector.h>

Inheritance diagram for TrilinosWrappers::MPI::Vector:

Public Types

using value_type = TrilinosScalar
 

Public Member Functions

1: Basic Object-handling
 Vector ()
 
 Vector (const Vector &v)
 
 Vector (const IndexSet &parallel_partitioning, const MPI_Comm &communicator=MPI_COMM_WORLD)
 
 Vector (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator=MPI_COMM_WORLD)
 
 Vector (const IndexSet &parallel_partitioning, const Vector &v, const MPI_Comm &communicator=MPI_COMM_WORLD)
 
template<typename Number >
 Vector (const IndexSet &parallel_partitioning, const ::Vector< Number > &v, const MPI_Comm &communicator=MPI_COMM_WORLD)
 
 Vector (Vector &&v) noexcept
 
 ~Vector () override=default
 
void clear ()
 
void reinit (const Vector &v, const bool omit_zeroing_entries=false, const bool allow_different_maps=false)
 
void reinit (const IndexSet &parallel_partitioning, const MPI_Comm &communicator=MPI_COMM_WORLD, const bool omit_zeroing_entries=false)
 
void reinit (const IndexSet &locally_owned_entries, const IndexSet &ghost_entries, const MPI_Comm &communicator=MPI_COMM_WORLD, const bool vector_writable=false)
 
void reinit (const BlockVector &v, const bool import_data=false)
 
void compress (::VectorOperation::values operation)
 
Vector & operator= (const TrilinosScalar s)
 
Vector & operator= (const Vector &v)
 
Vector & operator= (Vector &&v) noexcept
 
template<typename Number >
Vector & operator= (const ::Vector< Number > &v)
 
void import_nonlocal_data_for_fe (const ::TrilinosWrappers::SparseMatrix &matrix, const Vector &vector)
 
void import (const LinearAlgebra::ReadWriteVector< double > &rwv, const VectorOperation::values operation)
 
bool operator== (const Vector &v) const
 
bool operator!= (const Vector &v) const
 
size_type size () const
 
size_type local_size () const
 
std::pair< size_type, size_type > local_range () const
 
bool in_local_range (const size_type index) const
 
IndexSet locally_owned_elements () const
 
bool has_ghost_elements () const
 
void update_ghost_values () const
 
TrilinosScalar operator* (const Vector &vec) const
 
real_type norm_sqr () const
 
TrilinosScalar mean_value () const
 
TrilinosScalar min () const
 
TrilinosScalar max () const
 
real_type l1_norm () const
 
real_type l2_norm () const
 
real_type lp_norm (const TrilinosScalar p) const
 
real_type linfty_norm () const
 
TrilinosScalar add_and_dot (const TrilinosScalar a, const Vector &V, const Vector &W)
 
bool all_zero () const
 
bool is_non_negative () const
 
2: Data-Access
reference operator() (const size_type index)
 
TrilinosScalar operator() (const size_type index) const
 
reference operator[] (const size_type index)
 
TrilinosScalar operator[] (const size_type index) const
 
void extract_subvector_to (const std::vector< size_type > &indices, std::vector< TrilinosScalar > &values) const
 
template<typename ForwardIterator , typename OutputIterator >
void extract_subvector_to (ForwardIterator indices_begin, const ForwardIterator indices_end, OutputIterator values_begin) const
 
iterator begin ()
 
const_iterator begin () const
 
iterator end ()
 
const_iterator end () const
 
3: Modification of vectors
void set (const std::vector< size_type > &indices, const std::vector< TrilinosScalar > &values)
 
void set (const std::vector< size_type > &indices, const ::Vector< TrilinosScalar > &values)
 
void set (const size_type n_elements, const size_type *indices, const TrilinosScalar *values)
 
void add (const std::vector< size_type > &indices, const std::vector< TrilinosScalar > &values)
 
void add (const std::vector< size_type > &indices, const ::Vector< TrilinosScalar > &values)
 
void add (const size_type n_elements, const size_type *indices, const TrilinosScalar *values)
 
Vector & operator*= (const TrilinosScalar factor)
 
Vector & operator/= (const TrilinosScalar factor)
 
Vector & operator+= (const Vector &V)
 
Vector & operator-= (const Vector &V)
 
void add (const TrilinosScalar s)
 
void add (const Vector &V, const bool allow_different_maps=false)
 
void add (const TrilinosScalar a, const Vector &V)
 
void add (const TrilinosScalar a, const Vector &V, const TrilinosScalar b, const Vector &W)
 
void sadd (const TrilinosScalar s, const Vector &V)
 
void sadd (const TrilinosScalar s, const TrilinosScalar a, const Vector &V)
 
void scale (const Vector &scaling_factors)
 
void equ (const TrilinosScalar a, const Vector &V)
 
4: Mixed stuff
const Epetra_MultiVector & trilinos_vector () const
 
Epetra_FEVector & trilinos_vector ()
 
const Epetra_Map & vector_partitioner () const
 
const Epetra_BlockMap & trilinos_partitioner () const
 
void print (std::ostream &out, const unsigned int precision=3, const bool scientific=true, const bool across=true) const
 
void swap (Vector &v)
 
std::size_t memory_consumption () const
 
const MPI_Comm & get_mpi_communicator () const
 
- Public Member Functions inherited from Subscriptor
 Subscriptor ()
 
 Subscriptor (const Subscriptor &)
 
 Subscriptor (Subscriptor &&) noexcept
 
virtual ~Subscriptor ()
 
Subscriptor & operator= (const Subscriptor &)
 
Subscriptor & operator= (Subscriptor &&) noexcept
 
void subscribe (std::atomic< bool > *const validity, const std::string &identifier="") const
 
void unsubscribe (std::atomic< bool > *const validity, const std::string &identifier="") const
 
unsigned int n_subscriptions () const
 
template<typename StreamType >
void list_subscribers (StreamType &stream) const
 
void list_subscribers () const
 
template<class Archive >
void serialize (Archive &ar, const unsigned int version)
 

Static Public Member Functions

static ::ExceptionBase & ExcDifferentParallelPartitioning ()
 
static ::ExceptionBase & ExcTrilinosError (int arg1)
 
static ::ExceptionBase & ExcAccessToNonLocalElement (size_type arg1, size_type arg2, size_type arg3, size_type arg4)
 
- Static Public Member Functions inherited from Subscriptor
static ::ExceptionBase & ExcInUse (int arg1, std::string arg2, std::string arg3)
 
static ::ExceptionBase & ExcNoSubscriber (std::string arg1, std::string arg2)
 

Private Attributes

Epetra_CombineMode last_action
 
bool compressed
 
bool has_ghosts
 
std::unique_ptr< Epetra_FEVector > vector
 
std::unique_ptr< Epetra_MultiVector > nonlocal_vector
 
IndexSet owned_elements
 

Friends

class internal::VectorReference
 

Related Functions

(Note that these are not member functions.)

void swap (Vector &u, Vector &v)
 

Detailed Description

This class implements a wrapper around the Trilinos distributed vector class Epetra_FEVector, the (parallel) partitioning of which is governed by an Epetra_Map. The Epetra_FEVector is precisely the kind of vector we deal with all the time: we typically obtain it from some assembly process, during which entries that are not locally owned may need to be written to and hence must be forwarded to the owning process.

The interface of this class is modeled after the existing Vector class in deal.II. It has almost the same member functions and is often interchangeable with it. However, since Trilinos only supports a single scalar type (double), this class is not templated and only works with that type.

Note that Trilinos only guarantees that operations do what you expect if the function GlobalAssemble has been called after vector assembly in order to distribute the data. This is necessary since some processes might have accumulated data for elements they do not own, and this data must be sent to the owning process. In order to avoid using the wrong data, you need to call Vector::compress() before you actually use the vectors.
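
For example, an assembly loop would typically end with such a call (a minimal sketch; locally_owned, mpi_communicator, and the hypothetical set entries_this_process_contributes_to are assumed to be defined elsewhere):

TrilinosWrappers::MPI::Vector rhs(locally_owned, mpi_communicator);
for (const auto i : entries_this_process_contributes_to)
  rhs(i) += 1.0;                      // may touch entries owned elsewhere
rhs.compress(VectorOperation::add);   // ship contributions to their owners
const double norm = rhs.l2_norm();    // only now is the vector safe to use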

Parallel communication model

The parallel functionality of Trilinos is built on top of the Message Passing Interface (MPI). MPI's communication model is built on collective communications: if one process wants something from another, that other process has to be willing to accept this communication. A process cannot query data from another process by calling a remote function without that other process expecting such a transaction. The consequence is that most of the operations in the base class of this class have to be called collectively. For example, if you want to compute the l2 norm of a parallel vector, all processes across which this vector is shared have to call the l2_norm function. If you don't do this, but instead only call the l2_norm function on one process, then the following happens: this one process will call one of the collective MPI functions and wait for all the other processes to join in. Since the other processes don't call this function, you will either get a time-out on the first process or, worse, by the time a subsequent call to a Trilinos function generates an MPI message on the other processes, you will get a cryptic message that only a subset of processes attempted a communication. These bugs can be very hard to figure out, unless you are well-acquainted with the communication model of MPI and know which functions may generate MPI messages.

One particular case where an MPI message may be generated unexpectedly is discussed below.

Accessing individual elements of a vector

Trilinos does of course allow read access to individual elements of a vector, but in the distributed case only to elements that are stored locally. We implement this through calls like d=vec(i). However, if you access an element outside the locally stored range, an exception is generated.

In contrast to read access, Trilinos (and the respective deal.II wrapper classes) allows writing to (or adding to) individual elements of vectors, even if they are stored on a different process. You can do this by writing into or adding to elements using the syntax vec(i)=d or vec(i)+=d, or similar operations. There is one catch, however, that may lead to very confusing error messages: Trilinos requires application programs to call the compress() function when they switch from performing a set of operations that add to elements to performing a set of operations that write to elements. The reasoning is that all processes might accumulate addition operations to elements, even if multiple processes add to the same elements. By the time compress() is next called, all these additions are executed. However, if one process adds to an element and another overwrites it, the order of execution would yield non-deterministic behavior unless we make sure that a synchronization with compress() happens in between.

In order to make sure these calls to compress() happen at the appropriate time, the deal.II wrappers keep a state variable that stores which kind of operation is presently allowed: additions or writes. If a wrapper encounters an operation of the opposite kind, it calls compress() and flips the state. This can sometimes lead to very confusing behavior, for example in code that looks like this:

// do some write operations on the vector
for (size_type i = 0; i < vector.size(); ++i)
  vector(i) = i;

// do some additions to vector elements, but
// only for some elements
for (size_type i = 0; i < vector.size(); ++i)
  if (some_condition(i) == true)
    vector(i) += 1;

// do another collective operation
const double norm = vector.l2_norm();

This code can run into trouble: by the time we see the first addition operation, we need to flush the overwrite buffers for the vector, and the deal.II library will do so by calling compress(). However, it will only do so for the processes that actually perform an addition; if the condition is never true on one of the processes, then that process never reaches the compress() call, whereas all the other ones do. This gets us into trouble, since all the other processes hang in the call that flushes the write buffers while the remaining process advances to the computation of the l2 norm. At this point, you will get an error that some operation was attempted by only a subset of processes. This behavior may seem surprising, unless you know that write/addition operations on single elements may trigger this behavior.

The problem described here may be avoided by placing additional calls to compress(), or by making sure that all processes do the same type of operation at the same time, for example by placing zero additions if necessary.
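
For the code above, this could look as follows (a sketch reusing the hypothetical vector and some_condition from the example); note that every process executes both compress() calls, whether or not its condition ever becomes true:

for (size_type i = 0; i < vector.size(); ++i)
  vector(i) = i;
vector.compress(VectorOperation::insert); // all processes flush writes

for (size_type i = 0; i < vector.size(); ++i)
  if (some_condition(i) == true)
    vector(i) += 1;
vector.compress(VectorOperation::add);    // all processes flush additions

const double norm = vector.l2_norm();     // collective, now safe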

Ghost elements of vectors

Parallel vectors come in two kinds: without and with ghost elements. Vectors without ghost elements uniquely partition the vector elements between processors: each vector entry has exactly one processor that owns it. For such vectors, you can read those elements that the processor you are currently on owns, and you can write into any element whether you own it or not: if you don't own it, the value written or added to a vector element will be shipped to the processor that owns this vector element the next time you call compress(), as described above.

What we call a 'ghosted' vector (see vectors with ghost elements) is simply a view of the parallel vector in which the element distributions overlap. The 'ghosted' Trilinos vector in itself has no idea of which entries are ghosted and which are locally owned. In fact, a ghosted vector may not even store all of the elements a non-ghosted vector would store on the current processor. Consequently, for Trilinos vectors, there is no notion of an 'owner' of vector elements in the way we have it in the non-ghosted case.

This explains why we do not allow writing into ghosted vectors on the Trilinos side: who would be responsible for taking care of the duplicated entries, given that there is no information about which indices are locally owned? In other words, since a processor does not know which other processors own an element, to whom would it send a value written to it? The only possibility would be to send this information to all other processors, but that is clearly not practical. Thus, we only allow reading from ghosted vectors, which however we do very often.

So how do you fill a ghosted vector if you cannot write to it? This only happens through assignment from a non-ghosted vector. It can go both ways (a non-ghosted vector is assigned to a ghosted vector, or a ghosted vector is assigned to a non-ghosted one; the latter typically only requires extracting the locally owned part, as ghosted vectors most often store a superset of the elements of non-ghosted ones). In general, such an assignment sends data around, and what exactly happens depends on the different views of the two vectors. Trilinos also allows you to extract subvectors from a larger vector in this way.
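
A typical exchange might look like this (a sketch; locally_owned and locally_relevant are assumed to be IndexSet objects describing the owned entries and the owned-plus-ghost entries, respectively):

// a non-ghosted vector that can be written into:
TrilinosWrappers::MPI::Vector distributed(locally_owned, mpi_communicator);
// ... fill 'distributed' and call compress(), then:

// a ghosted, read-only view that also stores the ghost entries:
TrilinosWrappers::MPI::Vector ghosted(locally_owned, locally_relevant,
                                      mpi_communicator);
ghosted = distributed; // this assignment communicates the ghost values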

Thread safety of Trilinos vectors

When writing into Trilinos vectors from several threads in shared memory, several things must be kept in mind, as this class contains no built-in locks to prevent data races. Simultaneous access to the same vector entry at the same time results in a data race and must be explicitly avoided by the user. However, it is possible to access different entries of the vector from several threads simultaneously when only one MPI process is present, or when the vector has been constructed with an additional index set for ghost entries in write mode.

Author
Martin Kronbichler, Wolfgang Bangerth, Daniel Arndt, 2008, 2009, 2017

Definition at line 399 of file trilinos_vector.h.

Member Typedef Documentation

◆ value_type

Declare some of the standard types used in all containers. These types are analogous to those in the C++ standard library's std::vector<...> class.

Definition at line 407 of file trilinos_vector.h.

Constructor & Destructor Documentation

◆ Vector() [1/7]

Vector< Number >::Vector ( )

Default constructor that generates an empty (zero size) vector. The function reinit() will have to give the vector the correct size and distribution among processes in case of an MPI run.

Definition at line 68 of file trilinos_vector.cc.

◆ Vector() [2/7]

Vector< Number >::Vector ( const Vector & v)

Copy constructor using the given vector.

Definition at line 88 of file trilinos_vector.cc.

◆ Vector() [3/7]

Vector< Number >::Vector ( const IndexSet & parallel_partitioning,
const MPI_Comm &  communicator = MPI_COMM_WORLD 
)
explicit

This constructor takes an IndexSet that defines how to distribute the individual components among the MPI processors. Since it also includes information about the size of the vector, this is all we need to generate a parallel vector.

Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.

In case the provided IndexSet forms an overlapping partitioning, it is not clear which elements are owned by which process and locally_owned_elements() will return an IndexSet of size zero.

See also
vectors with ghost elements

Definition at line 79 of file trilinos_vector.cc.
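
For example, a vector of global size 100, split into contiguous chunks across the processes of MPI_COMM_WORLD, could be created as follows (a sketch):

const unsigned int my_rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

IndexSet locally_owned(100);
locally_owned.add_range(my_rank * 100 / n_procs,
                        (my_rank + 1) * 100 / n_procs);

TrilinosWrappers::MPI::Vector v(locally_owned, MPI_COMM_WORLD);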

◆ Vector() [4/7]

Vector< Number >::Vector ( const IndexSet & local,
const IndexSet & ghost,
const MPI_Comm &  communicator = MPI_COMM_WORLD 
)

Creates a ghosted parallel vector.

Depending on whether the ghost argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.

See also
vectors with ghost elements

Definition at line 126 of file trilinos_vector.cc.

◆ Vector() [5/7]

Vector< Number >::Vector ( const IndexSet & parallel_partitioning,
const Vector & v,
const MPI_Comm &  communicator = MPI_COMM_WORLD 
)

Copy constructor from the TrilinosWrappers vector class. Since a vector of this class does not necessarily need to be distributed among processes, the user needs to supply us with an IndexSet and an MPI communicator that set the partitioning details.

Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.

See also
vectors with ghost elements

Definition at line 107 of file trilinos_vector.cc.

◆ Vector() [6/7]

template<typename Number >
TrilinosWrappers::MPI::Vector::Vector ( const IndexSet & parallel_partitioning,
const ::Vector< Number > &  v,
const MPI_Comm &  communicator = MPI_COMM_WORLD 
)

Copy-constructor from deal.II vectors. Sets the dimension to that of the given vector, and copies all the elements.

Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.

See also
vectors with ghost elements

◆ Vector() [7/7]

Vector< Number >::Vector ( Vector &&  v)
noexcept

Move constructor. Creates a new vector by stealing the internal data of the vector v.

Definition at line 98 of file trilinos_vector.cc.

◆ ~Vector()

TrilinosWrappers::MPI::Vector::~Vector ( )
override default

Destructor.

Member Function Documentation

◆ clear()

void Vector< Number >::clear ( )

Release all memory and return to a state just like after having called the default constructor.

Definition at line 137 of file trilinos_vector.cc.

◆ reinit() [1/4]

void Vector< Number >::reinit ( const Vector & v,
const bool  omit_zeroing_entries = false,
const bool  allow_different_maps = false 
)

Reinit functionality. This function sets the calling vector to the dimension and the parallel distribution of the input vector, but does not copy the elements in v. If omit_zeroing_entries is not true, the elements in the vector are initialized with zero. If it is set to true, the vector entries are in an unspecified state and the user has to set all elements. In the current implementation, this method does not touch the vector entries in case the vector layout is unchanged from before, otherwise entries are set to zero. Note that this behavior might change between releases without notification.

This function has a third argument, allow_different_maps, that allows for an exchange of data between two equal-sized vectors (but being distributed differently among the processors). A trivial application of this function is to generate a replication of a whole vector on each machine, when the calling vector is built with a map consisting of all indices on each process, and v is a distributed vector. In this case, the variable omit_zeroing_entries needs to be set to false, since it does not make sense to exchange data between differently parallelized vectors without touching the elements.

Definition at line 196 of file trilinos_vector.cc.
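
The replication use case mentioned above might then look like this (a sketch; distributed is assumed to be an existing, non-ghosted parallel vector):

// a vector that stores every element on every process:
TrilinosWrappers::MPI::Vector replicated(
  complete_index_set(distributed.size()), mpi_communicator);
// exchange data between the two differently partitioned vectors:
replicated.reinit(distributed,
                  /*omit_zeroing_entries=*/false,
                  /*allow_different_maps=*/true);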

◆ reinit() [2/4]

void Vector< Number >::reinit ( const IndexSet & parallel_partitioning,
const MPI_Comm &  communicator = MPI_COMM_WORLD,
const bool  omit_zeroing_entries = false 
)

Reinit functionality. This function destroys the old vector content and generates a new one based on the input partitioning. The flag omit_zeroing_entries determines whether the vector should be filled with zero (false). If the flag is set to true, the vector entries are in an unspecified state and the user has to set all elements. In the current implementation, this method still sets the entries to zero, but this might change between releases without notification.

Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.

In case parallel_partitioning is overlapping, it is not clear which process should own which elements. Hence, locally_owned_elements() returns an empty IndexSet in this case.

See also
vectors with ghost elements

Definition at line 155 of file trilinos_vector.cc.

◆ reinit() [3/4]

void Vector< Number >::reinit ( const IndexSet & locally_owned_entries,
const IndexSet & ghost_entries,
const MPI_Comm &  communicator = MPI_COMM_WORLD,
const bool  vector_writable = false 
)

Reinit functionality. This function destroys the old vector content and generates a new one based on the input partitioning. In addition to just specifying one index set as in all the other methods above, this method allows supplying an additional set of ghost entries. There are two different versions of a vector that can be created. If the flag vector_writable is set to false, the vector only allows read access to the joint set of locally_owned_entries and ghost_entries. The effect of the reinit method is then equivalent to calling the other reinit method with an index set containing both the locally owned entries and the ghost entries.

If the flag vector_writable is set to true, this creates an alternative storage scheme for ghost elements that allows multiple threads to write into the vector (for the other reinit methods, only one thread is allowed to write into the ghost entries at a time).

Depending on whether the ghost_entries argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.

See also
vectors with ghost elements

Definition at line 358 of file trilinos_vector.cc.
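
For example (a sketch; locally_owned_entries and ghost_entries are assumed to be suitable IndexSet objects, and mpi_communicator an MPI communicator):

TrilinosWrappers::MPI::Vector v;
v.reinit(locally_owned_entries, ghost_entries, mpi_communicator,
         /*vector_writable=*/true);
// ghost entries can now be written or added to directly; the data is
// forwarded to the owning processes when compress() is called:
v.compress(VectorOperation::add);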

◆ reinit() [4/4]

void Vector< Number >::reinit ( const BlockVector & v,
const bool  import_data = false 
)

Create vector by merging components from a block vector.

Definition at line 281 of file trilinos_vector.cc.

◆ compress()

void Vector< Number >::compress ( ::VectorOperation::values  operation)

Compress the underlying representation of the Trilinos object, i.e. flush the buffers of the vector object if it has any. This function is necessary after writing into a vector element-by-element and before anything else can be done on it.

The argument can be used to specify the compress mode (Add or Insert) in case the vector has not been written to since the last time this function was called. The argument is ignored if the vector has been added to or written to since the last time compress() was called.

See Compressing distributed objects for more information.

Definition at line 581 of file trilinos_vector.cc.

◆ operator=() [1/4]

Vector& TrilinosWrappers::MPI::Vector::operator= ( const TrilinosScalar  s)

Set all components of the vector to the given number s. Simply pass this down to the base class, but we still need to declare this function to make the example given in the discussion about making the constructor explicit work.

Since the semantics of assigning a scalar to a vector are not immediately clear, this operator can only be used if you want to set the entire vector to zero. This allows the intuitive notation v=0.

◆ operator=() [2/4]

Vector & Vector< Number >::operator= ( const Vector & v)

Copy the given vector. Resize the present vector if necessary. In this case, the Epetra_Map that defines the parallel partitioning is also taken from the input vector.

Definition at line 416 of file trilinos_vector.cc.

◆ operator=() [3/4]

Vector & Vector< Number >::operator= ( Vector &&  v)
noexcept

Move the given vector. This operator replaces the present vector with v by efficiently swapping the internal data structures.

Definition at line 497 of file trilinos_vector.cc.

◆ operator=() [4/4]

template<typename Number >
Vector& TrilinosWrappers::MPI::Vector::operator= ( const ::Vector< Number > &  v)

Another copy function. This one takes a deal.II vector and copies it into a TrilinosWrappers vector. Note that since we do not provide any Epetra_Map that describes the partitioning of the vector among the MPI processes, the size of the TrilinosWrappers vector has to be the same as the size of the input vector.

◆ import_nonlocal_data_for_fe()

void Vector< Number >::import_nonlocal_data_for_fe ( const ::TrilinosWrappers::SparseMatrix & matrix,
const Vector & vector 
)

This reinit function is meant to be used for parallel calculations where some non-local data has to be used. The typical situation where one needs this function is the call of the FEValues<dim>::get_function_values function (or of some derivatives) in parallel. Since it is usually faster to retrieve the data in advance, this function can be called before the assembly forks out to the different processors. What this function does is the following: it takes the information in the columns of the given matrix and determines which data couples between the different processors. That data is then queried from the input vector. Note that you should not write to the resulting vector anymore, since some data can be stored several times on different processors, leading to unpredictable results. In particular, such a vector cannot be used for matrix-vector products, as is for example done during the solution of linear systems.

Definition at line 526 of file trilinos_vector.cc.

◆ import()

void Vector< Number >::import ( const LinearAlgebra::ReadWriteVector< double > &  rwv,
const VectorOperation::values  operation 
)

Imports all the elements present in the vector's IndexSet from the input vector rwv. The operation argument is used to decide whether the elements in rwv should be added to the current vector or should replace the current elements.

Definition at line 550 of file trilinos_vector.cc.
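
For example (a sketch; v is assumed to be an already initialized, non-ghosted vector of this class):

LinearAlgebra::ReadWriteVector<double> rwv(v.locally_owned_elements());
for (const auto i : v.locally_owned_elements())
  rwv(i) = 2.0;                           // fill the locally stored entries
v.import(rwv, VectorOperation::insert);   // replace v's elements by rwv's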

◆ operator==()

bool Vector< Number >::operator== ( const Vector & v) const

Test for equality. This function assumes that the present vector and the one to compare with already have the same size, since comparing vectors of different sizes does not make much sense anyway.

Definition at line 714 of file trilinos_vector.cc.

◆ operator!=()

bool Vector< Number >::operator!= ( const Vector & v) const

Test for inequality. This function assumes that the present vector and the one to compare with already have the same size, since comparing vectors of different sizes does not make much sense anyway.

Definition at line 731 of file trilinos_vector.cc.

◆ size()

size_type TrilinosWrappers::MPI::Vector::size ( ) const

Return the global dimension of the vector.

◆ local_size()

size_type TrilinosWrappers::MPI::Vector::local_size ( ) const

Return the local dimension of the vector, i.e. the number of elements stored on the present MPI process. For sequential vectors, this number is the same as size(), but for parallel vectors it may be smaller.

To figure out which elements exactly are stored locally, use local_range().

If the vector contains ghost elements, they are included in this number.

◆ local_range()

std::pair<size_type, size_type> TrilinosWrappers::MPI::Vector::local_range ( ) const

Return a pair of indices indicating which elements of this vector are stored locally. The first number is the index of the first element stored, the second the index of the one past the last one that is stored locally. If this is a sequential vector, then the result will be the pair (0,N), otherwise it will be a pair (i,i+n), where n=local_size() and i is the first element of the vector stored on this processor, corresponding to the half-open interval \([i,i+n)\).

Note
The description above is true most of the time, but not always. In particular, Trilinos vectors need not store contiguous ranges of elements such as \([i,i+n)\). Rather, it can store vectors where the elements are distributed in an arbitrary way across all processors and each processor simply stores a particular subset, not necessarily contiguous. In this case, this function clearly makes no sense since it could, at best, return a range that includes all elements that are stored locally. Thus, the function only succeeds if the locally stored range is indeed contiguous. It will trigger an assertion if the local portion of the vector is not contiguous.
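
If the locally stored range is indeed contiguous, the locally owned entries can be traversed like this (a sketch; v denotes a non-ghosted vector of this class):

const auto range = v.local_range();
for (auto i = range.first; i < range.second; ++i)
  v(i) = 2.0 * i; // access to locally owned entries is always allowed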

◆ in_local_range()

bool TrilinosWrappers::MPI::Vector::in_local_range ( const size_type  index) const

Return whether index is in the local range or not, see also local_range().

Note
The same limitation for the applicability of this function applies as listed in the documentation of local_range().

◆ locally_owned_elements()

IndexSet TrilinosWrappers::MPI::Vector::locally_owned_elements ( ) const

Return an index set that describes which elements of this vector are owned by the current processor. Note that this index set does not include elements this vector may store locally as ghost elements but that are in fact owned by another processor. As a consequence, if this is a distributed vector, the index sets returned on different processors will form disjoint sets that add up to the complete index set. Obviously, if a vector is created on only one processor, then the result would satisfy

vec.locally_owned_elements() == complete_index_set (vec.size())

◆ has_ghost_elements()

bool TrilinosWrappers::MPI::Vector::has_ghost_elements ( ) const

Return whether the vector contains ghost elements. The answer is true if there are ghost elements on at least one process.

See also
vectors with ghost elements

◆ update_ghost_values()

void TrilinosWrappers::MPI::Vector::update_ghost_values ( ) const

This function only exists for compatibility with the LinearAlgebra::distributed::Vector class and does nothing: this class implements ghost value updates in a different way that is a better fit with the underlying Trilinos vector object.

◆ operator*()

TrilinosScalar TrilinosWrappers::MPI::Vector::operator* ( const Vector & vec) const

Return the scalar (inner) product of two vectors. The vectors must have the same size.

◆ norm_sqr()

real_type TrilinosWrappers::MPI::Vector::norm_sqr ( ) const

Return the square of the \(l_2\)-norm.

◆ mean_value()

TrilinosScalar TrilinosWrappers::MPI::Vector::mean_value ( ) const

Mean value of the elements of this vector.

◆ min()

TrilinosScalar TrilinosWrappers::MPI::Vector::min ( ) const

Compute the minimal value of the elements of this vector.

◆ max()

TrilinosScalar TrilinosWrappers::MPI::Vector::max ( ) const

Compute the maximal value of the elements of this vector.

◆ l1_norm()

real_type TrilinosWrappers::MPI::Vector::l1_norm ( ) const

\(l_1\)-norm of the vector. The sum of the absolute values.

◆ l2_norm()

real_type TrilinosWrappers::MPI::Vector::l2_norm ( ) const

\(l_2\)-norm of the vector. The square root of the sum of the squares of the elements.

◆ lp_norm()

real_type TrilinosWrappers::MPI::Vector::lp_norm ( const TrilinosScalar  p) const

\(l_p\)-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

◆ linfty_norm()

real_type TrilinosWrappers::MPI::Vector::linfty_norm ( ) const

Maximum absolute value of the elements.

◆ add_and_dot()

TrilinosScalar TrilinosWrappers::MPI::Vector::add_and_dot ( const TrilinosScalar  a,
const Vector & V,
const Vector & W 
)

Performs a combined operation of a vector addition and a subsequent inner product, returning the value of the inner product. In other words, the result of this function is the same as if the user called

this->add(a, V);
return_value = *this * W;

The reason this function exists is for compatibility with deal.II's own vector classes, which can implement this functionality with less memory transfer. However, for Trilinos vectors such a combined operation is not natively supported and thus the cost is completely equivalent to calling the two methods separately.

For complex-valued vectors, the scalar product in the second step is implemented as \(\left<v,w\right>=\sum_i v_i \bar{w_i}\).

◆ all_zero()

bool Vector< Number >::all_zero ( ) const

Return whether the vector contains only elements with value zero. This is a collective operation. This function is expensive, because potentially all elements have to be checked.

Definition at line 741 of file trilinos_vector.cc.

◆ is_non_negative()

bool Vector< Number >::is_non_negative ( ) const

Return true if the vector has no negative entries, i.e. all entries are zero or positive. This function is used, for example, to check whether refinement indicators are really all positive (or zero).

Definition at line 774 of file trilinos_vector.cc.

◆ operator()() [1/2]

reference TrilinosWrappers::MPI::Vector::operator() ( const size_type  index)

Provide access to a given element, both read and write.

When using a vector distributed with MPI, this operation only makes sense for elements that are actually present on the calling processor. Otherwise, an exception is thrown.

◆ operator()() [2/2]

TrilinosScalar Vector< Number >::operator() ( const size_type  index) const

Provide read-only access to an element.

When using a vector distributed with MPI, this operation only makes sense for elements that are actually present on the calling processor. Otherwise, an exception is thrown.

Definition at line 653 of file trilinos_vector.cc.

◆ operator[]() [1/2]

reference TrilinosWrappers::MPI::Vector::operator[] ( const size_type  index)

Provide access to a given element, both read and write.

Exactly the same as operator().

◆ operator[]() [2/2]

TrilinosScalar TrilinosWrappers::MPI::Vector::operator[] ( const size_type  index) const

Provide read-only access to an element.

Exactly the same as operator().

◆ extract_subvector_to() [1/2]

void TrilinosWrappers::MPI::Vector::extract_subvector_to ( const std::vector< size_type > &  indices,
std::vector< TrilinosScalar > &  values 
) const

Instead of getting individual elements of a vector via operator(), this function allows getting a whole set of elements at once. The indices of the elements to be read are stated in the first argument, the corresponding values are returned in the second.

If the current vector is called v, then this function is the equivalent to the code

for (unsigned int i = 0; i < indices.size(); ++i)
  values[i] = v[indices[i]];
Precondition
The sizes of the indices and values arrays must be identical.

◆ extract_subvector_to() [2/2]

template<typename ForwardIterator , typename OutputIterator >
void TrilinosWrappers::MPI::Vector::extract_subvector_to ( ForwardIterator  indices_begin,
const ForwardIterator  indices_end,
OutputIterator  values_begin 
) const

Instead of getting individual elements of a vector via operator(), this function allows getting a whole set of elements at once. In contrast to the previous function, this function obtains the indices of the elements by dereferencing all elements of the iterator range provided by the first two arguments, and puts the vector values into memory locations obtained by dereferencing a range of iterators starting at the location pointed to by the third argument.

If the current vector is called v, then this function is the equivalent to the code

ForwardIterator indices_p = indices_begin;
OutputIterator  values_p  = values_begin;
while (indices_p != indices_end)
  {
    *values_p = v[*indices_p];
    ++indices_p;
    ++values_p;
  }
Precondition
It must be possible to write into as many memory locations starting at values_begin as there are iterators between indices_begin and indices_end.

◆ begin() [1/2]

iterator TrilinosWrappers::MPI::Vector::begin ( )

Make the Vector class a bit like the vector<> class of the C++ standard library by returning iterators to the start and end of the locally owned elements of this vector. The ordering of local elements corresponds to the one given by the global indices in case the vector is constructed from an IndexSet or other methods in deal.II (note that an Epetra_Map can contain elements in arbitrary orders, though).

It holds that end() - begin() == local_size().
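
This also allows using standard algorithms on the locally owned part of the vector, for example as follows (a sketch; v denotes a vector of this class, and std::accumulate requires the <numeric> header):

// sum of the locally owned entries:
const double local_sum = std::accumulate(v.begin(), v.end(), 0.0);
// reduce over all processes if the global sum is needed:
const double global_sum =
  Utilities::MPI::sum(local_sum, v.get_mpi_communicator());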

◆ begin() [2/2]

const_iterator TrilinosWrappers::MPI::Vector::begin ( ) const

Return constant iterator to the start of the locally owned elements of the vector.

◆ end() [1/2]

iterator TrilinosWrappers::MPI::Vector::end ( )

Return an iterator pointing to the element past the end of the array of locally owned entries.

◆ end() [2/2]

const_iterator TrilinosWrappers::MPI::Vector::end ( ) const

Return a constant iterator pointing to the element past the end of the array of the locally owned entries.

◆ set() [1/3]

void TrilinosWrappers::MPI::Vector::set ( const std::vector< size_type > &  indices,
const std::vector< TrilinosScalar > &  values 
)

A collective set operation: instead of setting individual elements of a vector, this function allows setting a whole set of elements at once. The indices of the elements to be set are stated in the first argument, the corresponding values in the second.
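
For example (a sketch; v is assumed to be a vector that stores the indices used below):

std::vector<TrilinosWrappers::MPI::Vector::size_type> indices = {0, 4, 7};
std::vector<TrilinosScalar>                           values  = {1., 2., 3.};
v.set(indices, values);
v.compress(VectorOperation::insert); // finalize before reading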

◆ set() [2/3]

void TrilinosWrappers::MPI::Vector::set ( const std::vector< size_type > &  indices,
const ::Vector< TrilinosScalar > &  values 
)

This is a second collective set operation. The difference is that this function takes a deal.II vector of values.

◆ set() [3/3]

void TrilinosWrappers::MPI::Vector::set ( const size_type  n_elements,
const size_type *  indices,
const TrilinosScalar *  values 
)

This collective set operation is of lower level and can handle anything else — the only thing you have to provide is an address where all the indices are stored and the number of elements to be set.

◆ add() [1/7]

void TrilinosWrappers::MPI::Vector::add ( const std::vector< size_type > &  indices,
const std::vector< TrilinosScalar > &  values 
)

A collective add operation: This function adds a whole set of values stored in values to the vector components specified by indices.

◆ add() [2/7]

void TrilinosWrappers::MPI::Vector::add ( const std::vector< size_type > &  indices,
const ::Vector< TrilinosScalar > &  values 
)

This is a second collective add operation. The difference is that this function takes a deal.II vector of values.

◆ add() [3/7]

void TrilinosWrappers::MPI::Vector::add ( const size_type  n_elements,
const size_type *  indices,
const TrilinosScalar *  values 
)

Take an address where n_elements are stored contiguously and add them into the vector. Handles all cases which are not covered by the other two add() functions above.
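
For example (a sketch, analogous to the set() variant above; v is again assumed to exist):

const TrilinosWrappers::MPI::Vector::size_type indices[] = {0, 4, 7};
const TrilinosScalar                           values[]  = {1., 2., 3.};
v.add(3, indices, values);
v.compress(VectorOperation::add);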

◆ operator*=()

Vector& TrilinosWrappers::MPI::Vector::operator*= ( const TrilinosScalar  factor)

Multiply the entire vector by a fixed factor.

◆ operator/=()

Vector& TrilinosWrappers::MPI::Vector::operator/= ( const TrilinosScalar  factor)

Divide the entire vector by a fixed factor.

◆ operator+=()

Vector& TrilinosWrappers::MPI::Vector::operator+= ( const Vector & V)

Add the given vector to the present one.

◆ operator-=()

Vector& TrilinosWrappers::MPI::Vector::operator-= ( const Vector & V)

Subtract the given vector from the present one.

◆ add() [4/7]

void TrilinosWrappers::MPI::Vector::add ( const TrilinosScalar  s)

Addition of s to all components. Note that s is a scalar and not a vector.

◆ add() [5/7]

void Vector< Number >::add ( const Vector & V,
const bool  allow_different_maps = false 
)

Simple vector addition, equal to the operator+=.

However, if the second argument allow_different_maps is set, then it is possible to add data from a vector that uses a different map, i.e., a vector whose elements are split across processors differently. This may include vectors with ghost elements, for example. In general, however, adding vectors with a different element-to-processor map requires communicating data among processors and, consequently, is a slower operation than when using vectors with the same map.

Definition at line 679 of file trilinos_vector.cc.

◆ add() [6/7]

void TrilinosWrappers::MPI::Vector::add ( const TrilinosScalar  a,
const Vector & V 
)

Simple addition of a multiple of a vector, i.e. *this += a*V.

◆ add() [7/7]

void TrilinosWrappers::MPI::Vector::add ( const TrilinosScalar  a,
const Vector & V,
const TrilinosScalar  b,
const Vector & W 
)

Multiple addition of scaled vectors, i.e. *this += a*V + b*W.

◆ sadd() [1/2]

void TrilinosWrappers::MPI::Vector::sadd ( const TrilinosScalar  s,
const Vector & V 
)

Scaling and simple vector addition, i.e. *this = s*(*this) + V.

◆ sadd() [2/2]

void TrilinosWrappers::MPI::Vector::sadd ( const TrilinosScalar  s,
const TrilinosScalar  a,
const Vector & V 
)

Scaling and simple addition, i.e. *this = s*(*this) + a*V.

◆ scale()

void TrilinosWrappers::MPI::Vector::scale ( const Vector & scaling_factors)

Scale each element of this vector by the corresponding element in the argument. This function is mostly meant to simulate multiplication (and immediate re-assignment) by a diagonal scaling matrix.

◆ equ()

void TrilinosWrappers::MPI::Vector::equ ( const TrilinosScalar  a,
const Vector & V 
)

Assignment *this = a*V.

◆ trilinos_vector() [1/2]

const Epetra_MultiVector& TrilinosWrappers::MPI::Vector::trilinos_vector ( ) const

Return a const reference to the underlying Trilinos Epetra_MultiVector class.

◆ trilinos_vector() [2/2]

Epetra_FEVector& TrilinosWrappers::MPI::Vector::trilinos_vector ( )

Return a (modifiable) reference to the underlying Trilinos Epetra_FEVector class.

◆ vector_partitioner()

const Epetra_Map& TrilinosWrappers::MPI::Vector::vector_partitioner ( ) const

Return a const reference to the underlying Trilinos Epetra_Map that sets the parallel partitioning of the vector.

Deprecated:
Use trilinos_partitioner() instead.

◆ trilinos_partitioner()

const Epetra_BlockMap& TrilinosWrappers::MPI::Vector::trilinos_partitioner ( ) const

Return a const reference to the underlying Trilinos Epetra_BlockMap that sets the parallel partitioning of the vector.

◆ print()

void Vector< Number >::print ( std::ostream &  out,
const unsigned int  precision = 3,
const bool  scientific = true,
const bool  across = true 
) const

Print to a stream. precision denotes the desired precision with which values shall be printed, scientific whether scientific notation shall be used. If across is true then the vector is printed in a line, while if false then the elements are printed on a separate line each.

Definition at line 813 of file trilinos_vector.cc.

◆ swap()

void Vector< Number >::swap ( Vector & v)

Swap the contents of this vector and the other vector v. One could do this operation with a temporary variable and copying over the data elements, but this function is significantly more efficient since it only swaps the pointers to the data of the two vectors and therefore does not need to allocate temporary storage and move data around. Note that the vectors need to be of the same size and based on the same map.

This function is analogous to the swap function of all C++ standard containers. Also, there is a global function swap(u,v) that simply calls u.swap(v), again in analogy to standard functions.

Definition at line 861 of file trilinos_vector.cc.

◆ memory_consumption()

std::size_t Vector< Number >::memory_consumption ( ) const

Estimate for the memory consumption in bytes.

Definition at line 874 of file trilinos_vector.cc.

◆ get_mpi_communicator()

const MPI_Comm& TrilinosWrappers::MPI::Vector::get_mpi_communicator ( ) const

Return a reference to the MPI communicator object in use with this object.
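
This can be used, for example, to query the parallel environment the vector lives in (a sketch; v denotes a vector of this class):

const unsigned int n_ranks =
  Utilities::MPI::n_mpi_processes(v.get_mpi_communicator());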

Friends And Related Function Documentation

◆ internal::VectorReference

friend class internal::VectorReference
friend

Make the reference class a friend.

Definition at line 1341 of file trilinos_vector.h.

◆ swap()

void swap ( Vector & u,
Vector & v 
)
related

Global function swap which overloads the default implementation of the C++ standard library which uses a temporary object. The function simply exchanges the data of the two vectors.

Author
Martin Kronbichler, Wolfgang Bangerth, 2008

Definition at line 1358 of file trilinos_vector.h.

Member Data Documentation

◆ last_action

Epetra_CombineMode TrilinosWrappers::MPI::Vector::last_action
private

Trilinos doesn't allow mixing additions to vector entries with overwriting them (in order to make the synchronization of parallel computations simpler). The way we do it is to store, for each access operation, whether it is an insertion or an addition. If the previous operation was of a different type, then we first have to flush the Trilinos buffers; otherwise, we can simply continue. Luckily, Trilinos has an object for this that already performs all the parallel communication needed in such a case, so we simply use their model, which stores whether the last operation was an addition or an insertion.

Definition at line 1305 of file trilinos_vector.h.

◆ compressed

bool TrilinosWrappers::MPI::Vector::compressed
private

A boolean variable to hold information on whether the vector is compressed or not.

Definition at line 1311 of file trilinos_vector.h.

◆ has_ghosts

bool TrilinosWrappers::MPI::Vector::has_ghosts
private

Whether this vector has ghost elements. This is true on all processors even if only one of them has any ghost elements.

Definition at line 1317 of file trilinos_vector.h.

◆ vector

std::unique_ptr<Epetra_FEVector> TrilinosWrappers::MPI::Vector::vector
private

Pointer to the actual Epetra vector object. This may represent a vector that is in fact distributed among multiple processors. The object requires an existing Epetra_Map for storing data when setting it up.

Definition at line 1324 of file trilinos_vector.h.

◆ nonlocal_vector

std::unique_ptr<Epetra_MultiVector> TrilinosWrappers::MPI::Vector::nonlocal_vector
private

A vector object in Trilinos to be used for collecting the non-local elements if the vector was constructed with an additional IndexSet describing ghost elements.

Definition at line 1331 of file trilinos_vector.h.

◆ owned_elements

IndexSet TrilinosWrappers::MPI::Vector::owned_elements
private

An IndexSet storing the indices this vector owns exclusively.

Definition at line 1336 of file trilinos_vector.h.


The documentation for this class was generated from the following files:

trilinos_vector.h
trilinos_vector.cc