Reference documentation for deal.II version 8.5.1
#include <deal.II/lac/trilinos_vector.h>
Public Types | |
typedef ::types::global_dof_index | size_type |
Public Types inherited from TrilinosWrappers::VectorBase | |
typedef TrilinosScalar | value_type |
Public Member Functions | |
Basic constructors and initialization. | |
Vector () | |
Vector (const Vector &v) | |
Vector (Vector &&v) | |
~Vector () | |
void | reinit (const VectorBase &v, const bool omit_zeroing_entries=false, const bool allow_different_maps=false) |
void | reinit (const BlockVector &v, const bool import_data=false) |
Vector & | operator= (const TrilinosScalar s) |
Vector & | operator= (const Vector &v) |
Vector & | operator= (Vector &&v) |
Vector & | operator= (const ::TrilinosWrappers::Vector &v) |
template<typename Number > | |
Vector & | operator= (const ::Vector< Number > &v) |
void | import_nonlocal_data_for_fe (const ::TrilinosWrappers::SparseMatrix &matrix, const Vector &vector) |
Initialization with an Epetra_Map | |
Vector (const Epetra_Map &parallel_partitioning) (deprecated) | |
Vector (const Epetra_Map &parallel_partitioning, const VectorBase &v) (deprecated) | |
template<typename number > | |
void | reinit (const Epetra_Map &parallel_partitioner, const ::Vector< number > &v) (deprecated) |
void | reinit (const Epetra_Map &parallel_partitioning, const bool omit_zeroing_entries=false) (deprecated) |
template<typename Number > | |
Vector (const Epetra_Map &parallel_partitioning, const ::Vector< Number > &v) (deprecated) | |
Initialization with an IndexSet | |
Vector (const IndexSet &parallel_partitioning, const MPI_Comm &communicator=MPI_COMM_WORLD) | |
Vector (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator=MPI_COMM_WORLD) | |
Vector (const IndexSet &parallel_partitioning, const VectorBase &v, const MPI_Comm &communicator=MPI_COMM_WORLD) | |
template<typename Number > | |
Vector (const IndexSet &parallel_partitioning, const ::Vector< Number > &v, const MPI_Comm &communicator=MPI_COMM_WORLD) | |
void | reinit (const IndexSet &parallel_partitioning, const MPI_Comm &communicator=MPI_COMM_WORLD, const bool omit_zeroing_entries=false) |
void | reinit (const IndexSet &locally_owned_entries, const IndexSet &ghost_entries, const MPI_Comm &communicator=MPI_COMM_WORLD, const bool vector_writable=false) |
Public Member Functions inherited from TrilinosWrappers::VectorBase | |
VectorBase () | |
VectorBase (const VectorBase &v) | |
virtual | ~VectorBase () |
void | clear () |
void | reinit (const VectorBase &v, const bool omit_zeroing_entries=false) |
void | compress (::VectorOperation::values operation) |
bool | is_compressed () const (deprecated) |
VectorBase & | operator= (const TrilinosScalar s) |
VectorBase & | operator= (const VectorBase &v) |
template<typename Number > | |
VectorBase & | operator= (const ::Vector< Number > &v) |
bool | operator== (const VectorBase &v) const |
bool | operator!= (const VectorBase &v) const |
size_type | size () const |
size_type | local_size () const |
std::pair< size_type, size_type > | local_range () const |
bool | in_local_range (const size_type index) const |
IndexSet | locally_owned_elements () const |
bool | has_ghost_elements () const |
TrilinosScalar | operator* (const VectorBase &vec) const |
real_type | norm_sqr () const |
TrilinosScalar | mean_value () const |
TrilinosScalar | minimal_value () const (deprecated) |
TrilinosScalar | min () const |
TrilinosScalar | max () const |
real_type | l1_norm () const |
real_type | l2_norm () const |
real_type | lp_norm (const TrilinosScalar p) const |
real_type | linfty_norm () const |
TrilinosScalar | add_and_dot (const TrilinosScalar a, const VectorBase &V, const VectorBase &W) |
bool | all_zero () const |
bool | is_non_negative () const |
reference | operator() (const size_type index) |
TrilinosScalar | operator() (const size_type index) const |
reference | operator[] (const size_type index) |
TrilinosScalar | operator[] (const size_type index) const |
void | extract_subvector_to (const std::vector< size_type > &indices, std::vector< TrilinosScalar > &values) const |
template<typename ForwardIterator , typename OutputIterator > | |
void | extract_subvector_to (ForwardIterator indices_begin, const ForwardIterator indices_end, OutputIterator values_begin) const |
TrilinosScalar | el (const size_type index) const (deprecated) |
iterator | begin () |
const_iterator | begin () const |
iterator | end () |
const_iterator | end () const |
void | set (const std::vector< size_type > &indices, const std::vector< TrilinosScalar > &values) |
void | set (const std::vector< size_type > &indices, const ::Vector< TrilinosScalar > &values) |
void | set (const size_type n_elements, const size_type *indices, const TrilinosScalar *values) |
void | add (const std::vector< size_type > &indices, const std::vector< TrilinosScalar > &values) |
void | add (const std::vector< size_type > &indices, const ::Vector< TrilinosScalar > &values) |
void | add (const size_type n_elements, const size_type *indices, const TrilinosScalar *values) |
VectorBase & | operator*= (const TrilinosScalar factor) |
VectorBase & | operator/= (const TrilinosScalar factor) |
VectorBase & | operator+= (const VectorBase &V) |
VectorBase & | operator-= (const VectorBase &V) |
void | add (const TrilinosScalar s) |
void | add (const VectorBase &V, const bool allow_different_maps=false) |
void | add (const TrilinosScalar a, const VectorBase &V) |
void | add (const TrilinosScalar a, const VectorBase &V, const TrilinosScalar b, const VectorBase &W) |
void | sadd (const TrilinosScalar s, const VectorBase &V) |
void | sadd (const TrilinosScalar s, const TrilinosScalar a, const VectorBase &V) |
void | sadd (const TrilinosScalar s, const TrilinosScalar a, const VectorBase &V, const TrilinosScalar b, const VectorBase &W) (deprecated) |
void | sadd (const TrilinosScalar s, const TrilinosScalar a, const VectorBase &V, const TrilinosScalar b, const VectorBase &W, const TrilinosScalar c, const VectorBase &X) (deprecated) |
void | scale (const VectorBase &scaling_factors) |
void | equ (const TrilinosScalar a, const VectorBase &V) |
void | equ (const TrilinosScalar a, const VectorBase &V, const TrilinosScalar b, const VectorBase &W) (deprecated) |
void | ratio (const VectorBase &a, const VectorBase &b) (deprecated) |
const Epetra_MultiVector & | trilinos_vector () const |
Epetra_FEVector & | trilinos_vector () |
const Epetra_Map & | vector_partitioner () const |
void | print (const char *format=0) const (deprecated) |
void | print (std::ostream &out, const unsigned int precision=3, const bool scientific=true, const bool across=true) const |
void | swap (VectorBase &v) |
std::size_t | memory_consumption () const |
const MPI_Comm & | get_mpi_communicator () const |
Public Member Functions inherited from Subscriptor | |
Subscriptor () | |
Subscriptor (const Subscriptor &) | |
Subscriptor (Subscriptor &&) | |
virtual | ~Subscriptor () |
Subscriptor & | operator= (const Subscriptor &) |
Subscriptor & | operator= (Subscriptor &&) |
void | subscribe (const char *identifier=0) const |
void | unsubscribe (const char *identifier=0) const |
unsigned int | n_subscriptions () const |
void | list_subscribers () const |
template<class Archive > | |
void | serialize (Archive &ar, const unsigned int version) |
Static Public Attributes | |
static const bool | supports_distributed_data = true |
Related Functions | |
(Note that these are not member functions.) | |
void | swap (Vector &u, Vector &v) |
Related Functions inherited from TrilinosWrappers::VectorBase | |
void | swap (VectorBase &u, VectorBase &v) |
Additional Inherited Members | |
Static Public Member Functions inherited from TrilinosWrappers::VectorBase | |
static ::ExceptionBase & | ExcDifferentParallelPartitioning () |
static ::ExceptionBase & | ExcTrilinosError (int arg1) |
static ::ExceptionBase & | ExcAccessToNonLocalElement (size_type arg1, size_type arg2, size_type arg3, size_type arg4) |
Static Public Member Functions inherited from Subscriptor | |
static ::ExceptionBase & | ExcInUse (int arg1, char *arg2, std::string &arg3) |
static ::ExceptionBase & | ExcNoSubscriber (char *arg1, char *arg2) |
This class implements a wrapper to use the Trilinos distributed vector class Epetra_FEVector. This class is derived from the TrilinosWrappers::VectorBase class and provides all functionality included there.
Note that Trilinos only guarantees that operations do what you expect if the function GlobalAssemble has been called after vector assembly in order to distribute the data. This is necessary since some processes might have accumulated contributions to elements they do not own; these contributions must be sent to the owning process. In order to avoid using wrong data, you need to call Vector::compress() before you actually use the vectors.
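As a rough sketch (the function and vector names here are placeholders, not part of the deal.II API), a typical assembly therefore ends like this:

    #include <deal.II/lac/trilinos_vector.h>

    using namespace dealii;

    void finish_assembly (TrilinosWrappers::MPI::Vector &rhs)
    {
      // ... during assembly, each process has called rhs(i) += ...,
      // possibly for entries i it does not own ...

      // Collective call: ships accumulated additions to the owning processes.
      rhs.compress (VectorOperation::add);

      // Only now are collective operations such as norms safe to use.
      const double norm = rhs.l2_norm ();
      (void) norm;
    }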
The parallel functionality of Trilinos is built on top of the Message Passing Interface (MPI). MPI's communication model is built on collective communications: if one process wants something from another, that other process has to be willing to accept this communication. A process cannot query data from another process by calling a remote function without that other process expecting such a transaction. The consequence is that most of the operations in the base class of this class have to be called collectively. For example, if you want to compute the l2 norm of a parallel vector, all processes across which this vector is shared have to call the l2_norm function. If you don't do this, but instead only call the l2_norm function on one process, then the following happens: this one process will call one of the collective MPI functions and wait for all the other processes to join in. Since the other processes don't call this function, you will either get a time-out on the first process, or, worse, by the time the next call to a Trilinos function generates an MPI message on the other processes, you will get a cryptic message that only a subset of processes attempted a communication. These bugs can be very hard to figure out, unless you are well-acquainted with the communication model of MPI and know which functions may generate MPI messages.
One particular case where an MPI message may be generated unexpectedly is discussed below.
Trilinos does, of course, allow read access to individual elements of a vector, but in the distributed case only to elements that are stored locally. We implement this through calls like d = vec(i). However, if you access an element outside the locally stored range, an exception is thrown.
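A short sketch of safe read access, assuming vec is an already initialized vector of this class:

    // Read only locally stored elements; vec(j) for a non-local index j
    // would abort with ExcAccessToNonLocalElement.
    const IndexSet owned = vec.locally_owned_elements ();
    double sum = 0;
    for (types::global_dof_index k = 0; k < owned.n_elements (); ++k)
      {
        const types::global_dof_index i = owned.nth_index_in_set (k);
        sum += vec (i);   // fine: i is stored locally
      }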
In contrast to read access, Trilinos (and the respective deal.II wrapper classes) allows writing (or adding) to individual elements of vectors even if they are stored on a different process, using the syntax vec(i) = d or vec(i) += d, or similar operations. There is one catch, however, that may lead to very confusing error messages: Trilinos requires application programs to call the compress() function when they switch from performing a set of operations that add to elements to performing a set of operations that write to elements. The reasoning is that all processes might accumulate addition operations to elements, even if multiple processes write to the same elements. By the time we call compress() the next time, all these additions are executed. However, if one process adds to an element and another overwrites it, the order of execution would yield non-deterministic behavior if we don't make sure that a synchronization with compress() happens in between.
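A minimal sketch of the required pattern, where the index i is a placeholder and may refer to an element owned by another process:

    vec (i) += 1.0;                          // addition phase
    vec.compress (VectorOperation::add);     // executes all pending additions

    vec (i) = 42.0;                          // write/overwrite phase
    vec.compress (VectorOperation::insert);  // makes the writes consistent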
In order to make sure these calls to compress() happen at the appropriate time, the deal.II wrappers keep a state variable that stores which kind of operation is presently allowed: additions or writes. If the library encounters an operation of the opposite kind, it calls compress() and flips the state. This can sometimes lead to very confusing behavior, in code that may for example look like the following sketch, where some_condition is a placeholder for any predicate that may hold on some processes and not on others:
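    TrilinosWrappers::MPI::Vector vector;
    // ... initialize the vector, then write to its elements:
    for (types::global_dof_index i = 0; i < vector.size (); ++i)
      vector (i) = i;

    // add to some elements, but only where the condition holds:
    for (types::global_dof_index i = 0; i < vector.size (); ++i)
      if (some_condition (i))
        vector (i) += 1;

    // another collective operation:
    const double norm = vector.l2_norm ();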
This code can run into trouble: by the time we see the first addition operation, we need to flush the overwrite buffers for the vector, and the deal.II library will do so by calling compress(). However, it will only do so on those processes that actually perform an addition; if the condition is never true on one of the processes, then that one will never reach the compress() call, whereas all the others do. This gets us into trouble, since all the other processes hang in the call that flushes the write buffers, while the one remaining process advances to the call that computes the l2 norm. At this point, you will get an error that some operation was attempted by only a subset of processes. This behavior may seem surprising, unless you know that write/addition operations on single elements may trigger this behavior.
The problem described here may be avoided by placing additional calls to compress(), or by making sure that all processes perform the same type of operation at the same time, for example by performing zero additions where necessary, as in the sketch below.
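A sketch of the second remedy, using the same placeholder some_condition as above:

    // Every process now performs the same kind of operation,
    // adding zero where the condition does not hold.
    for (types::global_dof_index i = 0; i < vector.size (); ++i)
      vector (i) += (some_condition (i) ? 1.0 : 0.0);

    vector.compress (VectorOperation::add);  // reached by all processes
    const double norm = vector.l2_norm ();   // collective, and now safe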
Parallel vectors come in two kinds: without and with ghost elements. Vectors without ghost elements uniquely partition the vector elements between processors: each vector entry has exactly one processor that owns it. For such vectors, you can read those elements that the processor you are currently on owns, and you can write into any element whether you own it or not: if you don't own it, the value written or added to a vector element will be shipped to the processor that owns this vector element the next time you call compress(), as described above.
What we call a 'ghosted' vector (see vectors with ghost elements) is simply a view of the parallel vector in which the element distributions overlap. The 'ghosted' Trilinos vector in itself has no idea which entries are ghosted and which are locally owned. In fact, a ghosted vector may not even store all of the elements a non-ghosted vector would store on the current processor. Consequently, for Trilinos vectors there is no notion of an 'owner' of vector elements in the way we have it in the non-ghosted view.
This explains why we do not allow writing into ghosted vectors on the Trilinos side: Who would be responsible for taking care of the duplicated entries, given that there is no information about locally owned indices? In other words, since a processor does not know which other processors own an element, to whom would it send a value written to that element? The only possibility would be to send this information to all other processors, but that is clearly not practical. Thus, we only allow reading from ghosted vectors, which, however, we do very often.
So how do you fill a ghosted vector if you can't write to it? Only through assignment with a non-ghosted vector. It can go both ways: a non-ghosted vector can be assigned to a ghosted one, and a ghosted vector can be assigned to a non-ghosted one; the latter typically only requires extracting the locally owned part, as ghosted vectors most often store a superset of the elements of non-ghosted ones. In general, such an assignment sends data around, and what happens depends on the different views of the two vectors. Trilinos also allows you to get subvectors out of a big vector that way.
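A minimal sketch, assuming a global size of 100; in a real program the two index sets would typically be the locally owned and locally relevant degrees of freedom obtained from a DoFHandler:

    IndexSet locally_owned (100), locally_relevant (100);
    // ... fill both index sets appropriately for this process ...

    TrilinosWrappers::MPI::Vector distributed (locally_owned, MPI_COMM_WORLD);
    TrilinosWrappers::MPI::Vector ghosted (locally_owned, locally_relevant,
                                           MPI_COMM_WORLD);

    distributed = 1.0;      // write into the non-ghosted vector
    ghosted = distributed;  // assignment imports the ghost values
    // 'ghosted' may now be read, including at ghost indices,
    // but must not be written to.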
When writing into Trilinos vectors from several threads in shared memory, several things must be kept in mind, as this class has no built-in locks to prevent data races. Simultaneous access to the same vector entry at the same time results in a data race and must be explicitly avoided by the user. However, it is possible to access different entries of the vector from several threads simultaneously if only one MPI process is present, or if the vector has been constructed with an additional index set for ghost entries in write mode.
Definition at line 254 of file trilinos_vector.h.
Declare type for container size.
Definition at line 260 of file trilinos_vector.h.
Default constructor that generates an empty (zero size) vector. The function reinit() will have to give the vector the correct size and distribution among processes in case of an MPI run.
Definition at line 95 of file trilinos_vector.cc.
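A sketch of the intended use:

    TrilinosWrappers::MPI::Vector v;   // empty, size zero
    IndexSet owned (1000);
    owned.add_range (0, 1000);         // single-process example partition
    v.reinit (owned, MPI_COMM_WORLD);  // now sized and distributed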
Copy constructor using the given vector.
Definition at line 118 of file trilinos_vector.cc.
Move constructor. Creates a new vector by stealing the internal data of the vector v.
Definition at line 131 of file trilinos_vector.cc.
Destructor.
Definition at line 196 of file trilinos_vector.cc.
This constructor takes an Epetra_Map that already knows how to distribute the individual components among the MPI processors. Since it also includes information about the size of the vector, this is all we need to generate a parallel vector.
Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
This function is deprecated.
Definition at line 103 of file trilinos_vector.cc.
TrilinosWrappers::MPI::Vector::Vector (const Epetra_Map &parallel_partitioning, const VectorBase &v)
Copy constructor from the TrilinosWrappers vector class. Since a vector of this class does not necessarily need to be distributed among processes, the user needs to supply us with an Epetra_Map that sets the partitioning details.
Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
This function is deprecated.
Definition at line 144 of file trilinos_vector.cc.
template<typename Number >
TrilinosWrappers::MPI::Vector::Vector (const Epetra_Map &parallel_partitioning, const ::Vector< Number > &v)
Copy-constructor from deal.II vectors. Sets the dimension to that of the given vector, and copies all elements.
Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
This function is deprecated.
explicit TrilinosWrappers::MPI::Vector::Vector (const IndexSet &parallel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD)
This constructor takes an IndexSet that defines how to distribute the individual components among the MPI processors. Since it also includes information about the size of the vector, this is all we need to generate a parallel vector.
Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
In case the provided IndexSet forms an overlapping partitioning, it is not clear which elements are owned by which process, and locally_owned_elements() will return an IndexSet of size zero.
Definition at line 110 of file trilinos_vector.cc.
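A sketch of a non-overlapping, contiguous partitioning of 100 elements across all processes:

    const unsigned int n_procs =
      Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
    const unsigned int rank =
      Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);

    IndexSet owned (100);
    owned.add_range (rank * 100 / n_procs, (rank + 1) * 100 / n_procs);

    TrilinosWrappers::MPI::Vector v (owned, MPI_COMM_WORLD);  // no ghosts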
TrilinosWrappers::MPI::Vector::Vector (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator = MPI_COMM_WORLD)
Creates a ghosted parallel vector.
Depending on whether the ghost argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
Definition at line 185 of file trilinos_vector.cc.
TrilinosWrappers::MPI::Vector::Vector (const IndexSet &parallel_partitioning, const VectorBase &v, const MPI_Comm &communicator = MPI_COMM_WORLD)
Copy constructor from the TrilinosWrappers vector class. Since a vector of this class does not necessarily need to be distributed among processes, the user needs to supply us with an IndexSet and an MPI communicator that set the partitioning details.
Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
Definition at line 166 of file trilinos_vector.cc.
template<typename Number >
TrilinosWrappers::MPI::Vector::Vector (const IndexSet &parallel_partitioning, const ::Vector< Number > &v, const MPI_Comm &communicator = MPI_COMM_WORLD)
Copy-constructor from deal.II vectors. Sets the dimension to that of the given vector, and copies all the elements.
Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
void TrilinosWrappers::MPI::Vector::reinit (const VectorBase &v, const bool omit_zeroing_entries = false, const bool allow_different_maps = false)
Reinit functionality. This function sets the calling vector to the dimension and the parallel distribution of the input vector, but does not copy the elements in v. If omit_zeroing_entries is not true, the elements in the vector are initialized with zero. If it is set to true, the vector entries are in an unspecified state and the user has to set all elements. In the current implementation, this method does not touch the vector entries in case the vector layout is unchanged from before; otherwise entries are set to zero. Note that this behavior might change between releases without notification.
This function has a third argument, allow_different_maps, that allows for an exchange of data between two equal-sized vectors that are distributed differently among the processors. A trivial application of this function is to generate a replication of a whole vector on each machine, when the calling vector is built with the localized vector class TrilinosWrappers::Vector and v is a distributed vector. In this case, the variable omit_zeroing_entries needs to be set to false, since it does not make sense to exchange data between differently parallelized vectors without touching the elements.
Definition at line 294 of file trilinos_vector.cc.
void TrilinosWrappers::MPI::Vector::reinit (const BlockVector &v, const bool import_data = false)
Create vector by merging components from a block vector.
Definition at line 375 of file trilinos_vector.cc.
Vector & TrilinosWrappers::MPI::Vector::operator= (const TrilinosScalar s)
Set all components of the vector to the given number s. Simply pass this down to the base class, but we still need to declare this function to make the example given in the discussion about making the constructor explicit work.
Copy the given vector. Resize the present vector if necessary. In this case, also the Epetra_Map that designs the parallel partitioning is taken from the input vector.
Definition at line 499 of file trilinos_vector.cc.
Move the given vector. This operator replaces the present vector with v by efficiently swapping the internal data structures.
Definition at line 570 of file trilinos_vector.cc.
Vector & TrilinosWrappers::MPI::Vector::operator= (const ::TrilinosWrappers::Vector &v)
Copy operator from a given localized vector (present on all processes) in TrilinosWrappers format to the current distributed vector. This function assumes that the calling vector (left hand object) already is of the same size as the right hand side vector. Otherwise, an exception will be thrown.
template<typename Number >
Vector & TrilinosWrappers::MPI::Vector::operator= (const ::Vector< Number > &v)
Another copy function. This one takes a deal.II vector and copies it into a TrilinosWrappers vector. Note that since we do not provide any Epetra_Map that tells about the partitioning of the vector among the MPI processes, the size of the TrilinosWrappers vector has to be the same as the size of the input vector. In order to change the map, use the reinit(const Epetra_Map &input_map) function.
void TrilinosWrappers::MPI::Vector::import_nonlocal_data_for_fe (const ::TrilinosWrappers::SparseMatrix &matrix, const Vector &vector)
This reinit function is meant to be used for parallel calculations where some non-local data has to be used. The typical situation where one needs this function is the call of the FEValues<dim>::get_function_values function (or of some derivatives) in parallel. Since it is usually faster to retrieve the data in advance, this function can be called before the assembly forks out to the different processors. What this function does is the following: it takes the information in the columns of the given matrix and looks at which data couples between the different processors. That data is then queried from the input vector. Note that you should not write to the resulting vector anymore, since some data can be stored several times on different processors, leading to unpredictable results. In particular, such a vector cannot be used for matrix-vector products, as for example done during the solution of linear systems.
Definition at line 599 of file trilinos_vector.cc.
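A sketch of the intended use; system_matrix and solution are placeholder names for an assembled TrilinosWrappers::SparseMatrix and a distributed solution vector:

    // Prefetch the entries of 'solution' that couple across processes
    // according to the sparsity of 'system_matrix'.
    TrilinosWrappers::MPI::Vector prefetched;
    prefetched.import_nonlocal_data_for_fe (system_matrix, solution);
    // Use 'prefetched' read-only from here on, e.g. inside FEValues calls.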
template<typename number >
void TrilinosWrappers::MPI::Vector::reinit (const Epetra_Map &parallel_partitioner, const ::Vector< number > &v)
Reinitialize from a deal.II vector. The Epetra_Map specifies the parallel partitioning.
Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
This function is deprecated.
void TrilinosWrappers::MPI::Vector::reinit (const Epetra_Map &parallel_partitioning, const bool omit_zeroing_entries = false)
Reinit functionality. This function destroys the old vector content and generates a new one based on the input map.
Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
This function is deprecated.
Definition at line 202 of file trilinos_vector.cc.
void TrilinosWrappers::MPI::Vector::reinit (const IndexSet &parallel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD, const bool omit_zeroing_entries = false)
Reinit functionality. This function destroys the old vector content and generates a new one based on the input partitioning. The flag omit_zeroing_entries determines whether the vector should be filled with zeros (false). If the flag is set to true, the vector entries are in an unspecified state and the user has to set all elements. In the current implementation, this method still sets the entries to zero, but this might change between releases without notification.
Depending on whether the parallel_partitioning argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
In case parallel_partitioning is overlapping, it is not clear which process should own which elements. Hence, locally_owned_elements() returns an empty IndexSet in this case.
Definition at line 256 of file trilinos_vector.cc.
void TrilinosWrappers::MPI::Vector::reinit (const IndexSet &locally_owned_entries, const IndexSet &ghost_entries, const MPI_Comm &communicator = MPI_COMM_WORLD, const bool vector_writable = false)
Reinit functionality. This function destroys the old vector content and generates a new one based on the input partitioning. In addition to just specifying one index set as in all the other methods above, this method allows one to supply an additional set of ghost entries. There are two different versions of a vector that can be created. If the flag vector_writable is set to false, the vector only allows read access to the joint set of locally_owned_entries and ghost_entries. The effect of the reinit method is then equivalent to calling the other reinit method with an index set containing both the locally owned entries and the ghost entries.
If the flag vector_writable is set to true, this creates an alternative storage scheme for ghost elements that allows multiple threads to write into the vector (for the other reinit methods, only one thread is allowed to write into the ghost entries at a time); see the sketch after the definition line below.
Depending on whether the ghost_entries argument uniquely subdivides elements among processors or not, the resulting vector may or may not have ghost elements. See the general documentation of this class for more information.
Definition at line 452 of file trilinos_vector.cc.
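A sketch of the writable-ghosts variant, assuming index sets locally_owned and ghosts are given:

    TrilinosWrappers::MPI::Vector v;
    v.reinit (locally_owned, ghosts, MPI_COMM_WORLD,
              /*vector_writable=*/true);
    // Several threads may now write to distinct entries, including ghosts;
    // a final compress() communicates the accumulated contributions:
    v.compress (VectorOperation::add);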
Global function swap which overloads the default implementation of the C++ standard library, which uses a temporary object. The function simply exchanges the data of the two vectors.
Definition at line 652 of file trilinos_vector.h.
static const bool TrilinosWrappers::MPI::Vector::supports_distributed_data = true
A variable that indicates whether this vector supports distributed data storage. If true, then this vector also needs an appropriate compress() function that allows recent set or add operations on individual elements to be communicated to other processors.
For the current class, the variable equals true, since it does support parallel data storage.
Deprecated: use the type trait is_serial_vector< VectorType >::value instead.
Definition at line 275 of file trilinos_vector.h.