Reference documentation for deal.II version 9.3.3
Namespaces
namespace | ConsensusAlgorithms |
namespace | internal |
Classes
class | CollectiveMutex |
class | CommunicationPatternBase |
class | ConsensusAlgorithmsProcessTargets |
class | DuplicatedCommunicator |
struct | MinMaxAvg |
class | MPI_InitFinalize |
class | NoncontiguousPartitioner |
class | Partitioner |
class | ProcessGrid |
class | RemotePointEvaluation |
Functions
unsigned int | n_mpi_processes (const MPI_Comm &mpi_communicator) |
unsigned int | this_mpi_process (const MPI_Comm &mpi_communicator) |
const std::vector< unsigned int > | mpi_processes_within_communicator (const MPI_Comm &comm_large, const MPI_Comm &comm_small) |
std::vector< unsigned int > | compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm, const std::vector< unsigned int > &destinations) |
unsigned int | compute_n_point_to_point_communications (const MPI_Comm &mpi_comm, const std::vector< unsigned int > &destinations) |
MPI_Comm | duplicate_communicator (const MPI_Comm &mpi_communicator) |
void | free_communicator (MPI_Comm &mpi_communicator) |
int | create_group (const MPI_Comm &comm, const MPI_Group &group, const int tag, MPI_Comm *new_comm) |
std::vector< IndexSet > | create_ascending_partitioning (const MPI_Comm &comm, const IndexSet::size_type locally_owned_size) |
IndexSet | create_evenly_distributed_partitioning (const MPI_Comm &comm, const IndexSet::size_type total_size) |
template<class Iterator , typename Number = long double> | |
std::pair< Number, typename numbers::NumberTraits< Number >::real_type > | mean_and_standard_deviation (const Iterator begin, const Iterator end, const MPI_Comm &comm) |
template<typename T > | |
T | sum (const T &t, const MPI_Comm &mpi_communicator) |
template<typename T , typename U > | |
void | sum (const T &values, const MPI_Comm &mpi_communicator, U &sums) |
template<typename T > | |
void | sum (const ArrayView< const T > &values, const MPI_Comm &mpi_communicator, const ArrayView< T > &sums) |
template<int rank, int dim, typename Number > | |
SymmetricTensor< rank, dim, Number > | sum (const SymmetricTensor< rank, dim, Number > &local, const MPI_Comm &mpi_communicator) |
template<int rank, int dim, typename Number > | |
Tensor< rank, dim, Number > | sum (const Tensor< rank, dim, Number > &local, const MPI_Comm &mpi_communicator) |
template<typename Number > | |
void | sum (const SparseMatrix< Number > &local, const MPI_Comm &mpi_communicator, SparseMatrix< Number > &global) |
template<typename T > | |
T | max (const T &t, const MPI_Comm &mpi_communicator) |
template<typename T , typename U > | |
void | max (const T &values, const MPI_Comm &mpi_communicator, U &maxima) |
template<typename T > | |
void | max (const ArrayView< const T > &values, const MPI_Comm &mpi_communicator, const ArrayView< T > &maxima) |
template<typename T > | |
T | min (const T &t, const MPI_Comm &mpi_communicator) |
template<typename T , typename U > | |
void | min (const T &values, const MPI_Comm &mpi_communicator, U &minima) |
template<typename T > | |
void | min (const ArrayView< const T > &values, const MPI_Comm &mpi_communicator, const ArrayView< T > &minima) |
template<typename T > | |
T | logical_or (const T &t, const MPI_Comm &mpi_communicator) |
template<typename T , typename U > | |
void | logical_or (const T &values, const MPI_Comm &mpi_communicator, U &results) |
template<typename T > | |
void | logical_or (const ArrayView< const T > &values, const MPI_Comm &mpi_communicator, const ArrayView< T > &results) |
MinMaxAvg | min_max_avg (const double my_value, const MPI_Comm &mpi_communicator) |
std::vector< MinMaxAvg > | min_max_avg (const std::vector< double > &my_value, const MPI_Comm &mpi_communicator) |
void | min_max_avg (const ArrayView< const double > &my_values, const ArrayView< MinMaxAvg > &result, const MPI_Comm &mpi_communicator) |
bool | job_supports_mpi () |
template<typename T > | |
std::map< unsigned int, T > | some_to_some (const MPI_Comm &comm, const std::map< unsigned int, T > &objects_to_send) |
template<typename T > | |
std::vector< T > | all_gather (const MPI_Comm &comm, const T &object_to_send) |
template<typename T > | |
std::vector< T > | gather (const MPI_Comm &comm, const T &object_to_send, const unsigned int root_process=0) |
template<typename T > | |
T | broadcast (const MPI_Comm &comm, const T &object_to_send, const unsigned int root_process=0) |
template<typename T > | |
T | all_reduce (const T &local_value, const MPI_Comm &comm, const std::function< T(const T &, const T &)> &combiner) |
std::vector< unsigned int > | compute_index_owner (const IndexSet &owned_indices, const IndexSet &indices_to_look_up, const MPI_Comm &comm) |
template<typename T > | |
std::vector< T > | compute_set_union (const std::vector< T > &vec, const MPI_Comm &comm) |
template<typename T > | |
std::set< T > | compute_set_union (const std::set< T > &set, const MPI_Comm &comm) |
A namespace for utility functions that abstract certain operations using the Message Passing Interface (MPI) or provide fallback operations in case deal.II is configured not to use MPI at all.
unsigned int Utilities::MPI::n_mpi_processes (const MPI_Comm &mpi_communicator)
Return the number of MPI processes that exist in the given communicator object. If this is a sequential job (i.e., the program is not using MPI at all, or is using MPI but has been started with only one MPI process), then the communicator necessarily involves only one process and the function returns 1.
unsigned int Utilities::MPI::this_mpi_process (const MPI_Comm &mpi_communicator)
Return the rank of the present MPI process in the space of processes described by the given communicator. This will be a unique value for each process, between zero and (strictly less than) the total number of processes (as given by n_mpi_processes()).
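For illustration only (this sketch is an editorial addition, not part of the deal.II documentation), a minimal complete program that uses these two functions; it assumes deal.II was configured with MPI support:
    #include <deal.II/base/mpi.h>
    #include <iostream>

    int main(int argc, char *argv[])
    {
      using namespace dealii;

      // Initialize (and, at the end of scope, finalize) the MPI environment.
      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
      const unsigned int my_rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

      // Each process reports its rank; without MPI this prints "rank 0 of 1".
      std::cout << "rank " << my_rank << " of " << n_ranks << std::endl;
    }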
std::vector< unsigned int > Utilities::MPI::compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm, const std::vector< unsigned int > &destinations)
Consider an unstructured communication pattern where every process in an MPI universe wants to send some data to a subset of the other processors. To do that, the other processors need to know who to expect messages from. This function computes this information.
mpi_comm | A communicator that describes the processors that are going to communicate with each other. |
destinations | The list of processors the current process wants to send information to. This list need not be sorted in any way. If it contains duplicate entries that means that multiple messages are intended for a given destination. |
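A hedged usage sketch (the ring-shaped pattern and the helper name example_pattern are invented for illustration; MPI is assumed to be initialized already): each rank announces that it wants to send to its right neighbor, and the function reports which ranks will send to the current one.
    #include <deal.II/base/mpi.h>
    #include <vector>

    void example_pattern(const MPI_Comm &comm)
    {
      using namespace dealii;

      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
      const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

      // This rank intends to send one message to its "right" neighbor
      // (only if there is more than one rank, to avoid sending to itself).
      std::vector<unsigned int> destinations;
      if (n_ranks > 1)
        destinations.push_back((my_rank + 1) % n_ranks);

      // Find out which ranks will send messages to this rank. In this
      // ring-like pattern, that is the single "left" neighbor.
      const std::vector<unsigned int> sources =
        Utilities::MPI::compute_point_to_point_communication_pattern(comm, destinations);
      (void)sources;
    }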
unsigned int Utilities::MPI::compute_n_point_to_point_communications (const MPI_Comm &mpi_comm, const std::vector< unsigned int > &destinations)
Simplified (for efficiency) version of compute_point_to_point_communication_pattern() that only computes the number of processes in an MPI universe from which to expect communication.
mpi_comm | A communicator that describes the processors that are going to communicate with each other. |
destinations | The list of processors the current process wants to send information to. This list need not be sorted in any way. If it contains duplicate entries that means that multiple messages are intended for a given destination. |
MPI_Comm Utilities::MPI::duplicate_communicator (const MPI_Comm &mpi_communicator)
Given a communicator, generate a new communicator that contains the same set of processors but that has a different, unique identifier.
This functionality can be used to ensure that different objects, such as distributed matrices, each have unique communicators over which they can interact without interfering with each other.
When no longer needed, the communicator created here needs to be destroyed using free_communicator().
This function is equivalent to calling MPI_Comm_dup(mpi_communicator, &return_value).
void Utilities::MPI::free_communicator (MPI_Comm &mpi_communicator)
Free the given communicator mpi_communicator that was duplicated using duplicate_communicator().
The argument is passed by reference and will be invalidated and set to the MPI null handle. This function is equivalent to calling MPI_Comm_free(&mpi_communicator).
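A short sketch of the usual pairing of duplicate_communicator() and free_communicator(), assuming MPI has already been initialized (for example through Utilities::MPI::MPI_InitFinalize):
    #include <deal.II/base/mpi.h>

    void use_private_communicator()
    {
      using namespace dealii;

      // Obtain a private copy of MPI_COMM_WORLD so that messages sent on it
      // cannot be confused with messages of other objects or libraries.
      MPI_Comm my_comm = Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);

      // ... perform communication on my_comm ...

      // Release the duplicated communicator; my_comm becomes the MPI null handle.
      Utilities::MPI::free_communicator(my_comm);
    }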
int Utilities::MPI::create_group (const MPI_Comm &comm, const MPI_Group &group, const int tag, MPI_Comm *new_comm)
If comm is an intracommunicator, this function returns a new communicator new_comm with communication group defined by the group argument. The function is only collective over the group of processes that actually want to create the communicator, i.e., that are named in the group argument. If multiple threads at a given process perform concurrent create_group() operations, the user must distinguish these operations by providing different tag or comm arguments.
This function was introduced in the MPI-3.0 standard. If available, the corresponding function in the provided MPI implementation is used. Otherwise, the implementation follows the one described in the following publication:
std::vector< IndexSet > Utilities::MPI::create_ascending_partitioning (const MPI_Comm &comm, const IndexSet::size_type locally_owned_size)
Given the number of locally owned elements locally_owned_size, create a 1:1 partitioning of the elements across the MPI communicator comm. The total size of elements is the sum of locally_owned_size across the MPI communicator. Each process will store a contiguous subset of indices, and the index set on process p+1 starts at the index one larger than the last one stored on process p.
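As an illustrative sketch (the choice of 10 locally owned elements per rank is arbitrary, and MPI is assumed to be initialized), this is how the function might be called:
    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>
    #include <vector>

    void example_ascending_partitioning(const MPI_Comm &comm)
    {
      using namespace dealii;

      // Each rank owns 10 consecutive indices; rank r owns [10*r, 10*(r+1)).
      const std::vector<IndexSet> partitioning =
        Utilities::MPI::create_ascending_partitioning(comm, 10);

      // The IndexSet describing the indices owned by the current rank:
      const IndexSet &my_indices =
        partitioning[Utilities::MPI::this_mpi_process(comm)];
      (void)my_indices;
    }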
IndexSet Utilities::MPI::create_evenly_distributed_partitioning (const MPI_Comm &comm, const IndexSet::size_type total_size)
Given the total number of elements total_size, create an evenly distributed 1:1 partitioning of the elements across the MPI communicator comm. Uses comm to determine the number of partitions and the processor ID, and then calls the create_evenly_distributed_partitioning() function above.
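A similar sketch for the evenly distributed variant, again with a made-up problem size:
    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>

    void example_even_partitioning(const MPI_Comm &comm)
    {
      using namespace dealii;

      // Each rank receives a contiguous, (almost) equally sized chunk of
      // the index range [0, 1000).
      const IndexSet my_chunk =
        Utilities::MPI::create_evenly_distributed_partitioning(comm, 1000);
      (void)my_chunk;
    }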
std::pair< Number, typename numbers::NumberTraits< Number >::real_type > Utilities::MPI::mean_and_standard_deviation (const Iterator begin, const Iterator end, const MPI_Comm &comm)
Calculate mean and standard deviation across the MPI communicator comm for values provided as a range [begin,end). The mean is computed as \(\bar x=\frac 1N \sum x_k\) where the \(x_k\) are the elements pointed to by the begin and end iterators on all processors (i.e., each processor's [begin,end) range points to a subset of the overall number of elements). The standard deviation is calculated as \(\sigma=\sqrt{\frac {1}{N-1} \sum |x_k -\bar x|^2}\), i.e., the square root of the unbiased sample variance.
Number | specifies the type to store the mean value. The standard deviation is stored as the corresponding real type. This allows, for example, to calculate statistics from integer input values. |
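A possible call pattern, sketched with invented sample values and assuming an initialized MPI environment:
    #include <deal.II/base/mpi.h>
    #include <vector>

    void example_statistics(const MPI_Comm &comm)
    {
      using namespace dealii;

      // Each rank holds a few local samples; the statistics are taken over
      // the union of the samples of all ranks.
      const std::vector<double> local_samples = {1.0, 2.0, 3.0};

      // .first is the mean, .second the standard deviation.
      const auto mean_and_sigma =
        Utilities::MPI::mean_and_standard_deviation(local_samples.begin(),
                                                    local_samples.end(),
                                                    comm);
      (void)mean_and_sigma;
    }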
T Utilities::MPI::sum (const T &t, const MPI_Comm &mpi_communicator)
Return the sum over all processors of the value t. This function is collective over all processors given in the communicator. If deal.II is not configured for use of MPI, this function simply returns the value of t. This function corresponds to the MPI_Allreduce function, i.e., all processors receive the result of this operation.
Note: Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.
Note: This function is only implemented for certain template arguments T, namely float, double, int, unsigned int.
void Utilities::MPI::sum (const T &values, const MPI_Comm &mpi_communicator, U &sums)
Like the previous function, but take the sums over the elements of an array of type T. In other words, the i-th element of the results array is the sum over the i-th entries of the input arrays from each processor. T and U must decay to the same type, e.g. they just differ by one of them having a const type qualifier and the other not.
Input and output arrays may be the same.
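For illustration, a sketch of the scalar and the element-wise array form of sum() (the values are invented; MPI is assumed to be initialized):
    #include <deal.II/base/mpi.h>
    #include <vector>

    void example_sum(const MPI_Comm &comm)
    {
      using namespace dealii;

      // Scalar reduction: every rank contributes one number, all ranks
      // receive the global sum.
      const double local_value  = 1.0 * Utilities::MPI::this_mpi_process(comm);
      const double global_value = Utilities::MPI::sum(local_value, comm);

      // Element-wise reduction over arrays: sums[i] is the sum of the
      // i-th entries of all ranks. Input and output may be the same object.
      std::vector<double> local_contributions = {1.0, 2.0, 3.0};
      std::vector<double> sums(local_contributions.size());
      Utilities::MPI::sum(local_contributions, comm, sums);
      (void)global_value;
    }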
void Utilities::MPI::sum (const ArrayView< const T > &values, const MPI_Comm &mpi_communicator, const ArrayView< T > &sums)
Like the previous function, but take the sums over the elements of an array as specified by the ArrayView arguments. In other words, the i-th element of the results array is the sum over the i-th entries of the input arrays from each processor.
Input and output arrays may be the same.
SymmetricTensor< rank, dim, Number > Utilities::MPI::sum (const SymmetricTensor< rank, dim, Number > &local, const MPI_Comm &mpi_communicator)
Perform an MPI sum of the entries of a symmetric tensor.
Tensor< rank, dim, Number > Utilities::MPI::sum (const Tensor< rank, dim, Number > &local, const MPI_Comm &mpi_communicator)
Perform an MPI sum of the entries of a tensor.
void Utilities::MPI::sum (const SparseMatrix< Number > &local, const MPI_Comm &mpi_communicator, SparseMatrix< Number > &global)
Perform an MPI sum of the entries of a SparseMatrix.
Note: local and global should have the same sparsity pattern, and it should be the same for all MPI processes.
T Utilities::MPI::max (const T &t, const MPI_Comm &mpi_communicator)
Return the maximum over all processors of the value t. This function is collective over all processors given in the communicator. If deal.II is not configured for use of MPI, this function simply returns the value of t. This function corresponds to the MPI_Allreduce function, i.e., all processors receive the result of this operation.
Note: Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.
Note: This function is only implemented for certain template arguments T, namely float, double, int, unsigned int.
void Utilities::MPI::max (const T &values, const MPI_Comm &mpi_communicator, U &maxima)
Like the previous function, but take the maximum over the elements of an array of type T. In other words, the i-th element of the results array is the maximum over the i-th entries of the input arrays from each processor. T and U must decay to the same type, e.g. they just differ by one of them having a const type qualifier and the other not.
Input and output vectors may be the same.
void Utilities::MPI::max (const ArrayView< const T > &values, const MPI_Comm &mpi_communicator, const ArrayView< T > &maxima)
Like the previous function, but take the maximum over the elements of an array as specified by the ArrayView arguments. In other words, the i-th element of the results array is the maximum over the i-th entries of the input arrays from each processor.
Input and output arrays may be the same.
T Utilities::MPI::min (const T &t, const MPI_Comm &mpi_communicator)
Return the minimum over all processors of the value t. This function is collective over all processors given in the communicator. If deal.II is not configured for use of MPI, this function simply returns the value of t. This function corresponds to the MPI_Allreduce function, i.e., all processors receive the result of this operation.
Note: Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.
Note: This function is only implemented for certain template arguments T, namely float, double, int, unsigned int.
void Utilities::MPI::min (const T &values, const MPI_Comm &mpi_communicator, U &minima)
Like the previous function, but take the minima over the elements of an array of type T. In other words, the i-th element of the results array is the minimum of the i-th entries of the input arrays from each processor. T and U must decay to the same type, e.g. they just differ by one of them having a const type qualifier and the other not.
Input and output arrays may be the same.
void Utilities::MPI::min (const ArrayView< const T > &values, const MPI_Comm &mpi_communicator, const ArrayView< T > &minima)
Like the previous function, but take the minimum over the elements of an array as specified by the ArrayView arguments. In other words, the i-th element of the results array is the minimum over the i-th entries of the input arrays from each processor.
Input and output arrays may be the same.
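A compact sketch of the scalar forms of max() and min(), using each rank's own rank number as the (made-up) local value; MPI is assumed to be initialized:
    #include <deal.II/base/mpi.h>

    void example_max_min(const MPI_Comm &comm)
    {
      using namespace dealii;

      // Every rank contributes its own rank number; the results are the
      // largest and the smallest rank present in the communicator.
      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);

      const unsigned int largest_rank  = Utilities::MPI::max(my_rank, comm);
      const unsigned int smallest_rank = Utilities::MPI::min(my_rank, comm);
      (void)largest_rank;
      (void)smallest_rank;
    }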
T Utilities::MPI::logical_or (const T &t, const MPI_Comm &mpi_communicator)
Performs a logical or operation over all processors of the value t. The logical or operator || returns the boolean value true if either or all operands are true and returns false otherwise. If the provided value t corresponds to 0 in its associated data type T, it will be interpreted as false, and true otherwise. Data type T must be of integral type, i.e., bool, char, short, int, long, or any of their variations.
This function is collective over all processors given in the communicator. If deal.II is not configured for use of MPI, this function simply returns the value of t. This function corresponds to the MPI_Allreduce function, i.e., all processors receive the result of this operation.
Note: Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.
void Utilities::MPI::logical_or (const T &values, const MPI_Comm &mpi_communicator, U &results)
Like the previous function, but performs the logical or operation on each element of an array. In other words, the i-th element of the results array is the result of the logical or operation applied on the i-th entries of the input arrays from each processor. T and U must decay to the same type, e.g., they just differ by one of them having a const type qualifier and the other not.
Input and output arrays may be the same.
Note: This function may not work with the specialization of std::vector for the data type bool. In that case, use a different container or data type.
void Utilities::MPI::logical_or (const ArrayView< const T > &values, const MPI_Comm &mpi_communicator, const ArrayView< T > &results)
Like the previous function, but performs the logical or operation on each element of an array as specified by the ArrayView arguments. In other words, the i-th element of the results array is the result of the logical or operation applied on the i-th entries of the input arrays from each processor.
Input and output arrays may be the same.
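A sketch of the scalar logical_or(), with a purely illustrative local condition; MPI is assumed to be initialized:
    #include <deal.II/base/mpi.h>

    void example_logical_or(const MPI_Comm &comm)
    {
      using namespace dealii;

      // A purely illustrative local condition: true only on rank 0.
      const bool locally_flagged = (Utilities::MPI::this_mpi_process(comm) == 0);

      // True on every rank if the condition holds on at least one rank.
      const bool flagged_anywhere = Utilities::MPI::logical_or(locally_flagged, comm);
      (void)flagged_anywhere;
    }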
MinMaxAvg Utilities::MPI::min_max_avg (const double my_value, const MPI_Comm &mpi_communicator)
Return sum, average, minimum, maximum, and the processor ids of minimum and maximum as a collective operation on the given MPI communicator mpi_communicator. Each processor's value is given in my_value and the result will be returned. The result is available on all machines.
Note: Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.
std::vector< MinMaxAvg > Utilities::MPI::min_max_avg (const std::vector< double > &my_value, const MPI_Comm &mpi_communicator)
Same as above but returning the sum, average, minimum, maximum, process id of minimum and maximum as a collective operation on the given MPI communicator mpi_communicator for each entry of the vector.
void Utilities::MPI::min_max_avg (const ArrayView< const double > &my_values, const ArrayView< MinMaxAvg > &result, const MPI_Comm &mpi_communicator)
Same as above but returning the sum, average, minimum, maximum, process id of minimum and maximum as a collective operation on the given MPI communicator mpi_communicator for each entry of the ArrayView.
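A sketch of the scalar min_max_avg() variant; the per-rank value is invented, and MPI is assumed to be initialized. The returned MinMaxAvg structure carries the combined statistics:
    #include <deal.II/base/mpi.h>

    void example_min_max_avg(const MPI_Comm &comm)
    {
      using namespace dealii;

      // An illustrative per-rank quantity, e.g. a local timing or error value.
      const double my_value = 1.0 + Utilities::MPI::this_mpi_process(comm);

      const Utilities::MPI::MinMaxAvg stats =
        Utilities::MPI::min_max_avg(my_value, comm);

      // stats.sum, stats.avg, stats.min, stats.max hold the combined values;
      // stats.min_index and stats.max_index hold the ranks that attained them.
      (void)stats;
    }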
bool Utilities::MPI::job_supports_mpi ()
Return whether (i) deal.II has been compiled to support MPI (for example by compiling with CXX=mpiCC) and if so whether (ii) MPI_Init() has been called (for example using the Utilities::MPI::MPI_InitFinalize class). In other words, the result indicates whether the current job is running under MPI.
std::map< unsigned int, T > Utilities::MPI::some_to_some (const MPI_Comm &comm, const std::map< unsigned int, T > &objects_to_send)
Initiate a some-to-some communication, and exchange arbitrary objects (the class T should be serializable using boost::serialize) between processors.
[in] | comm | MPI communicator. |
[in] | objects_to_send | A map from the rank (unsigned int) of the process meant to receive the data to the object to send (the type T must be serializable for this function to work properly). If this map contains an entry with a key equal to the rank of the current process (i.e., an instruction to a process to send data to itself), then this data item is simply copied to the returned object. |
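A hedged sketch of some_to_some(): every rank addresses a string to its ring neighbor; the payload type and contents are chosen only for illustration, and MPI is assumed to be initialized:
    #include <deal.II/base/mpi.h>
    #include <map>
    #include <string>

    void example_some_to_some(const MPI_Comm &comm)
    {
      using namespace dealii;

      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
      const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

      // Address one (serializable) object to the next rank in a ring.
      std::map<unsigned int, std::string> objects_to_send;
      objects_to_send[(my_rank + 1) % n_ranks] =
        "greetings from rank " + std::to_string(my_rank);

      // The result maps sender ranks to the objects they addressed to us.
      const std::map<unsigned int, std::string> received =
        Utilities::MPI::some_to_some(comm, objects_to_send);
      (void)received;
    }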
std::vector< T > Utilities::MPI::all_gather (const MPI_Comm &comm, const T &object_to_send)
A generalization of the classic MPI_Allgather function that accepts arbitrary data types T, as long as boost::serialize accepts T as an argument.
[in] | comm | MPI communicator. |
[in] | object_to_send | An object to send to all other processes |
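A sketch of all_gather(), where each rank contributes its own rank number (MPI assumed initialized):
    #include <deal.II/base/mpi.h>
    #include <vector>

    void example_all_gather(const MPI_Comm &comm)
    {
      using namespace dealii;

      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);

      // On every rank, gathered[i] is the value contributed by rank i.
      const std::vector<unsigned int> gathered =
        Utilities::MPI::all_gather(comm, my_rank);
      (void)gathered;
    }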
std::vector< T > Utilities::MPI::gather (const MPI_Comm &comm, const T &object_to_send, const unsigned int root_process = 0)
A generalization of the classic MPI_Gather function that accepts arbitrary data types T, as long as boost::serialize accepts T as an argument.
[in] | comm | MPI communicator. |
[in] | object_to_send | an object to send to the root process |
[in] | root_process | The process, which receives the objects from all processes. By default the process with rank 0 is the root process. |
Returns: The root_process receives a vector of objects, with size equal to the number of processes in the MPI communicator. Each entry contains the object received from the processor with the corresponding rank within the communicator. All other processes receive an empty vector.
T Utilities::MPI::broadcast (const MPI_Comm &comm, const T &object_to_send, const unsigned int root_process = 0)
Sends an object object_to_send from the process root_process to all other processes.
A generalization of the classic MPI_Bcast function that accepts arbitrary data types T, as long as Utilities::pack() (which in turn uses boost::serialize, see Utilities::pack() for details) accepts T as an argument.
[in] | comm | MPI communicator. |
[in] | object_to_send | An object to send to all processes. |
[in] | root_process | The process that sends the object to all processes. By default the process with rank 0 is the root process. |
Returns: On the root process, return a copy of object_to_send. On every other process, return a copy of the object sent by the root_process.
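A combined sketch of gather() and broadcast() (the gathered values and the broadcast string are invented; MPI assumed initialized): rank 0 collects one number from every rank and then broadcasts a parameter string back to all ranks.
    #include <deal.II/base/mpi.h>
    #include <string>
    #include <vector>

    void example_gather_broadcast(const MPI_Comm &comm)
    {
      using namespace dealii;

      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);

      // Only the root process (rank 0) receives a non-empty vector here.
      const std::vector<unsigned int> on_root =
        Utilities::MPI::gather(comm, my_rank, /*root_process=*/0);

      // Rank 0 decides on a string, everyone else starts with an empty one;
      // after the call, every rank holds the root's value.
      std::string parameters = (my_rank == 0 ? "refine=3" : "");
      parameters = Utilities::MPI::broadcast(comm, parameters, /*root_process=*/0);
      (void)on_root;
    }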
T Utilities::MPI::all_reduce (const T &local_value, const MPI_Comm &comm, const std::function< T(const T &, const T &)> &combiner)
A function that combines values local_value from all processes via a user-specified binary operation combiner and distributes the result back to all processes. As such this function is similar to MPI_Allreduce (if it were implemented by a global reduction followed by a broadcast step), but due to the user-specified binary operation also general object types, including ones that store variable amounts of data, can be handled.
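A sketch of all_reduce() with a deliberately trivial combiner that adds its two arguments; for plain sums one would normally call Utilities::MPI::sum(), so this only illustrates the calling convention (MPI assumed initialized):
    #include <deal.II/base/mpi.h>
    #include <functional>

    void example_all_reduce(const MPI_Comm &comm)
    {
      using namespace dealii;

      const double local_value = 1.0 + Utilities::MPI::this_mpi_process(comm);

      // The combiner is an arbitrary, user-chosen binary operation; here it
      // simply adds its arguments.
      const std::function<double(const double &, const double &)> combiner =
        [](const double &a, const double &b) { return a + b; };

      // Every rank receives the combined result.
      const double combined = Utilities::MPI::all_reduce(local_value, comm, combiner);
      (void)combined;
    }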
std::vector< unsigned int > Utilities::MPI::compute_index_owner (const IndexSet &owned_indices, const IndexSet &indices_to_look_up, const MPI_Comm &comm)
Given a partitioned index set space, compute the owning MPI process rank of each element of a second index set according to the partitioned index set. A natural usage of this function is to compute for each ghosted degree of freedom the MPI rank of the process owning that index.
One might think: "But we know which rank a ghost DoF belongs to based on the subdomain id of the cell it is on". But this heuristic fails for DoFs on interfaces between ghost cells with different subdomain_ids, or between a ghost cell and an artificial cell. Furthermore, this function enables a completely abstract exchange of information without the help of the mesh in terms of neighbors.
The first argument passed to this function, owned_indices, must uniquely partition an index space between all processes. Otherwise, there are no limitations on this argument: in particular, there is no need to partition the index space into contiguous subsets. Furthermore, there are no limitations on the second index set indices_to_look_up as long as its size matches the first one. It can be chosen arbitrarily and independently on each process. In the case that the second index set also contains locally owned indices, these indices will be treated correctly and the rank of this process is returned for those entries.
[in] | owned_indices | Index set with indices locally owned by this process. |
[in] | indices_to_look_up | Index set containing the indices for which the user is interested in the rank of the owning process. |
[in] | comm | MPI communicator. |
Returns: A list containing the MPI process rank for each entry of the index set indices_to_look_up. The order coincides with the order within the ElementIterator.
std::vector< T > Utilities::MPI::compute_set_union (const std::vector< T > &vec, const MPI_Comm &comm)
Compute the union of the input vectors vec of all processes in the MPI communicator comm.
std::set< T > Utilities::MPI::compute_set_union (const std::set< T > &set, const MPI_Comm &comm)
The same as above but for std::set.
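Finally, a sketch of the vector overload of compute_set_union(), with made-up, overlapping per-rank contributions (MPI assumed initialized):
    #include <deal.II/base/mpi.h>
    #include <vector>

    void example_set_union(const MPI_Comm &comm)
    {
      using namespace dealii;

      // Purely illustrative per-rank contributions with overlapping entries.
      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
      const std::vector<unsigned int> local_entries = {my_rank, my_rank + 1};

      // Every rank receives the union of all per-rank contributions.
      const std::vector<unsigned int> global_entries =
        Utilities::MPI::compute_set_union(local_entries, comm);
      (void)global_entries;
    }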