|
| unsigned int | n_mpi_processes (const MPI_Comm &mpi_communicator) |
| |
| unsigned int | this_mpi_process (const MPI_Comm &mpi_communicator) |
| |
| std::vector< unsigned int > | compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm, const std::vector< unsigned int > &destinations) |
| |
| MPI_Comm | duplicate_communicator (const MPI_Comm &mpi_communicator) |
| |
| template<typename T > |
| T | sum (const T &t, const MPI_Comm &mpi_communicator) |
| |
| template<typename T , unsigned int N> |
| void | sum (const T(&values)[N], const MPI_Comm &mpi_communicator, T(&sums)[N]) |
| |
| template<typename T > |
| void | sum (const std::vector< T > &values, const MPI_Comm &mpi_communicator, std::vector< T > &sums) |
| |
| template<typename T > |
| void | sum (const Vector< T > &values, const MPI_Comm &mpi_communicator, Vector< T > &sums) |
| |
| template<typename T > |
| T | max (const T &t, const MPI_Comm &mpi_communicator) |
| |
| template<typename T , unsigned int N> |
| void | max (const T(&values)[N], const MPI_Comm &mpi_communicator, T(&maxima)[N]) |
| |
| template<typename T > |
| void | max (const std::vector< T > &values, const MPI_Comm &mpi_communicator, std::vector< T > &maxima) |
| |
| template<typename T > |
| T | min (const T &t, const MPI_Comm &mpi_communicator) |
| |
| template<typename T , unsigned int N> |
| void | min (const T(&values)[N], const MPI_Comm &mpi_communicator, T(&minima)[N]) |
| |
| template<typename T > |
| void | min (const std::vector< T > &values, const MPI_Comm &mpi_communicator, std::vector< T > &minima) |
| |
| MinMaxAvg | min_max_avg (const double my_value, const MPI_Comm &mpi_communicator) |
| |
| bool | job_supports_mpi () |
| |
A namespace for utility functions that abstract certain operations using the Message Passing Interface (MPI) or provide fallback operations in case deal.II is configured not to use MPI at all.
| MPI_Comm Utilities::MPI::duplicate_communicator |
( |
const MPI_Comm & |
mpi_communicator | ) |
|
Given a communicator, generate a new communicator that contains the same set of processors but that has a different, unique identifier.
This functionality can be used to ensure that different objects, such as distributed matrices, each have unique communicators over which they can interact without interfering with each other.
When no longer needed, the communicator created here needs to be destroyed using MPI_Comm_free.
Definition at line 117 of file mpi.cc.
template<typename T , unsigned int N>
| void Utilities::MPI::sum |
( |
const T(&) |
values[N], |
|
|
const MPI_Comm & |
mpi_communicator, |
|
|
T(&) |
sums[N] |
|
) |
| |
|
inline |
Like the previous function, but take the sums over the elements of an array of length N. In other words, the i-th element of the results array is the sum over the i-th entries of the input arrays from each processor.
Input and output arrays may be the same.
Definition at line 620 of file mpi.h.
template<typename T >
| void Utilities::MPI::sum |
( |
const std::vector< T > & |
values, |
|
|
const MPI_Comm & |
mpi_communicator, |
|
|
std::vector< T > & |
sums |
|
) |
| |
|
inline |
Like the previous function, but take the sums over the elements of a std::vector. In other words, the i-th element of the results array is the sum over the i-th entries of the input arrays from each processor.
Input and output vectors may be the same.
Definition at line 630 of file mpi.h.
template<typename T , unsigned int N>
| void Utilities::MPI::max |
( |
const T(&) |
values[N], |
|
|
const MPI_Comm & |
mpi_communicator, |
|
|
T(&) |
maxima[N] |
|
) |
| |
|
inline |
Like the previous function, but take the maxima over the elements of an array of length N. In other words, the i-th element of the results array is the maximum of the i-th entries of the input arrays from each processor.
Input and output arrays may be the same.
Definition at line 702 of file mpi.h.
template<typename T >
| void Utilities::MPI::max |
( |
const std::vector< T > & |
values, |
|
|
const MPI_Comm & |
mpi_communicator, |
|
|
std::vector< T > & |
maxima |
|
) |
| |
|
inline |
Like the previous function, but take the maximum over the elements of a std::vector. In other words, the i-th element of the results array is the maximum over the i-th entries of the input arrays from each processor.
Input and output vectors may be the same.
Definition at line 712 of file mpi.h.
template<typename T , unsigned int N>
| void Utilities::MPI::min |
( |
const T(&) |
values[N], |
|
|
const MPI_Comm & |
mpi_communicator, |
|
|
T(&) |
minima[N] |
|
) |
| |
|
inline |
Like the previous function, but take the minima over the elements of an array of length N. In other words, the i-th element of the results array is the minimum of the i-th entries of the input arrays from each processor.
Input and output arrays may be the same.
Definition at line 731 of file mpi.h.
template<typename T >
| void Utilities::MPI::min |
( |
const std::vector< T > & |
values, |
|
|
const MPI_Comm & |
mpi_communicator, |
|
|
std::vector< T > & |
minima |
|
) |
| |
|
inline |
Like the previous function, but take the minimum over the elements of a std::vector. In other words, the i-th element of the results array is the minimum over the i-th entries of the input arrays from each processor.
Input and output vectors may be the same.
Definition at line 741 of file mpi.h.
| MinMaxAvg Utilities::MPI::min_max_avg |
( |
const double |
my_value, |
|
|
const MPI_Comm & |
mpi_communicator |
|
) |
| |
Returns the sum, average, minimum, maximum, and the processor ids of the minimum and maximum as a collective operation on the given MPI communicator mpi_communicator. Each processor's value is given in my_value and the result will be returned. The result is available on all machines.
- Note
- Sometimes, not all processors need a result and in that case one would call the
MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.
Definition at line 239 of file mpi.cc.