Reference documentation for deal.II version 9.3.3
mpi.h
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2011 - 2021 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16#ifndef dealii_mpi_h
17#define dealii_mpi_h
18
19#include <deal.II/base/config.h>
20
25
26#include <boost/signals2.hpp>
27
28#include <map>
29#include <numeric>
30#include <set>
31#include <vector>
32
33#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
34// without MPI, we would still like to use
35// some constructs with MPI data
36// types. Therefore, create some dummies
37using MPI_Comm = int;
38using MPI_Request = int;
39using MPI_Datatype = int;
40using MPI_Op = int;
41# ifndef MPI_COMM_WORLD
42# define MPI_COMM_WORLD 0
43# endif
44# ifndef MPI_COMM_SELF
45# define MPI_COMM_SELF 0
46# endif
47# ifndef MPI_REQUEST_NULL
48# define MPI_REQUEST_NULL 0
49# endif
50# ifndef MPI_MIN
51# define MPI_MIN 0
52# endif
53# ifndef MPI_MAX
54# define MPI_MAX 0
55# endif
56# ifndef MPI_SUM
57# define MPI_SUM 0
58# endif
59# ifndef MPI_LOR
60# define MPI_LOR 0
61# endif
62#endif
63
64
65
79#ifdef DEAL_II_WITH_MPI
80# if DEAL_II_MPI_VERSION_GTE(3, 0)
81
82# define DEAL_II_MPI_CONST_CAST(expr) (expr)
83
84# else
85
86# include <type_traits>
87
88# define DEAL_II_MPI_CONST_CAST(expr) \
89 const_cast<typename std::remove_const< \
90 typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)
91
92# endif
93#endif
94
95
96
98 DEAL_II_NAMESPACE_OPEN
99
100// Forward type declarations to allow MPI sums over tensorial types
101#ifndef DOXYGEN
102template <int rank, int dim, typename Number>
103class Tensor;
104template <int rank, int dim, typename Number>
105class SymmetricTensor;
106template <typename Number>
107class SparseMatrix;
108class IndexSet;
109#endif
110
111namespace Utilities
112{
125 IndexSet
126 create_evenly_distributed_partitioning(const unsigned int my_partition_id,
127 const unsigned int n_partitions,
128 const IndexSet::size_type total_size);
129
137 namespace MPI
138 {
147 unsigned int
148 n_mpi_processes(const MPI_Comm &mpi_communicator);
149
158 unsigned int
159 this_mpi_process(const MPI_Comm &mpi_communicator);
160
165 const std::vector<unsigned int>
166 mpi_processes_within_communicator(const MPI_Comm &comm_large,
167                                   const MPI_Comm &comm_small);
168
190 std::vector<unsigned int>
191 compute_point_to_point_communication_pattern(
192   const MPI_Comm &                 mpi_comm,
193 const std::vector<unsigned int> &destinations);
194
214 unsigned int
215 compute_n_point_to_point_communications(
216   const MPI_Comm &                 mpi_comm,
217 const std::vector<unsigned int> &destinations);
218
235 MPI_Comm
236 duplicate_communicator(const MPI_Comm &mpi_communicator);
237
247 void
248 free_communicator(MPI_Comm &mpi_communicator);
249
262 class DuplicatedCommunicator
263 {
264 public:
268 explicit DuplicatedCommunicator(const MPI_Comm &communicator)
269 : comm(duplicate_communicator(communicator))
270 {}
271
276 DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
280 ~DuplicatedCommunicator()
281 {
282 free_communicator(comm);
283 }
284
288 const MPI_Comm &operator*() const
289 {
290 return comm;
291 }
292
293
298 DuplicatedCommunicator &
299 operator=(const DuplicatedCommunicator &) = delete;
300 private:
304 MPI_Comm comm;
305 };
306
336 class CollectiveMutex
337 {
338 public:
344 class ScopedLock
345 {
346 public:
350 explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
351 : mutex(mutex)
352 , comm(comm)
353 {
354 mutex.lock(comm);
355 }
356
360 ~ScopedLock()
361 {
362 mutex.unlock(comm);
363 }
364
365 private:
369 CollectiveMutex &mutex;
373 const MPI_Comm comm;
374 };
375
379 explicit CollectiveMutex();
380
385 ~CollectiveMutex();
392 void
393 lock(const MPI_Comm &comm);
394
401 void
402 unlock(const MPI_Comm &comm);
403
404 private:
408 bool locked;
409
413 MPI_Request request;
414 };
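
Usage sketch (illustrative, not part of mpi.h): a CollectiveMutex, together with its ScopedLock helper, serializes critical sections that issue MPI point-to-point messages, so that two such sections cannot interleave their messages on the same communicator. The sketch below assumes deal.II was configured with MPI and that the function is called collectively on all ranks of the communicator; the function name guarded_exchange is hypothetical.

#include <deal.II/base/mpi.h>

using namespace dealii;

void
guarded_exchange(const MPI_Comm &comm)
{
  // One mutex shared by every call to this function. Locking is a
  // collective operation, so all ranks of 'comm' must enter this scope.
  static Utilities::MPI::CollectiveMutex mutex;
  Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);

  // ... point-to-point communication protected by the mutex ...
}   // the lock is released when 'lock' goes out of scope
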
415
416
417
445#ifdef DEAL_II_WITH_MPI
446 int
447 create_group(const MPI_Comm & comm,
448 const MPI_Group &group,
449 const int tag,
450 MPI_Comm * new_comm);
451#endif
452
461 std::vector<IndexSet>
462 create_ascending_partitioning(const MPI_Comm &comm,
463                               const IndexSet::size_type locally_owned_size);
464
472 IndexSet
473 create_evenly_distributed_partitioning(
474   const MPI_Comm &          comm,
475 const IndexSet::size_type total_size);
476
477#ifdef DEAL_II_WITH_MPI
493 template <class Iterator, typename Number = long double>
494 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
495 mean_and_standard_deviation(const Iterator begin,
496                             const Iterator end,
497 const MPI_Comm &comm);
498#endif
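
Usage sketch (illustrative, not part of mpi.h): mean_and_standard_deviation() combines the locally stored samples of every rank and returns the mean and the Bessel-corrected standard deviation of the union of all samples. A minimal sketch, assuming deal.II was configured with MPI; the sample values are placeholders.

#include <deal.II/base/mpi.h>

#include <vector>

using namespace dealii;

void
print_statistics(const MPI_Comm &comm)
{
  // Every rank contributes its own local samples.
  const std::vector<double> local_samples = {1.0, 2.0, 3.0};

  // Note the default template parameter Number = long double, so the
  // returned pair holds long doubles unless Number is given explicitly.
  const auto mean_and_sigma =
    Utilities::MPI::mean_and_standard_deviation(local_samples.begin(),
                                                local_samples.end(),
                                                comm);
  (void)mean_and_sigma;
}
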
499
519 template <typename T>
520 T
521 sum(const T &t, const MPI_Comm &mpi_communicator);
522
532 template <typename T, typename U>
533 void
534 sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
535
545 template <typename T>
546 void
547 sum(const ArrayView<const T> &values,
548     const MPI_Comm &          mpi_communicator,
549 const ArrayView<T> & sums);
550
556 template <int rank, int dim, typename Number>
557 SymmetricTensor<rank, dim, Number>
558 sum(const SymmetricTensor<rank, dim, Number> &local,
559     const MPI_Comm &                          mpi_communicator);
560
566 template <int rank, int dim, typename Number>
567 Tensor<rank, dim, Number>
568 sum(const Tensor<rank, dim, Number> &local,
569     const MPI_Comm &                 mpi_communicator);
570
579 template <typename Number>
580 void
581 sum(const SparseMatrix<Number> &local,
582     const MPI_Comm &            mpi_communicator,
583 SparseMatrix<Number> & global);
584
604 template <typename T>
605 T
606 max(const T &t, const MPI_Comm &mpi_communicator);
607
617 template <typename T, typename U>
618 void
619 max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
620
630 template <typename T>
631 void
632 max(const ArrayView<const T> &values,
633     const MPI_Comm &          mpi_communicator,
634 const ArrayView<T> & maxima);
635
655 template <typename T>
656 T
657 min(const T &t, const MPI_Comm &mpi_communicator);
658
668 template <typename T, typename U>
669 void
670 min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
671
681 template <typename T>
682 void
683 min(const ArrayView<const T> &values,
684     const MPI_Comm &          mpi_communicator,
685 const ArrayView<T> & minima);
686
710 template <typename T>
711 T
712 logical_or(const T &t, const MPI_Comm &mpi_communicator);
713
728 template <typename T, typename U>
729 void
730 logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results);
731
741 template <typename T>
742 void
743 logical_or(const ArrayView<const T> &values,
744            const MPI_Comm &          mpi_communicator,
745 const ArrayView<T> & results);
746
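
Usage sketch (illustrative, not part of mpi.h): sum(), max(), min() and logical_or() all follow the same pattern, either returning the reduced scalar or filling a container element by element. A minimal sketch, assuming deal.II was configured with MPI; the local values are placeholders.

#include <deal.II/base/mpi.h>

#include <vector>

using namespace dealii;

void
reduction_examples(const MPI_Comm &comm)
{
  // Scalar reduction: every rank passes its local value and every rank
  // receives the global result.
  const double local_error  = 0.1;
  const double global_error = Utilities::MPI::sum(local_error, comm);
  (void)global_error;

  // Element-wise reduction over containers of equal size on all ranks.
  std::vector<unsigned int> local_counts(3, 1);
  std::vector<unsigned int> global_counts(3);
  Utilities::MPI::max(local_counts, comm, global_counts);
}
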
761 struct MinMaxAvg
762 {
767 double sum;
768
773 double min;
774
779 double max;
780
789 unsigned int min_index;
790
799 unsigned int max_index;
800
805 double avg;
806 };
807
822 MinMaxAvg
823 min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
824
836 std::vector<MinMaxAvg>
837 min_max_avg(const std::vector<double> &my_value,
838 const MPI_Comm & mpi_communicator);
839
840
853 void
854 min_max_avg(const ArrayView<const double> &my_values,
855 const ArrayView<MinMaxAvg> & result,
856 const MPI_Comm & mpi_communicator);
857
858
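
Usage sketch (illustrative, not part of mpi.h): min_max_avg() reduces one double per rank and also reports which ranks held the extreme values. A minimal sketch, assuming deal.II was configured with MPI; report_timing is a hypothetical helper.

#include <deal.II/base/mpi.h>

#include <iostream>

using namespace dealii;

void
report_timing(const double local_wall_time, const MPI_Comm &comm)
{
  const Utilities::MPI::MinMaxAvg stats =
    Utilities::MPI::min_max_avg(local_wall_time, comm);

  if (Utilities::MPI::this_mpi_process(comm) == 0)
    std::cout << "wall time: min=" << stats.min << " (rank " << stats.min_index
              << "), max=" << stats.max << " (rank " << stats.max_index
              << "), avg=" << stats.avg << std::endl;
}
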
902 class MPI_InitFinalize
903 {
904 public:
950 MPI_InitFinalize(
951   int &              argc,
952 char **& argv,
953 const unsigned int max_num_threads = numbers::invalid_unsigned_int);
954
960 ~MPI_InitFinalize();
987 static void
988 register_request(MPI_Request &request);
989
993 static void
994 unregister_request(MPI_Request &request);
995
1003 struct Signals
1004 {
1009 boost::signals2::signal<void()> at_mpi_init;
1010
1017 boost::signals2::signal<void()> at_mpi_finalize;
1018 };
1019
1020 static Signals signals;
1021
1022 private:
1026 static std::set<MPI_Request *> requests;
1027 };
1028
1040 bool
1041 job_supports_mpi();
1042
1060 template <typename T>
1061 std::map<unsigned int, T>
1062 some_to_some(const MPI_Comm &                 comm,
1063              const std::map<unsigned int, T> &objects_to_send);
1064
1078 template <typename T>
1079 std::vector<T>
1080 all_gather(const MPI_Comm &comm, const T &object_to_send);
1081
1097 template <typename T>
1098 std::vector<T>
1099 gather(const MPI_Comm &   comm,
1100        const T &          object_to_send,
1101 const unsigned int root_process = 0);
1102
1121 template <typename T>
1122 T
1123 broadcast(const MPI_Comm &   comm,
1124           const T &          object_to_send,
1125 const unsigned int root_process = 0);
1126
1136 template <typename T>
1137 T
1138 all_reduce(const T & local_value,
1139 const MPI_Comm & comm,
1140 const std::function<T(const T &, const T &)> &combiner);
1141
1184 std::vector<unsigned int>
1185 compute_index_owner(const IndexSet &owned_indices,
1186 const IndexSet &indices_to_look_up,
1187 const MPI_Comm &comm);
1188
1196 template <typename T>
1197 std::vector<T>
1198 compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);
1199
1203 template <typename T>
1204 std::set<T>
1205 compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
1206
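
Usage sketch (illustrative, not part of mpi.h): some_to_some(), all_gather(), gather() and broadcast() send arbitrary serializable objects rather than plain arrays; internally the objects are packed with Utilities::pack() and unpacked on the receiving side. A minimal sketch, assuming deal.II was configured with MPI; the objects exchanged here are placeholders.

#include <deal.II/base/mpi.h>

#include <map>
#include <string>
#include <vector>

using namespace dealii;

void
exchange_objects(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  // Gather one object from every rank onto every rank.
  const std::vector<std::string> names =
    Utilities::MPI::all_gather(comm, "rank-" + std::to_string(my_rank));
  (void)names;

  // Send objects to selected ranks only; the returned map contains what
  // the other ranks sent to this one, keyed by their rank.
  std::map<unsigned int, std::vector<double>> to_send;
  to_send[(my_rank + 1) % n_ranks] = {1.0 * my_rank, 2.0 * my_rank};
  const auto received = Utilities::MPI::some_to_some(comm, to_send);
  (void)received;

  // Make an object known on rank 0 available everywhere.
  const std::string file_name =
    Utilities::MPI::broadcast(comm, std::string("parameters.prm"), 0);
  (void)file_name;
}
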
1207#ifndef DOXYGEN
1208 // declaration for an internal function that lives in mpi.templates.h
1209 namespace internal
1210 {
1211 template <typename T>
1212 void
1213 all_reduce(const MPI_Op & mpi_op,
1214            const ArrayView<const T> &values,
1215            const MPI_Comm &          mpi_communicator,
1216 const ArrayView<T> & output);
1217 }
1218
1219 // Since these depend on N they must live in the header file
1220 template <typename T, unsigned int N>
1221 void
1222 sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
1223 {
1224 internal::all_reduce(MPI_SUM,
1225                          ArrayView<const T>(values, N),
1226                          mpi_communicator,
1227 ArrayView<T>(sums, N));
1228 }
1229
1230 template <typename T, unsigned int N>
1231 void
1232 max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
1233 {
1234 internal::all_reduce(MPI_MAX,
1235                          ArrayView<const T>(values, N),
1236                          mpi_communicator,
1237 ArrayView<T>(maxima, N));
1238 }
1239
1240 template <typename T, unsigned int N>
1241 void
1242 min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
1243 {
1244 internal::all_reduce(MPI_MIN,
1245                          ArrayView<const T>(values, N),
1246                          mpi_communicator,
1247 ArrayView<T>(minima, N));
1248 }
1249
1250 template <typename T, unsigned int N>
1251 void
1252 logical_or(const T (&values)[N],
1253 const MPI_Comm &mpi_communicator,
1254 T (&results)[N])
1255 {
1256 static_assert(std::is_integral<T>::value,
1257 "The MPI_LOR operation only allows integral data types.");
1258
1259 internal::all_reduce(MPI_LOR,
1260                          ArrayView<const T>(values, N),
1261                          mpi_communicator,
1262 ArrayView<T>(results, N));
1263 }
1264
1265 template <typename T>
1266 std::map<unsigned int, T>
1267 some_to_some(const MPI_Comm & comm,
1268 const std::map<unsigned int, T> &objects_to_send)
1269 {
1270# ifndef DEAL_II_WITH_MPI
1271 (void)comm;
1272 Assert(objects_to_send.size() < 2,
1273 ExcMessage("Cannot send to more than one processor."));
1274 Assert(objects_to_send.find(0) != objects_to_send.end() ||
1275 objects_to_send.size() == 0,
1276 ExcMessage("Can only send to myself or to nobody."));
1277 return objects_to_send;
1278# else
1279 const auto my_proc = this_mpi_process(comm);
1280
1281 std::map<unsigned int, T> received_objects;
1282
1283 std::vector<unsigned int> send_to;
1284 send_to.reserve(objects_to_send.size());
1285 for (const auto &m : objects_to_send)
1286 if (m.first == my_proc)
1287 received_objects[my_proc] = m.second;
1288 else
1289 send_to.emplace_back(m.first);
1290
1291 const unsigned int n_point_point_communications =
1292   Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1293
1294 // Protect the following communication:
1295 static CollectiveMutex mutex;
1296 CollectiveMutex::ScopedLock lock(mutex, comm);
1297
1298 // If we have something to send, or we expect something from other
1299 // processors, we need to visit one of the two scopes below. Otherwise,
1300 // no other action is required by this mpi process, and we can safely
1301 // return.
1302 if (send_to.size() == 0 && n_point_point_communications == 0)
1303 return received_objects;
1304
1305 const int mpi_tag =
1306   internal::Tags::compute_point_to_point_communication_pattern;
1307
1308 // Sending buffers
1309 std::vector<std::vector<char>> buffers_to_send(send_to.size());
1310 std::vector<MPI_Request> buffer_send_requests(send_to.size());
1311 {
1312 unsigned int i = 0;
1313 for (const auto &rank_obj : objects_to_send)
1314 if (rank_obj.first != my_proc)
1315 {
1316 const auto &rank = rank_obj.first;
1317 buffers_to_send[i] = Utilities::pack(rank_obj.second,
1318 /*allow_compression=*/false);
1319 const int ierr = MPI_Isend(buffers_to_send[i].data(),
1320 buffers_to_send[i].size(),
1321 MPI_CHAR,
1322 rank,
1323 mpi_tag,
1324 comm,
1325 &buffer_send_requests[i]);
1326 AssertThrowMPI(ierr);
1327 ++i;
1328 }
1329 }
1330
1331 // Fill the output map
1332 {
1333 std::vector<char> buffer;
1334 // We do this on a first come/first served basis
1335 for (unsigned int i = 0; i < n_point_point_communications; ++i)
1336 {
1337 // Probe what's going on. Take data from the first available sender
1338 MPI_Status status;
1339 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1340 AssertThrowMPI(ierr);
1341
1342 // Length of the message
1343 int len;
1344 ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1345 AssertThrowMPI(ierr);
1346 buffer.resize(len);
1347
1348 // Source rank
1349 const unsigned int rank = status.MPI_SOURCE;
1350
1351 // Actually receive the message
1352 ierr = MPI_Recv(buffer.data(),
1353 len,
1354 MPI_CHAR,
1355 status.MPI_SOURCE,
1356 status.MPI_TAG,
1357 comm,
1358 MPI_STATUS_IGNORE);
1359 AssertThrowMPI(ierr);
1360 Assert(received_objects.find(rank) == received_objects.end(),
1362 "I should not receive again from this rank"));
1363 received_objects[rank] =
1364 Utilities::unpack<T>(buffer,
1365 /*allow_compression=*/false);
1366 }
1367 }
1368
1369 // Wait to have sent all objects.
1370 const int ierr = MPI_Waitall(send_to.size(),
1371 buffer_send_requests.data(),
1372 MPI_STATUSES_IGNORE);
1373 AssertThrowMPI(ierr);
1374
1375 return received_objects;
1376# endif // deal.II with MPI
1377 }
1378
1379 template <typename T>
1380 std::vector<T>
1381 all_gather(const MPI_Comm &comm, const T &object)
1382 {
1383 if (job_supports_mpi() == false)
1384 return {object};
1385
1386# ifndef DEAL_II_WITH_MPI
1387 (void)comm;
1388 std::vector<T> v(1, object);
1389 return v;
1390# else
1391 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1392
1393 std::vector<char> buffer = Utilities::pack(object);
1394
1395 int n_local_data = buffer.size();
1396
1397 // Vector to store the size of loc_data_array for every process
1398 std::vector<int> size_all_data(n_procs, 0);
1399
1400 // Exchanging the size of each buffer
1401 MPI_Allgather(
1402 &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1403
1404 // Now computing the displacement, relative to recvbuf,
1405 // at which to store the incoming buffer
1406 std::vector<int> rdispls(n_procs);
1407 rdispls[0] = 0;
1408 for (unsigned int i = 1; i < n_procs; ++i)
1409 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1410
1411 // Step 3: exchange the buffer:
1412 std::vector<char> received_unrolled_buffer(rdispls.back() +
1413 size_all_data.back());
1414
1415 MPI_Allgatherv(buffer.data(),
1416 n_local_data,
1417 MPI_CHAR,
1418 received_unrolled_buffer.data(),
1419 size_all_data.data(),
1420 rdispls.data(),
1421 MPI_CHAR,
1422 comm);
1423
1424 std::vector<T> received_objects(n_procs);
1425 for (unsigned int i = 0; i < n_procs; ++i)
1426 {
1427 std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1428 rdispls[i],
1429 received_unrolled_buffer.begin() +
1430 rdispls[i] + size_all_data[i]);
1431 received_objects[i] = Utilities::unpack<T>(local_buffer);
1432 }
1433
1434 return received_objects;
1435# endif
1436 }
1437
1438 template <typename T>
1439 std::vector<T>
1440 gather(const MPI_Comm & comm,
1441 const T & object_to_send,
1442 const unsigned int root_process)
1443 {
1444# ifndef DEAL_II_WITH_MPI
1445 (void)comm;
1446 (void)root_process;
1447 std::vector<T> v(1, object_to_send);
1448 return v;
1449# else
1450 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1451 const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
1452
1453 AssertIndexRange(root_process, n_procs);
1454
1455 std::vector<char> buffer = Utilities::pack(object_to_send);
1456 int n_local_data = buffer.size();
1457
1458 // Vector to store the size of loc_data_array for every process
1459 // only the root process needs to allocate memory for that purpose
1460 std::vector<int> size_all_data;
1461 if (my_rank == root_process)
1462 size_all_data.resize(n_procs, 0);
1463
1464 // Exchanging the size of each buffer
1465 int ierr = MPI_Gather(&n_local_data,
1466 1,
1467 MPI_INT,
1468 size_all_data.data(),
1469 1,
1470 MPI_INT,
1471 root_process,
1472 comm);
1473 AssertThrowMPI(ierr);
1474
1475 // Now computing the displacement, relative to recvbuf,
1476 // at which to store the incoming buffer; only for root
1477 std::vector<int> rdispls;
1478 if (my_rank == root_process)
1479 {
1480 rdispls.resize(n_procs, 0);
1481 for (unsigned int i = 1; i < n_procs; ++i)
1482 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1483 }
1484 // exchange the buffer:
1485 std::vector<char> received_unrolled_buffer;
1486 if (my_rank == root_process)
1487 received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
1488
1489 ierr = MPI_Gatherv(buffer.data(),
1490 n_local_data,
1491 MPI_CHAR,
1492 received_unrolled_buffer.data(),
1493 size_all_data.data(),
1494 rdispls.data(),
1495 MPI_CHAR,
1496 root_process,
1497 comm);
1498 AssertThrowMPI(ierr);
1499
1500 std::vector<T> received_objects;
1501
1502 if (my_rank == root_process)
1503 {
1504 received_objects.resize(n_procs);
1505
1506 for (unsigned int i = 0; i < n_procs; ++i)
1507 {
1508 const std::vector<char> local_buffer(
1509 received_unrolled_buffer.begin() + rdispls[i],
1510 received_unrolled_buffer.begin() + rdispls[i] +
1511 size_all_data[i]);
1512 received_objects[i] = Utilities::unpack<T>(local_buffer);
1513 }
1514 }
1515 return received_objects;
1516# endif
1517 }
1518
1519
1520
1521 template <typename T>
1522 T
1523 broadcast(const MPI_Comm & comm,
1524 const T & object_to_send,
1525 const unsigned int root_process)
1526 {
1527# ifndef DEAL_II_WITH_MPI
1528 (void)comm;
1529 (void)root_process;
1530 return object_to_send;
1531# else
1532 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1533 AssertIndexRange(root_process, n_procs);
1534 (void)n_procs;
1535
1536 std::vector<char> buffer;
1537 unsigned int buffer_size = numbers::invalid_unsigned_int;
1538
1539 // On the root process, pack the data and determine what the
1540 // buffer size needs to be.
1541 if (this_mpi_process(comm) == root_process)
1542 {
1543 buffer = Utilities::pack(object_to_send, false);
1544 buffer_size = buffer.size();
1545 }
1546
1547 // Exchange the size of buffer
1548 int ierr = MPI_Bcast(&buffer_size, 1, MPI_UNSIGNED, root_process, comm);
1549 AssertThrowMPI(ierr);
1550
1551 // If not on the root process, correctly size the buffer to
1552 // receive the data, then do exactly that.
1553 if (this_mpi_process(comm) != root_process)
1554 buffer.resize(buffer_size);
1555
1556 ierr =
1557 MPI_Bcast(buffer.data(), buffer_size, MPI_CHAR, root_process, comm);
1558 AssertThrowMPI(ierr);
1559
1560 if (Utilities::MPI::this_mpi_process(comm) == root_process)
1561 return object_to_send;
1562 else
1563 return Utilities::unpack<T>(buffer, false);
1564# endif
1565 }
1566
1567
1568# ifdef DEAL_II_WITH_MPI
1569 template <class Iterator, typename Number>
1570 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
1571 mean_and_standard_deviation(const Iterator begin,
1572 const Iterator end,
1573 const MPI_Comm &comm)
1574 {
1575 // below we do simple and straight-forward implementation. More elaborate
1576 // options are:
1577 // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
1578 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
1579 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
1580 using Std = typename numbers::NumberTraits<Number>::real_type;
1581 const Number sum = std::accumulate(begin, end, Number(0.));
1582
1583 const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
1584 Assert(size > 0, ExcDivideByZero());
1585 const Number mean =
1586 Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
1587 Std sq_sum = 0.;
1588 std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
1589   sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
1590 });
1591 sq_sum = Utilities::MPI::sum(sq_sum, comm);
1592 return std::make_pair(mean,
1593 std::sqrt(sq_sum / static_cast<Std>(size - 1)));
1594 }
1595# endif
1596
1597#endif
1598 } // end of namespace MPI
1599} // end of namespace Utilities
1600
1601
1603 DEAL_II_NAMESPACE_CLOSE
1604#endif
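
Usage sketch (illustrative, not part of mpi.h): a complete program typically creates a single Utilities::MPI::MPI_InitFinalize object at the top of main(), which initializes MPI (and limits the number of threads) and finalizes MPI automatically when it goes out of scope. A minimal sketch, assuming deal.II was configured with MPI:

#include <deal.II/base/mpi.h>

#include <iostream>

int
main(int argc, char *argv[])
{
  using namespace dealii;

  // Initialize MPI; request at most one thread per MPI process.
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  const MPI_Comm     comm    = MPI_COMM_WORLD;
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  // A first collective operation: sum one value per rank.
  const double sum_of_ranks = Utilities::MPI::sum(1.0 * my_rank, comm);

  if (my_rank == 0)
    std::cout << "running on " << n_ranks
              << " ranks, sum of ranks = " << sum_of_ranks << std::endl;

  return 0;
}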