Reference documentation for deal.II version 9.6.0
mpi_consensus_algorithms.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2020 - 2023 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_mpi_consensus_algorithm_h
16#define dealii_mpi_consensus_algorithm_h
17
18#include <deal.II/base/config.h>
19
20#include <deal.II/base/mpi.h>
21#include <deal.II/base/mpi.templates.h>
22#include <deal.II/base/mpi_tags.h>
23
24DEAL_II_NAMESPACE_OPEN
25
26
27namespace Utilities
28{
29 namespace MPI
30 {
129 namespace ConsensusAlgorithms
130 {
155 template <typename RequestType, typename AnswerType>
156 class Process
157 {
158 public:
163 virtual ~Process() = default;
164
171 virtual std::vector<unsigned int>
172 compute_targets() = 0;
173
183 virtual void
184 create_request(const unsigned int other_rank, RequestType &send_buffer);
185
198 virtual void
199 answer_request(const unsigned int other_rank,
200 const RequestType &buffer_recv,
201 AnswerType &request_buffer);
202
210 virtual void
211 read_answer(const unsigned int other_rank,
212 const AnswerType &recv_buffer);
213 };
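// Usage sketch (illustrative only, not part of this header): a concrete
// Process class with hypothetical payload types, in which every rank asks
// rank 0 for a value. Here RequestType is 'unsigned int' and AnswerType is
// 'double'; all names and values are made up for the example.
//
//   class GetValueFromRoot
//     : public Utilities::MPI::ConsensusAlgorithms::Process<unsigned int,
//                                                           double>
//   {
//   public:
//     virtual std::vector<unsigned int>
//     compute_targets() override
//     {
//       return {0}; // every rank sends exactly one request, to rank 0
//     }
//
//     virtual void
//     create_request(const unsigned int /*other_rank*/,
//                    unsigned int &send_buffer) override
//     {
//       send_buffer = 42; // payload of the request
//     }
//
//     virtual void
//     answer_request(const unsigned int other_rank,
//                    const unsigned int &buffer_recv,
//                    double &request_buffer) override
//     {
//       request_buffer = buffer_recv + 0.5 * other_rank;
//     }
//
//     virtual void
//     read_answer(const unsigned int /*other_rank*/,
//                 const double &recv_buffer) override
//     {
//       // store or use 'recv_buffer'
//     }
//   };
//
// Such an object can then be handed to the run(process, comm) overload of
// one of the Interface implementations declared below, for example:
//
//   GetValueFromRoot process;
//   Utilities::MPI::ConsensusAlgorithms::NBX<unsigned int, double> nbx;
//   nbx.run(process, MPI_COMM_WORLD);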
214
215
216
230 template <typename RequestType, typename AnswerType>
231 class Interface
232 {
233 public:
237 Interface() = default;
238
243 virtual ~Interface() = default;
244
253 std::vector<unsigned int>
254 run(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
255
279 virtual std::vector<unsigned int>
280 run(
281 const std::vector<unsigned int> &targets,
282 const std::function<RequestType(const unsigned int)> &create_request,
283 const std::function<AnswerType(const unsigned int,
284 const RequestType &)> &answer_request,
285 const std::function<void(const unsigned int, const AnswerType &)>
286 &process_answer,
287 const MPI_Comm comm) = 0;
288 };
289
290
304 template <typename RequestType, typename AnswerType>
305 class NBX : public Interface<RequestType, AnswerType>
306 {
307 public:
311 NBX() = default;
312
316 virtual ~NBX() = default;
317
318 // Import the declarations from the base class.
319 using Interface<RequestType, AnswerType>::run;
320
324 virtual std::vector<unsigned int>
325 run(
326 const std::vector<unsigned int> &targets,
327 const std::function<RequestType(const unsigned int)> &create_request,
328 const std::function<AnswerType(const unsigned int,
329 const RequestType &)> &answer_request,
330 const std::function<void(const unsigned int, const AnswerType &)>
331 &process_answer,
332 const MPI_Comm comm) override;
333
334 private:
335#ifdef DEAL_II_WITH_MPI
339 std::vector<std::vector<char>> send_buffers;
340
344 std::vector<MPI_Request> send_requests;
345
353 std::vector<std::unique_ptr<std::vector<char>>> request_buffers;
354
358 std::vector<std::unique_ptr<MPI_Request>> request_requests;
359
363 unsigned int n_outstanding_answers;
364
365 // request for barrier
366 MPI_Request barrier_request;
367#endif
368
372 std::set<unsigned int> requesting_processes;
373
379 bool
380 all_locally_originated_receives_are_completed(
381 const std::function<void(const unsigned int, const AnswerType &)>
382 &process_answer,
383 const MPI_Comm comm);
384
389 void
390 signal_finish(const MPI_Comm comm);
391
397 bool
398 all_remotely_originated_receives_are_completed();
399
405 void
406 maybe_answer_one_request(
407 const std::function<AnswerType(const unsigned int,
408 const RequestType &)> &answer_request,
409 const MPI_Comm comm);
410
415 void
416 start_communication(
417 const std::vector<unsigned int> &targets,
418 const std::function<RequestType(const unsigned int)> &create_request,
419 const MPI_Comm comm);
420
425 void
426 clean_up_and_end_communication(const MPI_Comm comm);
427 };
428
429
474 template <typename RequestType, typename AnswerType>
475 std::vector<unsigned int>
476 nbx(const std::vector<unsigned int> &targets,
477 const std::function<RequestType(const unsigned int)> &create_request,
478 const std::function<AnswerType(const unsigned int,
479 const RequestType &)> &answer_request,
480 const std::function<void(const unsigned int, const AnswerType &)>
481 &process_answer,
482 const MPI_Comm comm);
483
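// Usage sketch (illustrative, not part of this header): calling nbx() with
// lambdas instead of a Process object. Each rank asks its right neighbor for
// a number; the payload types and values below are made up for the example.
//
//   const MPI_Comm     comm    = MPI_COMM_WORLD;
//   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
//   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
//
//   const std::vector<unsigned int> targets = {(my_rank + 1) % n_ranks};
//
//   const std::vector<unsigned int> requesting_ranks =
//     Utilities::MPI::ConsensusAlgorithms::nbx<unsigned int, unsigned int>(
//       targets,
//       // create_request: payload sent to each target
//       [&](const unsigned int /*target*/) { return my_rank; },
//       // answer_request: called on the target for each incoming request
//       [&](const unsigned int /*source*/, const unsigned int &request) {
//         return request + 1000 * my_rank;
//       },
//       // process_answer: called on the requester for each answer received
//       [](const unsigned int source, const unsigned int &answer) {
//         (void)source;
//         (void)answer; // use the answer here
//       },
//       comm);
//
// The return value lists the ranks that sent requests to the current process.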
521 template <typename RequestType>
522 std::vector<unsigned int>
523 nbx(const std::vector<unsigned int> &targets,
524 const std::function<RequestType(const unsigned int)> &create_request,
525 const std::function<void(const unsigned int, const RequestType &)>
526 &process_request,
527 const MPI_Comm comm);
528
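// Usage sketch for this simplified overload (illustrative, assuming <string>
// and <iostream> are included): requests are only delivered and consumed, no
// answer payload travels back. The string payload and targets are made up.
//
//   const MPI_Comm comm = MPI_COMM_WORLD;
//   const std::vector<unsigned int> targets = {0};
//
//   Utilities::MPI::ConsensusAlgorithms::nbx<std::string>(
//     targets,
//     [](const unsigned int target) {
//       return "greetings to rank " + std::to_string(target);
//     },
//     [](const unsigned int source, const std::string &request) {
//       std::cout << "rank " << source << " says: " << request << std::endl;
//     },
//     comm);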
554 template <typename RequestType, typename AnswerType>
555 class PEX : public Interface<RequestType, AnswerType>
556 {
557 public:
561 PEX() = default;
562
566 virtual ~PEX() = default;
567
568 // Import the declarations from the base class.
569 using Interface<RequestType, AnswerType>::run;
570
574 virtual std::vector<unsigned int>
575 run(
576 const std::vector<unsigned int> &targets,
577 const std::function<RequestType(const unsigned int)> &create_request,
578 const std::function<AnswerType(const unsigned int,
579 const RequestType &)> &answer_request,
580 const std::function<void(const unsigned int, const AnswerType &)>
581 &process_answer,
582 const MPI_Comm comm) override;
583
584 private:
585#ifdef DEAL_II_WITH_MPI
589 std::vector<std::vector<char>> send_buffers;
590
594 std::vector<std::vector<char>> recv_buffers;
595
599 std::vector<MPI_Request> send_request_requests;
600
604 std::vector<std::vector<char>> requests_buffers;
605
609 std::vector<MPI_Request> send_answer_requests;
610#endif
614 std::set<unsigned int> requesting_processes;
615
620 unsigned int
621 start_communication(
622 const std::vector<unsigned int> &targets,
623 const std::function<RequestType(const unsigned int)> &create_request,
624 const MPI_Comm comm);
625
630 void
631 answer_one_request(
632 const unsigned int index,
633 const std::function<AnswerType(const unsigned int,
634 const RequestType &)> &answer_request,
635 const MPI_Comm comm);
636
641 void
642 process_incoming_answers(
643 const unsigned int n_targets,
644 const std::function<void(const unsigned int, const AnswerType &)>
645 &process_answer,
646 const MPI_Comm comm);
647
652 void
653 clean_up_and_end_communication();
654 };
655
656
657
714 template <typename RequestType, typename AnswerType>
715 std::vector<unsigned int>
716 pex(const std::vector<unsigned int> &targets,
717 const std::function<RequestType(const unsigned int)> &create_request,
718 const std::function<AnswerType(const unsigned int,
719 const RequestType &)> &answer_request,
720 const std::function<void(const unsigned int, const AnswerType &)>
721 &process_answer,
722 const MPI_Comm comm);
723
761 template <typename RequestType>
762 std::vector<unsigned int>
763 pex(const std::vector<unsigned int> &targets,
764 const std::function<RequestType(const unsigned int)> &create_request,
765 const std::function<void(const unsigned int, const RequestType &)>
766 &process_request,
767 const MPI_Comm comm);
768
769
774 template <typename RequestType, typename AnswerType>
775 class Serial : public Interface<RequestType, AnswerType>
776 {
777 public:
781 Serial() = default;
782
783 // Import the declarations from the base class.
784 using Interface<RequestType, AnswerType>::run;
785
789 virtual std::vector<unsigned int>
790 run(
791 const std::vector<unsigned int> &targets,
792 const std::function<RequestType(const unsigned int)> &create_request,
793 const std::function<AnswerType(const unsigned int,
794 const RequestType &)> &answer_request,
795 const std::function<void(const unsigned int, const AnswerType &)>
796 &process_answer,
797 const MPI_Comm comm) override;
798 };
799
800
801
834 template <typename RequestType, typename AnswerType>
835 std::vector<unsigned int>
836 serial(
837 const std::vector<unsigned int> &targets,
838 const std::function<RequestType(const unsigned int)> &create_request,
839 const std::function<AnswerType(const unsigned int, const RequestType &)>
840 &answer_request,
841 const std::function<void(const unsigned int, const AnswerType &)>
842 &process_answer,
843 const MPI_Comm comm);
844
874 template <typename RequestType>
875 std::vector<unsigned int>
876 serial(
877 const std::vector<unsigned int> &targets,
878 const std::function<RequestType(const unsigned int)> &create_request,
879 const std::function<void(const unsigned int, const RequestType &)>
880 &process_request,
881 const MPI_Comm comm);
882
883
884
897 template <typename RequestType, typename AnswerType>
898 class Selector : public Interface<RequestType, AnswerType>
899 {
900 public:
904 Selector() = default;
905
909 virtual ~Selector() = default;
910
911 // Import the declarations from the base class.
912 using Interface<RequestType, AnswerType>::run;
913
919 virtual std::vector<unsigned int>
920 run(
921 const std::vector<unsigned int> &targets,
922 const std::function<RequestType(const unsigned int)> &create_request,
923 const std::function<AnswerType(const unsigned int,
924 const RequestType &)> &answer_request,
925 const std::function<void(const unsigned int, const AnswerType &)>
926 &process_answer,
927 const MPI_Comm comm) override;
928
929 private:
930 // Pointer to the actual ConsensusAlgorithms::Interface implementation.
931 std::shared_ptr<Interface<RequestType, AnswerType>> consensus_algo;
932 };
933
934
935
980 template <typename RequestType, typename AnswerType>
981 std::vector<unsigned int>
982 selector(
983 const std::vector<unsigned int> &targets,
984 const std::function<RequestType(const unsigned int)> &create_request,
985 const std::function<AnswerType(const unsigned int, const RequestType &)>
986 &answer_request,
987 const std::function<void(const unsigned int, const AnswerType &)>
988 &process_answer,
989 const MPI_Comm comm);
990
1028 template <typename RequestType>
1029 std::vector<unsigned int>
1030 selector(
1031 const std::vector<unsigned int> &targets,
1032 const std::function<RequestType(const unsigned int)> &create_request,
1033 const std::function<void(const unsigned int, const RequestType &)>
1034 &process_request,
1035 const MPI_Comm comm);
1036
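// Usage sketch (illustrative): selector() takes the same arguments as nbx()
// and pex() above, but chooses among the NBX, PEX, and Serial implementations
// based on the number of processes in the communicator, so it can serve as a
// default entry point. 'create_request', 'answer_request', and
// 'process_answer' below stand for the same kinds of lambdas shown in the
// nbx() sketch above:
//
//   const std::vector<unsigned int> requesting_ranks =
//     Utilities::MPI::ConsensusAlgorithms::selector<unsigned int,
//                                                   unsigned int>(
//       targets, create_request, answer_request, process_answer, comm);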
1037
1038
1039#ifndef DOXYGEN
1040 // Implementation of the functions in this namespace.
1041
1042 template <typename RequestType, typename AnswerType>
1043 std::vector<unsigned int>
1044 nbx(const std::vector<unsigned int> &targets,
1045 const std::function<RequestType(const unsigned int)> &create_request,
1046 const std::function<AnswerType(const unsigned int,
1047 const RequestType &)> &answer_request,
1048 const std::function<void(const unsigned int, const AnswerType &)>
1049 &process_answer,
1050 const MPI_Comm comm)
1051 {
1052 return NBX<RequestType, AnswerType>().run(
1053 targets, create_request, answer_request, process_answer, comm);
1054 }
1055
1056
1057
1058 template <typename RequestType>
1059 std::vector<unsigned int>
1060 nbx(const std::vector<unsigned int> &targets,
1061 const std::function<RequestType(const unsigned int)> &create_request,
1062 const std::function<void(const unsigned int, const RequestType &)>
1063 &process_request,
1064 const MPI_Comm comm)
1065 {
1066 // TODO: For the moment, simply implement this special case by
1067 // forwarding to the other function with rewritten function
1068 // objects and using an empty type as answer type. This way,
1069 // we have the interface in place and can provide a more
1070 // efficient implementation later on.
1071 using EmptyType = std::tuple<>;
1072
1073 return nbx<RequestType, EmptyType>(
1074 targets,
1075 create_request,
1076 // answer_request:
1077 [&process_request](const unsigned int source_rank,
1078 const RequestType &request) -> EmptyType {
1079 process_request(source_rank, request);
1080 // Return something. What it is is arbitrary here, except that
1081 // we want it to be as small an object as possible. Using
1082 // std::tuple<> is interpreted as an empty object that is packed
1083 // down to a zero-length char array.
1084 return {};
1085 },
1086 // process_answer:
1087 [](const unsigned int /*target_rank */,
1088 const EmptyType & /*answer*/) {},
1089 comm);
1090 }
1091
1092
1093
1094 template <typename RequestType, typename AnswerType>
1095 std::vector<unsigned int>
1096 pex(const std::vector<unsigned int> &targets,
1097 const std::function<RequestType(const unsigned int)> &create_request,
1098 const std::function<AnswerType(const unsigned int,
1099 const RequestType &)> &answer_request,
1100 const std::function<void(const unsigned int, const AnswerType &)>
1101 &process_answer,
1102 const MPI_Comm comm)
1103 {
1104 return PEX<RequestType, AnswerType>().run(
1105 targets, create_request, answer_request, process_answer, comm);
1106 }
1107
1108
1109
1110 template <typename RequestType>
1111 std::vector<unsigned int>
1112 pex(const std::vector<unsigned int> &targets,
1113 const std::function<RequestType(const unsigned int)> &create_request,
1114 const std::function<void(const unsigned int, const RequestType &)>
1115 &process_request,
1116 const MPI_Comm comm)
1117 {
1118 // TODO: For the moment, simply implement this special case by
1119 // forwarding to the other function with rewritten function
1120 // objects and using an empty type as answer type. This way,
1121 // we have the interface in place and can provide a more
1122 // efficient implementation later on.
1123 using EmptyType = std::tuple<>;
1124
1125 return pex<RequestType, EmptyType>(
1126 targets,
1127 create_request,
1128 // answer_request:
1129 [&process_request](const unsigned int source_rank,
1130 const RequestType &request) -> EmptyType {
1131 process_request(source_rank, request);
1132 // Return something. What it is is arbitrary here, except that
1133 // we want it to be as small an object as possible. Using
1134 // std::tuple<> is interpreted as an empty object that is packed
1135 // down to a zero-length char array.
1136 return {};
1137 },
1138 // process_answer:
1139 [](const unsigned int /*target_rank */,
1140 const EmptyType & /*answer*/) {},
1141 comm);
1142 }
1143
1144
1145
1146 template <typename RequestType, typename AnswerType>
1147 std::vector<unsigned int>
1148 serial(
1149 const std::vector<unsigned int> &targets,
1150 const std::function<RequestType(const unsigned int)> &create_request,
1151 const std::function<AnswerType(const unsigned int, const RequestType &)>
1152 &answer_request,
1153 const std::function<void(const unsigned int, const AnswerType &)>
1154 &process_answer,
1155 const MPI_Comm comm)
1156 {
1157 return Serial<RequestType, AnswerType>().run(
1158 targets, create_request, answer_request, process_answer, comm);
1159 }
1160
1161
1162
1163 template <typename RequestType>
1164 std::vector<unsigned int>
1165 serial(
1166 const std::vector<unsigned int> &targets,
1167 const std::function<RequestType(const unsigned int)> &create_request,
1168 const std::function<void(const unsigned int, const RequestType &)>
1169 &process_request,
1170 const MPI_Comm comm)
1171 {
1172 // TODO: For the moment, simply implement this special case by
1173 // forwarding to the other function with rewritten function
1174 // objects and using an empty type as answer type. This way,
1175 // we have the interface in place and can provide a more
1176 // efficient implementation later on.
1177 using EmptyType = std::tuple<>;
1178
1179 return serial<RequestType, EmptyType>(
1180 targets,
1181 create_request,
1182 // answer_request:
1183 [&process_request](const unsigned int source_rank,
1184 const RequestType &request) -> EmptyType {
1185 process_request(source_rank, request);
1186 // Return something. What it is is arbitrary here, except that
1187 // we want it to be as small an object as possible. Using
1188 // std::tuple<> is interpreted as an empty object that is packed
1189 // down to a zero-length char array.
1190 return {};
1191 },
1192 // process_answer:
1193 [](const unsigned int /*target_rank */,
1194 const EmptyType & /*answer*/) {},
1195 comm);
1196 }
1197
1198
1199
1200 template <typename RequestType, typename AnswerType>
1201 std::vector<unsigned int>
1202 selector(
1203 const std::vector<unsigned int> &targets,
1204 const std::function<RequestType(const unsigned int)> &create_request,
1205 const std::function<AnswerType(const unsigned int, const RequestType &)>
1206 &answer_request,
1207 const std::function<void(const unsigned int, const AnswerType &)>
1208 &process_answer,
1209 const MPI_Comm comm)
1210 {
1211 return Selector<RequestType, AnswerType>().run(
1212 targets, create_request, answer_request, process_answer, comm);
1213 }
1214
1215
1216
1217 template <typename RequestType>
1218 std::vector<unsigned int>
1219 selector(
1220 const std::vector<unsigned int> &targets,
1221 const std::function<RequestType(const unsigned int)> &create_request,
1222 const std::function<void(const unsigned int, const RequestType &)>
1223 &process_request,
1224 const MPI_Comm comm)
1225 {
1226 // TODO: For the moment, simply implement this special case by
1227 // forwarding to the other function with rewritten function
1228 // objects and using an empty type as answer type. This way,
1229 // we have the interface in place and can provide a more
1230 // efficient implementation later on.
1231 using EmptyType = std::tuple<>;
1232
1233 return selector<RequestType, EmptyType>(
1234 targets,
1235 create_request,
1236 // answer_request:
1237 [&process_request](const unsigned int source_rank,
1238 const RequestType &request) -> EmptyType {
1239 process_request(source_rank, request);
1240 // Return something. What it is is arbitrary here, except that
1241 // we want it to be as small an object as possible. Using
1242 // std::tuple<> is interpreted as an empty object that is packed
1243 // down to a zero-length char array.
1244 return {};
1245 },
1246 // process_answer:
1247 [](const unsigned int /*target_rank */,
1248 const EmptyType & /*answer*/) {},
1249 comm);
1250 }
1251
1252#endif
1253
1254
1255 } // namespace ConsensusAlgorithms
1256 } // end of namespace MPI
1257} // end of namespace Utilities
1258
1259
1260
1261#ifndef DOXYGEN
1262
1263// ----------------- Implementation of template functions
1264
1265namespace Utilities
1266{
1267 namespace MPI
1268 {
1269 namespace ConsensusAlgorithms
1270 {
1271 namespace
1272 {
1277 inline bool
1278 has_unique_elements(const std::vector<unsigned int> &targets)
1279 {
1280 std::vector<unsigned int> my_destinations = targets;
1281 std::sort(my_destinations.begin(), my_destinations.end());
1282 return (std::adjacent_find(my_destinations.begin(),
1283 my_destinations.end()) ==
1284 my_destinations.end());
1285 }
1286
1287
1288
1292 inline void
1293 handle_exception(std::exception_ptr &&exception, const MPI_Comm comm)
1294 {
1295# ifdef DEAL_II_WITH_MPI
1296 // an exception within a ConsensusAlgorithm likely causes an
1297 // MPI deadlock. Abort with a reasonable error message instead.
1298 try
1299 {
1300 std::rethrow_exception(exception);
1301 }
1302 catch (ExceptionBase &exc)
1303 {
1304 // report name of the deal.II exception:
1305 std::cerr
1306 << std::endl
1307 << std::endl
1308 << "----------------------------------------------------"
1309 << std::endl;
1310 std::cerr
1311 << "Exception '" << exc.get_exc_name() << "'"
1312 << " on rank " << Utilities::MPI::this_mpi_process(comm)
1313 << " on processing: " << std::endl
1314 << exc.what() << std::endl
1315 << "Aborting!" << std::endl
1316 << "----------------------------------------------------"
1317 << std::endl;
1318
1319 // Then bring down the whole MPI world
1320 MPI_Abort(comm, 255);
1321 }
1322 catch (std::exception &exc)
1323 {
1324 std::cerr
1325 << std::endl
1326 << std::endl
1327 << "----------------------------------------------------"
1328 << std::endl;
1329 std::cerr
1330 << "Exception within ConsensusAlgorithm"
1331 << " on rank " << Utilities::MPI::this_mpi_process(comm)
1332 << " on processing: " << std::endl
1333 << exc.what() << std::endl
1334 << "Aborting!" << std::endl
1335 << "----------------------------------------------------"
1336 << std::endl;
1337
1338 // Then bring down the whole MPI world
1339 MPI_Abort(comm, 255);
1340 }
1341 catch (...)
1342 {
1343 std::cerr
1344 << std::endl
1345 << std::endl
1346 << "----------------------------------------------------"
1347 << std::endl;
1348 std::cerr
1349 << "Unknown exception within ConsensusAlgorithm!" << std::endl
1350 << "Aborting!" << std::endl
1351 << "----------------------------------------------------"
1352 << std::endl;
1353
1354 // Then bring down the whole MPI world
1355 MPI_Abort(comm, 255);
1356 }
1357# else
1358 (void)comm;
1359
1360 // No need to be concerned about deadlocks without MPI.
1361 // Defer to exception handling further up the callstack.
1362 std::rethrow_exception(exception);
1363# endif
1364 }
1365 } // namespace
1366
1367
1368
1369 template <typename RequestType, typename AnswerType>
1370 void
1371 Process<RequestType, AnswerType>::answer_request(const unsigned int,
1372 const RequestType &,
1373 AnswerType &)
1374 {
1375 // nothing to do
1376 }
1377
1378
1379
1380 template <typename RequestType, typename AnswerType>
1381 void
1382 Process<RequestType, AnswerType>::create_request(const unsigned int,
1383 RequestType &)
1384 {
1385 // nothing to do
1386 }
1387
1388
1389
1390 template <typename RequestType, typename AnswerType>
1391 void
1392 Process<RequestType, AnswerType>::read_answer(const unsigned int,
1393 const AnswerType &)
1394 {
1395 // nothing to do
1396 }
1397
1398
1399
1400 template <typename RequestType, typename AnswerType>
1401 std::vector<unsigned int>
1402 Interface<RequestType, AnswerType>::run(
1403 Process<RequestType, AnswerType> &process,
1404 const MPI_Comm comm)
1405 {
1406 // Unpack the 'process' object and call the function that takes
1407 // function objects for all operations.
1408 return run(
1409 process.compute_targets(),
1410 /* create_request: */
1411 [&process](const unsigned int target) {
1412 RequestType request;
1413 process.create_request(target, request);
1414 return request;
1415 },
1416 /* answer_request: */
1417 [&process](const unsigned int source, const RequestType &request) {
1418 AnswerType answer;
1419 process.answer_request(source, request, answer);
1420 return answer;
1421 },
1422 /* process_answer: */
1423 [&process](const unsigned int target, const AnswerType &answer) {
1424 process.read_answer(target, answer);
1425 },
1426 comm);
1427 }
1428
1429
1430
1431 template <typename RequestType, typename AnswerType>
1432 std::vector<unsigned int>
1433 NBX<RequestType, AnswerType>::run(
1434 const std::vector<unsigned int> &targets,
1435 const std::function<RequestType(const unsigned int)> &create_request,
1436 const std::function<AnswerType(const unsigned int, const RequestType &)>
1437 &answer_request,
1438 const std::function<void(const unsigned int, const AnswerType &)>
1439 &process_answer,
1440 const MPI_Comm comm)
1441 {
1442 Assert(has_unique_elements(targets),
1443 ExcMessage("The consensus algorithms expect that each process "
1444 "only sends a single message to another process, "
1445 "but the targets provided include duplicates."));
1446
1447 static CollectiveMutex mutex;
1448 CollectiveMutex::ScopedLock lock(mutex, comm);
1449
1450 try
1451 {
1452 // 1) Send data to identified targets and start receiving
1453 // the answers from these very same processes.
1454 start_communication(targets, create_request, comm);
1455
1456 // 2) Until all posted receive operations are known to have
1457 // completed, answer requests and keep checking whether all
1458 // requests of this process have been answered.
1459 //
1460 // The requests that we catch in the answer_requests()
1461 // function originate elsewhere, that is, they are not in
1462 // response to our own messages
1463 //
1464 // Note also that we may not catch all incoming requests in
1465 // the following two lines: our own requests may have been
1466 // satisfied before we've dealt with all incoming requests.
1467 // That's ok: We will get around to dealing with all
1468 // remaining messages later. We just want to move on to the
1469 // next step as early as possible.
1470 while (all_locally_originated_receives_are_completed(process_answer,
1471 comm) == false)
1472 maybe_answer_one_request(answer_request, comm);
1473
1474 // 3) Signal to all other processes that all requests of this
1475 // process have been answered
1476 signal_finish(comm);
1477
1478 // 4) Nevertheless, this process has to keep on answering
1479 // (potential) incoming requests until all processes have
1480 // received the answer to all requests
1481 while (all_remotely_originated_receives_are_completed() == false)
1482 maybe_answer_one_request(answer_request, comm);
1483
1484 // 5) process the answer to all requests
1485 clean_up_and_end_communication(comm);
1486 }
1487 catch (...)
1488 {
1489 handle_exception(std::current_exception(), comm);
1490 }
1491
1492 return std::vector<unsigned int>(requesting_processes.begin(),
1493 requesting_processes.end());
1494 }
1495
1496
1497
1498 template <typename RequestType, typename AnswerType>
1499 void
1500 NBX<RequestType, AnswerType>::start_communication(
1501 const std::vector<unsigned int> &targets,
1502 const std::function<RequestType(const unsigned int)> &create_request,
1503 const MPI_Comm comm)
1504 {
1505# ifdef DEAL_II_WITH_MPI
1506 // 1)
1507 const auto n_targets = targets.size();
1508
1509 const int tag_request = Utilities::MPI::internal::Tags::
1510 consensus_algorithm_nbx_answer_request;
1511
1512 // 2) allocate memory
1513 send_requests.resize(n_targets);
1514 send_buffers.resize(n_targets);
1515
1516 {
1517 // 3) send and receive
1518 for (unsigned int index = 0; index < n_targets; ++index)
1519 {
1520 const unsigned int rank = targets[index];
1521 AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm));
1522
1523 auto &send_buffer = send_buffers[index];
1524 send_buffer =
1525 (create_request ? Utilities::pack(create_request(rank), false) :
1526 std::vector<char>());
1527
1528 // Post a request to send data
1529 auto ierr = MPI_Isend(send_buffer.data(),
1530 send_buffer.size(),
1531 MPI_CHAR,
1532 rank,
1533 tag_request,
1534 comm,
1535 &send_requests[index]);
1536 AssertThrowMPI(ierr);
1537 }
1538
1539 // Also record that we expect an answer from each target we sent
1540 // a request to:
1541 n_outstanding_answers = n_targets;
1542 }
1543# else
1544 (void)targets;
1545 (void)create_request;
1546 (void)comm;
1547# endif
1548 }
1549
1550
1551
1552 template <typename RequestType, typename AnswerType>
1553 bool
1554 NBX<RequestType,
1555 AnswerType>::all_locally_originated_receives_are_completed(
1556 const std::function<void(const unsigned int, const AnswerType &)>
1557 &process_answer,
1558 const MPI_Comm comm)
1559 {
1560# ifdef DEAL_II_WITH_MPI
1561 // We know that all requests have come in when we have pending
1562 // messages from all targets with the right tag (some of which we may
1563 // have already taken care of below, after discovering their existence).
1564 // We can check for pending messages with MPI_Iprobe, which returns
1565 // immediately with a return code that indicates whether
1566 // it has found a message from any process with a given
1567 // tag.
1568 if (n_outstanding_answers == 0)
1569 return true;
1570 else
1571 {
1572 const int tag_deliver = Utilities::MPI::internal::Tags::
1573 consensus_algorithm_nbx_process_deliver;
1574
1575 int request_is_pending;
1576 MPI_Status status;
1577 const auto ierr = MPI_Iprobe(
1578 MPI_ANY_SOURCE, tag_deliver, comm, &request_is_pending, &status);
1579 AssertThrowMPI(ierr);
1580
1581 // If there is no pending message with this tag,
1582 // then we are clearly not done receiving everything
1583 // yet -- so return false.
1584 if (request_is_pending == 0)
1585 return false;
1586 else
1587 {
1588 // OK, so we have gotten a reply to our request from
1589 // one rank. Let us process it.
1590 const auto target = status.MPI_SOURCE;
1591
1592 // Then query the size of the message, allocate enough memory,
1593 // receive the data, and process it.
1594 int message_size;
1595 {
1596 const int ierr =
1597 MPI_Get_count(&status, MPI_CHAR, &message_size);
1598 AssertThrowMPI(ierr);
1599 }
1600 std::vector<char> recv_buffer(message_size);
1601
1602 {
1603 const int tag_deliver = Utilities::MPI::internal::Tags::
1604 consensus_algorithm_nbx_process_deliver;
1605
1606 const int ierr = MPI_Recv(recv_buffer.data(),
1607 recv_buffer.size(),
1608 MPI_CHAR,
1609 target,
1610 tag_deliver,
1611 comm,
1612 MPI_STATUS_IGNORE);
1613 AssertThrowMPI(ierr);
1614 }
1615
1616 if (process_answer)
1617 process_answer(target,
1618 Utilities::unpack<AnswerType>(recv_buffer,
1619 false));
1620
1621 // Finally, remove this rank from the list of outstanding
1622 // targets:
1623 --n_outstanding_answers;
1624
1625 // We could do another go-around from the top of this
1626 // else-branch to see whether there are actually other messages
1627 // that are currently pending. But that would mean spending
1628 // substantial time in receiving answers while we should also be
1629 // sending answers to requests we have received from other
1630 // places. So let it be enough for now. If there are outstanding
1631 // answers, we will get back to this function before long and
1632 // can take care of them then.
1633 return (n_outstanding_answers == 0);
1634 }
1635 }
1636
1637# else
1638 (void)process_answer;
1639 (void)comm;
1640
1641 return true;
1642# endif
1643 }
1644
1645
1646
1647 template <typename RequestType, typename AnswerType>
1648 void
1649 NBX<RequestType, AnswerType>::maybe_answer_one_request(
1650 const std::function<AnswerType(const unsigned int, const RequestType &)>
1651 &answer_request,
1652 const MPI_Comm comm)
1653 {
1654# ifdef DEAL_II_WITH_MPI
1655
1656 const int tag_request = Utilities::MPI::internal::Tags::
1657 consensus_algorithm_nbx_answer_request;
1658 const int tag_deliver = Utilities::MPI::internal::Tags::
1659 consensus_algorithm_nbx_process_deliver;
1660
1661 // Check if there is a request pending. By selecting the
1662 // tag_request tag, these are other processes asking for
1663 // our own replies, not these other processes' replies
1664 // to our own requests.
1665 //
1666 // There may be multiple such pending messages. We
1667 // only answer one.
1668 MPI_Status status;
1669 int request_is_pending;
1670 const auto ierr = MPI_Iprobe(
1671 MPI_ANY_SOURCE, tag_request, comm, &request_is_pending, &status);
1672 AssertThrowMPI(ierr);
1673
1674 if (request_is_pending != 0)
1675 {
1676 // Get the rank of the requesting process and add it to the
1677 // list of requesting processes (which may contain duplicates).
1678 const auto other_rank = status.MPI_SOURCE;
1679
1680 Assert(requesting_processes.find(other_rank) ==
1681 requesting_processes.end(),
1682 ExcMessage("Process is requesting a second time!"));
1683 requesting_processes.insert(other_rank);
1684
1685 // get size of incoming message
1686 int number_amount;
1687 auto ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount);
1688 AssertThrowMPI(ierr);
1689
1690 // allocate memory for incoming message
1691 std::vector<char> buffer_recv(number_amount);
1692 ierr = MPI_Recv(buffer_recv.data(),
1693 number_amount,
1694 MPI_CHAR,
1695 other_rank,
1696 tag_request,
1697 comm,
1698 MPI_STATUS_IGNORE);
1699 AssertThrowMPI(ierr);
1700
1701 // Allocate memory for an answer message to the current request,
1702 // and ask the 'process' object to produce an answer:
1703 request_buffers.emplace_back(std::make_unique<std::vector<char>>());
1704 auto &request_buffer = *request_buffers.back();
1705 if (answer_request)
1706 request_buffer =
1707 Utilities::pack(answer_request(other_rank,
1708 Utilities::unpack<RequestType>(
1709 buffer_recv, false)),
1710 false);
1711
1712 // Then initiate sending the answer back to the requester.
1713 request_requests.emplace_back(std::make_unique<MPI_Request>());
1714 ierr = MPI_Isend(request_buffer.data(),
1715 request_buffer.size(),
1716 MPI_CHAR,
1717 other_rank,
1718 tag_deliver,
1719 comm,
1720 request_requests.back().get());
1721 AssertThrowMPI(ierr);
1722 }
1723# else
1724 (void)answer_request;
1725 (void)comm;
1726# endif
1727 }
1728
1729
1730
1731 template <typename RequestType, typename AnswerType>
1732 void
1733 NBX<RequestType, AnswerType>::signal_finish(const MPI_Comm comm)
1734 {
1735# ifdef DEAL_II_WITH_MPI
1736 const auto ierr = MPI_Ibarrier(comm, &barrier_request);
1737 AssertThrowMPI(ierr);
1738# else
1739 (void)comm;
1740# endif
1741 }
1742
1743
1744
1745 template <typename RequestType, typename AnswerType>
1746 bool
1747 NBX<RequestType,
1748 AnswerType>::all_remotely_originated_receives_are_completed()
1749 {
1750# ifdef DEAL_II_WITH_MPI
1751 int all_ranks_reached_barrier;
1752 const auto ierr = MPI_Test(&barrier_request,
1753 &all_ranks_reached_barrier,
1754 MPI_STATUS_IGNORE);
1755 AssertThrowMPI(ierr);
1756 return all_ranks_reached_barrier != 0;
1757# else
1758 return true;
1759# endif
1760 }
1761
1762
1763
1764 template <typename RequestType, typename AnswerType>
1765 void
1766 NBX<RequestType, AnswerType>::clean_up_and_end_communication(
1767 const MPI_Comm comm)
1768 {
1769 (void)comm;
1770# ifdef DEAL_II_WITH_MPI
1771 // clean up
1772 {
1773 if (send_requests.size() > 0)
1774 {
1775 const int ierr = MPI_Waitall(send_requests.size(),
1776 send_requests.data(),
1777 MPI_STATUSES_IGNORE);
1778 AssertThrowMPI(ierr);
1779 }
1780
1781 int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
1782 AssertThrowMPI(ierr);
1783
1784 for (auto &i : request_requests)
1785 {
1786 ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
1787 AssertThrowMPI(ierr);
1788 }
1789
1790# ifdef DEBUG
1791 // note: MPI_Ibarrier seems to cause problems during testing; this
1792 // additional barrier seems to help
1793 ierr = MPI_Barrier(comm);
1794 AssertThrowMPI(ierr);
1795# endif
1796 }
1797# endif
1798 }
1799
1800
1801
1802 template <typename RequestType, typename AnswerType>
1803 std::vector<unsigned int>
1804 PEX<RequestType, AnswerType>::run(
1805 const std::vector<unsigned int> &targets,
1806 const std::function<RequestType(const unsigned int)> &create_request,
1807 const std::function<AnswerType(const unsigned int, const RequestType &)>
1808 &answer_request,
1809 const std::function<void(const unsigned int, const AnswerType &)>
1810 &process_answer,
1811 const MPI_Comm comm)
1812 {
1813 Assert(has_unique_elements(targets),
1814 ExcMessage("The consensus algorithms expect that each process "
1815 "only sends a single message to another process, "
1816 "but the targets provided include duplicates."));
1817
1818 static CollectiveMutex mutex;
1819 CollectiveMutex::ScopedLock lock(mutex, comm);
1820
1821 try
1822 {
1823 // 1) Send requests and start receiving the answers.
1824 // In particular, determine how many requests we should expect
1825 // on the current process.
1826 const unsigned int n_requests =
1827 start_communication(targets, create_request, comm);
1828
1829 // 2) Answer requests:
1830 for (unsigned int request = 0; request < n_requests; ++request)
1831 answer_one_request(request, answer_request, comm);
1832
1833 // 3) Process answers:
1834 process_incoming_answers(targets.size(), process_answer, comm);
1835
1836 // 4) Make sure all sends have successfully terminated:
1837 clean_up_and_end_communication();
1838 }
1839 catch (...)
1840 {
1841 handle_exception(std::current_exception(), comm);
1842 }
1843
1844 return std::vector<unsigned int>(requesting_processes.begin(),
1845 requesting_processes.end());
1846 }
1847
1848
1849
1850 template <typename RequestType, typename AnswerType>
1851 unsigned int
1852 PEX<RequestType, AnswerType>::start_communication(
1853 const std::vector<unsigned int> &targets,
1854 const std::function<RequestType(const unsigned int)> &create_request,
1855 const MPI_Comm comm)
1856 {
1857# ifdef DEAL_II_WITH_MPI
1858 const int tag_request = Utilities::MPI::internal::Tags::
1859 consensus_algorithm_pex_answer_request;
1860
1861 // 1) determine with which processes this process wants to
1862 // communicate
1863 const unsigned int n_targets = targets.size();
1864
1865 // 2) determine who wants to communicate with this process
1866 const unsigned int n_sources =
1867 compute_n_point_to_point_communications(comm, targets);
1868
1869 // 3) allocate memory
1870 recv_buffers.resize(n_targets);
1871 send_buffers.resize(n_targets);
1872 send_request_requests.resize(n_targets);
1873
1874 send_answer_requests.resize(n_sources);
1875 requests_buffers.resize(n_sources);
1876
1877 // 4) send and receive
1878 for (unsigned int i = 0; i < n_targets; ++i)
1879 {
1880 const unsigned int rank = targets[i];
1881 AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm));
1882
1883 // pack data which should be sent
1884 auto &send_buffer = send_buffers[i];
1885 if (create_request)
1886 send_buffer = Utilities::pack(create_request(rank), false);
1887
1888 // start to send data
1889 auto ierr = MPI_Isend(send_buffer.data(),
1890 send_buffer.size(),
1891 MPI_CHAR,
1892 rank,
1893 tag_request,
1894 comm,
1895 &send_request_requests[i]);
1896 AssertThrowMPI(ierr);
1897 }
1898
1899 return n_sources;
1900# else
1901 (void)targets;
1902 (void)create_request;
1903 (void)comm;
1904 return 0;
1905# endif
1906 }
1907
1908
1909
1910 template <typename RequestType, typename AnswerType>
1911 void
1912 PEX<RequestType, AnswerType>::answer_one_request(
1913 const unsigned int index,
1914 const std::function<AnswerType(const unsigned int, const RequestType &)>
1915 &answer_request,
1916 const MPI_Comm comm)
1917 {
1918# ifdef DEAL_II_WITH_MPI
1919 const int tag_request = Utilities::MPI::internal::Tags::
1920 consensus_algorithm_pex_answer_request;
1921 const int tag_deliver = Utilities::MPI::internal::Tags::
1922 consensus_algorithm_pex_process_deliver;
1923
1924 // Wait until we have a message ready for retrieval, though we don't
1925 // care which process it is from.
1926 MPI_Status status;
1927 int ierr = MPI_Probe(MPI_ANY_SOURCE, tag_request, comm, &status);
1928 AssertThrowMPI(ierr);
1929
1930 // Get rank of incoming message and verify that it makes sense
1931 const unsigned int other_rank = status.MPI_SOURCE;
1932
1933 Assert(requesting_processes.find(other_rank) ==
1934 requesting_processes.end(),
1935 ExcMessage(
1936 "A process is sending a request after a request from "
1937 "the same process has previously already been "
1938 "received. This algorithm does not expect this to happen."));
1939 requesting_processes.insert(other_rank);
1940
1941 // Actually get the incoming message:
1942 int number_amount;
1943 ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount);
1944 AssertThrowMPI(ierr);
1945
1946 std::vector<char> buffer_recv(number_amount);
1947 ierr = MPI_Recv(buffer_recv.data(),
1948 number_amount,
1949 MPI_CHAR,
1950 other_rank,
1951 tag_request,
1952 comm,
1953 &status);
1954 AssertThrowMPI(ierr);
1955
1956 // Process request by asking the user-provided function for
1957 // the answer and post a send for it.
1958 auto &request_buffer = requests_buffers[index];
1959 request_buffer =
1960 (answer_request ?
1961 Utilities::pack(answer_request(other_rank,
1962 Utilities::unpack<RequestType>(
1963 buffer_recv, false)),
1964 false) :
1965 std::vector<char>());
1966
1967 ierr = MPI_Isend(request_buffer.data(),
1968 request_buffer.size(),
1969 MPI_CHAR,
1970 other_rank,
1971 tag_deliver,
1972 comm,
1973 &send_answer_requests[index]);
1974 AssertThrowMPI(ierr);
1975# else
1976 (void)answer_request;
1977 (void)comm;
1978 (void)index;
1979# endif
1980 }
1981
1982
1983
1984 template <typename RequestType, typename AnswerType>
1985 void
1986 PEX<RequestType, AnswerType>::process_incoming_answers(
1987 const unsigned int n_targets,
1988 const std::function<void(const unsigned int, const AnswerType &)>
1989 &process_answer,
1990 const MPI_Comm comm)
1991 {
1992# ifdef DEAL_II_WITH_MPI
1993 const int tag_deliver = Utilities::MPI::internal::Tags::
1994 consensus_algorithm_pex_process_deliver;
1995
1996 // We know how many targets we have sent requests to. These
1997 // targets will all eventually send us their responses, but
1998 // we need not process them in order -- rather, just see what
1999 // comes in and then look at message originators' ranks and
2000 // message sizes
2001 for (unsigned int i = 0; i < n_targets; ++i)
2002 {
2003 MPI_Status status;
2004 {
2005 const int ierr =
2006 MPI_Probe(MPI_ANY_SOURCE, tag_deliver, comm, &status);
2007 AssertThrowMPI(ierr);
2008 }
2009
2010 const auto other_rank = status.MPI_SOURCE;
2011 int message_size;
2012 {
2013 const int ierr = MPI_Get_count(&status, MPI_CHAR, &message_size);
2014 AssertThrowMPI(ierr);
2015 }
2016 std::vector<char> recv_buffer(message_size);
2017
2018 // Now actually receive the answer. Because the MPI_Probe
2019 // above blocks until we have a message, we know that the
2020 // following MPI_Recv call will immediately succeed.
2021 {
2022 const int ierr = MPI_Recv(recv_buffer.data(),
2023 recv_buffer.size(),
2024 MPI_CHAR,
2025 other_rank,
2026 tag_deliver,
2027 comm,
2028 MPI_STATUS_IGNORE);
2029 AssertThrowMPI(ierr);
2030 }
2031
2032 if (process_answer)
2033 process_answer(other_rank,
2034 Utilities::unpack<AnswerType>(recv_buffer, false));
2035 }
2036# else
2037 (void)n_targets;
2038 (void)process_answer;
2039 (void)comm;
2040# endif
2041 }
2042
2043
2044
2045 template <typename RequestType, typename AnswerType>
2046 void
2047 PEX<RequestType, AnswerType>::clean_up_and_end_communication()
2048 {
2049# ifdef DEAL_II_WITH_MPI
2050 // Finalize all MPI_Request objects for both the
2051 // send-request and receive-answer operations.
2052 if (send_request_requests.size() > 0)
2053 {
2054 const int ierr = MPI_Waitall(send_request_requests.size(),
2055 send_request_requests.data(),
2056 MPI_STATUSES_IGNORE);
2057 AssertThrowMPI(ierr);
2058 }
2059
2060 // Then also check the send-answer requests.
2061 if (send_answer_requests.size() > 0)
2062 {
2063 const int ierr = MPI_Waitall(send_answer_requests.size(),
2064 send_answer_requests.data(),
2065 MPI_STATUSES_IGNORE);
2066 AssertThrowMPI(ierr);
2067 }
2068# endif
2069 }
2070
2071
2072
2073 template <typename RequestType, typename AnswerType>
2074 std::vector<unsigned int>
2075 Serial<RequestType, AnswerType>::run(
2076 const std::vector<unsigned int> &targets,
2077 const std::function<RequestType(const unsigned int)> &create_request,
2078 const std::function<AnswerType(const unsigned int, const RequestType &)>
2079 &answer_request,
2080 const std::function<void(const unsigned int, const AnswerType &)>
2081 &process_answer,
2082 const MPI_Comm comm)
2083 {
2084 (void)comm;
2085 Assert(Utilities::MPI::n_mpi_processes(comm) == 1,
2086 ExcMessage("You shouldn't use the 'Serial' class on "
2087 "communicators that have more than one process "
2088 "associated with it."));
2089
2090 // The only valid target for a serial program is itself.
2091 if (targets.size() != 0)
2092 {
2093 Assert(targets.size() == 1,
2094 ExcMessage(
2095 "On a single process, the only valid target "
2096 "is process zero (the process itself), which can only be "
2097 "listed once."));
2098 AssertDimension(targets[0], 0);
2099
2100 // Since the caller indicates that there is a target, and since we
2101 // know that it is the current process, let the process send
2102 // something to itself.
2103 const RequestType request =
2104 (create_request ? create_request(0) : RequestType());
2105 const AnswerType answer =
2106 (answer_request ? answer_request(0, request) : AnswerType());
2107
2108 if (process_answer)
2109 process_answer(0, answer);
2110 }
2111
2112 return targets; // nothing to do
2113 }
2114
2115
2116
2117 template <typename RequestType, typename AnswerType>
2118 std::vector<unsigned int>
2119 Selector<RequestType, AnswerType>::run(
2120 const std::vector<unsigned int> &targets,
2121 const std::function<RequestType(const unsigned int)> &create_request,
2122 const std::function<AnswerType(const unsigned int, const RequestType &)>
2123 &answer_request,
2124 const std::function<void(const unsigned int, const AnswerType &)>
2125 &process_answer,
2126 const MPI_Comm comm)
2127 {
2128 // Depending on the number of processes we switch between
2129 // implementations. We reduce the threshold for debug mode to be
2130 // able to test also the non-blocking implementation. This feature
2131 // is tested by:
2132 // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=10.output
2133
2134 const unsigned int n_procs = (Utilities::MPI::job_supports_mpi() ?
2135 Utilities::MPI::n_mpi_processes(comm) :
2136 1);
2137# ifdef DEAL_II_WITH_MPI
2138# ifdef DEBUG
2139 if (n_procs > 10)
2140# else
2141 if (n_procs > 99)
2142# endif
2143 consensus_algo.reset(new NBX<RequestType, AnswerType>());
2144 else
2145# endif
2146 if (n_procs > 1)
2147 consensus_algo.reset(new PEX<RequestType, AnswerType>());
2148 else
2149 consensus_algo.reset(new Serial<RequestType, AnswerType>());
2150
2151 return consensus_algo->run(
2152 targets, create_request, answer_request, process_answer, comm);
2153 }
2154
2155
2156 } // namespace ConsensusAlgorithms
2157 } // end of namespace MPI
2158} // end of namespace Utilities
2159
2160#endif // DOXYGEN
2161
2162
2163DEAL_II_NAMESPACE_CLOSE
2164
2165#endif