mpi_consensus_algorithms.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2020 - 2023 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_mpi_consensus_algorithm_h
16#define dealii_mpi_consensus_algorithm_h
17
18#include <deal.II/base/config.h>
19
20#include <deal.II/base/mpi.h>
21#include <deal.II/base/mpi.templates.h>
23
24DEAL_II_NAMESPACE_OPEN
 25
26
27namespace Utilities
28{
29 namespace MPI
30 {
131 namespace ConsensusAlgorithms
132 {
157 template <typename RequestType, typename AnswerType>
 158 class Process
 159 {
160 public:
165 virtual ~Process() = default;
166
173 virtual std::vector<unsigned int>
 174 compute_targets() = 0;
 175
185 virtual void
186 create_request(const unsigned int other_rank, RequestType &send_buffer);
187
200 virtual void
201 answer_request(const unsigned int other_rank,
202 const RequestType &buffer_recv,
203 AnswerType &request_buffer);
204
212 virtual void
213 read_answer(const unsigned int other_rank,
214 const AnswerType &recv_buffer);
215 };
216
217
218
232 template <typename RequestType, typename AnswerType>
 233 class Interface
 234 {
235 public:
239 Interface() = default;
240
245 virtual ~Interface() = default;
246
255 std::vector<unsigned int>
 256 run(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
 257
281 virtual std::vector<unsigned int>
 282 run(
 283 const std::vector<unsigned int> &targets,
284 const std::function<RequestType(const unsigned int)> &create_request,
285 const std::function<AnswerType(const unsigned int,
286 const RequestType &)> &answer_request,
287 const std::function<void(const unsigned int, const AnswerType &)>
288 &process_answer,
289 const MPI_Comm comm) = 0;
290 };
291
292
306 template <typename RequestType, typename AnswerType>
307 class NBX : public Interface<RequestType, AnswerType>
308 {
309 public:
313 NBX() = default;
314
318 virtual ~NBX() = default;
319
320 // Import the declarations from the base class.
321 using Interface<RequestType, AnswerType>::run;
322
326 virtual std::vector<unsigned int>
 327 run(
 328 const std::vector<unsigned int> &targets,
329 const std::function<RequestType(const unsigned int)> &create_request,
330 const std::function<AnswerType(const unsigned int,
331 const RequestType &)> &answer_request,
332 const std::function<void(const unsigned int, const AnswerType &)>
333 &process_answer,
334 const MPI_Comm comm) override;
335
336 private:
337#ifdef DEAL_II_WITH_MPI
341 std::vector<std::vector<char>> send_buffers;
342
346 std::vector<MPI_Request> send_requests;
347
355 std::vector<std::unique_ptr<std::vector<char>>> request_buffers;
356
360 std::vector<std::unique_ptr<MPI_Request>> request_requests;
361
 365 unsigned int n_outstanding_answers;
 366
367 // request for barrier
368 MPI_Request barrier_request;
369#endif
370
374 std::set<unsigned int> requesting_processes;
375
381 bool
 382 all_locally_originated_receives_are_completed(
 383 const std::function<void(const unsigned int, const AnswerType &)>
384 &process_answer,
385 const MPI_Comm comm);
386
391 void
 392 signal_finish(const MPI_Comm comm);
 393
399 bool
 400 all_remotely_originated_receives_are_completed();
 401
407 void
 408 maybe_answer_one_request(
 409 const std::function<AnswerType(const unsigned int,
410 const RequestType &)> &answer_request,
411 const MPI_Comm comm);
412
417 void
 418 start_communication(
 419 const std::vector<unsigned int> &targets,
420 const std::function<RequestType(const unsigned int)> &create_request,
421 const MPI_Comm comm);
422
427 void
 428 clean_up_and_end_communication(const MPI_Comm comm);
 429 };
430
431
476 template <typename RequestType, typename AnswerType>
477 std::vector<unsigned int>
478 nbx(const std::vector<unsigned int> &targets,
479 const std::function<RequestType(const unsigned int)> &create_request,
480 const std::function<AnswerType(const unsigned int,
481 const RequestType &)> &answer_request,
482 const std::function<void(const unsigned int, const AnswerType &)>
483 &process_answer,
484 const MPI_Comm comm);
485
523 template <typename RequestType>
524 std::vector<unsigned int>
525 nbx(const std::vector<unsigned int> &targets,
526 const std::function<RequestType(const unsigned int)> &create_request,
527 const std::function<void(const unsigned int, const RequestType &)>
528 &process_request,
529 const MPI_Comm comm);
530
556 template <typename RequestType, typename AnswerType>
557 class PEX : public Interface<RequestType, AnswerType>
558 {
559 public:
563 PEX() = default;
564
568 virtual ~PEX() = default;
569
570 // Import the declarations from the base class.
571 using Interface<RequestType, AnswerType>::run;
572
576 virtual std::vector<unsigned int>
 577 run(
 578 const std::vector<unsigned int> &targets,
579 const std::function<RequestType(const unsigned int)> &create_request,
580 const std::function<AnswerType(const unsigned int,
581 const RequestType &)> &answer_request,
582 const std::function<void(const unsigned int, const AnswerType &)>
583 &process_answer,
584 const MPI_Comm comm) override;
585
586 private:
587#ifdef DEAL_II_WITH_MPI
591 std::vector<std::vector<char>> send_buffers;
592
596 std::vector<std::vector<char>> recv_buffers;
597
601 std::vector<MPI_Request> send_request_requests;
602
606 std::vector<std::vector<char>> requests_buffers;
607
611 std::vector<MPI_Request> send_answer_requests;
612#endif
616 std::set<unsigned int> requesting_processes;
617
622 unsigned int
 623 start_communication(
 624 const std::vector<unsigned int> &targets,
625 const std::function<RequestType(const unsigned int)> &create_request,
626 const MPI_Comm comm);
627
632 void
 633 answer_one_request(
 634 const unsigned int index,
635 const std::function<AnswerType(const unsigned int,
636 const RequestType &)> &answer_request,
637 const MPI_Comm comm);
638
643 void
 644 process_incoming_answers(
 645 const unsigned int n_targets,
646 const std::function<void(const unsigned int, const AnswerType &)>
647 &process_answer,
648 const MPI_Comm comm);
649
654 void
 655 clean_up_and_end_communication();
 656 };
657
658
659
716 template <typename RequestType, typename AnswerType>
717 std::vector<unsigned int>
718 pex(const std::vector<unsigned int> &targets,
719 const std::function<RequestType(const unsigned int)> &create_request,
720 const std::function<AnswerType(const unsigned int,
721 const RequestType &)> &answer_request,
722 const std::function<void(const unsigned int, const AnswerType &)>
723 &process_answer,
724 const MPI_Comm comm);
725
763 template <typename RequestType>
764 std::vector<unsigned int>
765 pex(const std::vector<unsigned int> &targets,
766 const std::function<RequestType(const unsigned int)> &create_request,
767 const std::function<void(const unsigned int, const RequestType &)>
768 &process_request,
769 const MPI_Comm comm);
770
771
776 template <typename RequestType, typename AnswerType>
777 class Serial : public Interface<RequestType, AnswerType>
778 {
779 public:
783 Serial() = default;
784
785 // Import the declarations from the base class.
786 using Interface<RequestType, AnswerType>::run;
787
791 virtual std::vector<unsigned int>
 792 run(
 793 const std::vector<unsigned int> &targets,
794 const std::function<RequestType(const unsigned int)> &create_request,
795 const std::function<AnswerType(const unsigned int,
796 const RequestType &)> &answer_request,
797 const std::function<void(const unsigned int, const AnswerType &)>
798 &process_answer,
799 const MPI_Comm comm) override;
800 };
801
802
803
836 template <typename RequestType, typename AnswerType>
837 std::vector<unsigned int>
 838 serial(
 839 const std::vector<unsigned int> &targets,
840 const std::function<RequestType(const unsigned int)> &create_request,
841 const std::function<AnswerType(const unsigned int, const RequestType &)>
842 &answer_request,
843 const std::function<void(const unsigned int, const AnswerType &)>
844 &process_answer,
845 const MPI_Comm comm);
846
876 template <typename RequestType>
877 std::vector<unsigned int>
 878 serial(
 879 const std::vector<unsigned int> &targets,
880 const std::function<RequestType(const unsigned int)> &create_request,
881 const std::function<void(const unsigned int, const RequestType &)>
882 &process_request,
883 const MPI_Comm comm);
884
885
886
899 template <typename RequestType, typename AnswerType>
900 class Selector : public Interface<RequestType, AnswerType>
901 {
902 public:
906 Selector() = default;
907
911 virtual ~Selector() = default;
912
913 // Import the declarations from the base class.
914 using Interface<RequestType, AnswerType>::run;
915
921 virtual std::vector<unsigned int>
 922 run(
 923 const std::vector<unsigned int> &targets,
924 const std::function<RequestType(const unsigned int)> &create_request,
925 const std::function<AnswerType(const unsigned int,
926 const RequestType &)> &answer_request,
927 const std::function<void(const unsigned int, const AnswerType &)>
928 &process_answer,
929 const MPI_Comm comm) override;
930
931 private:
932 // Pointer to the actual ConsensusAlgorithms::Interface implementation.
933 std::shared_ptr<Interface<RequestType, AnswerType>> consensus_algo;
934 };
935
936
937
982 template <typename RequestType, typename AnswerType>
983 std::vector<unsigned int>
 984 selector(
 985 const std::vector<unsigned int> &targets,
986 const std::function<RequestType(const unsigned int)> &create_request,
987 const std::function<AnswerType(const unsigned int, const RequestType &)>
988 &answer_request,
989 const std::function<void(const unsigned int, const AnswerType &)>
990 &process_answer,
991 const MPI_Comm comm);
992
1030 template <typename RequestType>
1031 std::vector<unsigned int>
 1032 selector(
 1033 const std::vector<unsigned int> &targets,
1034 const std::function<RequestType(const unsigned int)> &create_request,
1035 const std::function<void(const unsigned int, const RequestType &)>
1036 &process_request,
1037 const MPI_Comm comm);
1038
1039
1040
1041#ifndef DOXYGEN
1042 // Implementation of the functions in this namespace.
1043
1044 template <typename RequestType, typename AnswerType>
1045 std::vector<unsigned int>
1046 nbx(const std::vector<unsigned int> &targets,
1047 const std::function<RequestType(const unsigned int)> &create_request,
1048 const std::function<AnswerType(const unsigned int,
1049 const RequestType &)> &answer_request,
1050 const std::function<void(const unsigned int, const AnswerType &)>
1051 &process_answer,
1052 const MPI_Comm comm)
1053 {
 1054 return NBX<RequestType, AnswerType>().run(
 1055 targets, create_request, answer_request, process_answer, comm);
1056 }
1057
1058
1059
1060 template <typename RequestType>
1061 std::vector<unsigned int>
1062 nbx(const std::vector<unsigned int> &targets,
1063 const std::function<RequestType(const unsigned int)> &create_request,
1064 const std::function<void(const unsigned int, const RequestType &)>
1065 &process_request,
1066 const MPI_Comm comm)
1067 {
1068 // TODO: For the moment, simply implement this special case by
1069 // forwarding to the other function with rewritten function
1070 // objects and using an empty type as answer type. This way,
1071 // we have the interface in place and can provide a more
1072 // efficient implementation later on.
1073 using EmptyType = std::tuple<>;
1074
1075 return nbx<RequestType, EmptyType>(
1076 targets,
1077 create_request,
1078 // answer_request:
1079 [&process_request](const unsigned int source_rank,
1080 const RequestType &request) -> EmptyType {
1081 process_request(source_rank, request);
1082 // Return something. What it is is arbitrary here, except that
1083 // we want it to be as small an object as possible. Using
1084 // std::tuple<> is interpreted as an empty object that is packed
1085 // down to a zero-length char array.
1086 return {};
1087 },
1088 // process_answer:
1089 [](const unsigned int /*target_rank */,
1090 const EmptyType & /*answer*/) {},
1091 comm);
1092 }
1093
1094
1095
1096 template <typename RequestType, typename AnswerType>
1097 std::vector<unsigned int>
1098 pex(const std::vector<unsigned int> &targets,
1099 const std::function<RequestType(const unsigned int)> &create_request,
1100 const std::function<AnswerType(const unsigned int,
1101 const RequestType &)> &answer_request,
1102 const std::function<void(const unsigned int, const AnswerType &)>
1103 &process_answer,
1104 const MPI_Comm comm)
1105 {
1106 return PEX<RequestType, AnswerType>().run(
1107 targets, create_request, answer_request, process_answer, comm);
1108 }
1109
1110
1111
1112 template <typename RequestType>
1113 std::vector<unsigned int>
1114 pex(const std::vector<unsigned int> &targets,
1115 const std::function<RequestType(const unsigned int)> &create_request,
1116 const std::function<void(const unsigned int, const RequestType &)>
1117 &process_request,
1118 const MPI_Comm comm)
1119 {
1120 // TODO: For the moment, simply implement this special case by
1121 // forwarding to the other function with rewritten function
1122 // objects and using an empty type as answer type. This way,
1123 // we have the interface in place and can provide a more
1124 // efficient implementation later on.
1125 using EmptyType = std::tuple<>;
1126
1127 return pex<RequestType, EmptyType>(
1128 targets,
1129 create_request,
1130 // answer_request:
1131 [&process_request](const unsigned int source_rank,
1132 const RequestType &request) -> EmptyType {
1133 process_request(source_rank, request);
1134 // Return something. What it is is arbitrary here, except that
1135 // we want it to be as small an object as possible. Using
1136 // std::tuple<> is interpreted as an empty object that is packed
1137 // down to a zero-length char array.
1138 return {};
1139 },
1140 // process_answer:
1141 [](const unsigned int /*target_rank */,
1142 const EmptyType & /*answer*/) {},
1143 comm);
1144 }
1145
1146
1147
1148 template <typename RequestType, typename AnswerType>
1149 std::vector<unsigned int>
1150 serial(
1151 const std::vector<unsigned int> &targets,
1152 const std::function<RequestType(const unsigned int)> &create_request,
1153 const std::function<AnswerType(const unsigned int, const RequestType &)>
1154 &answer_request,
1155 const std::function<void(const unsigned int, const AnswerType &)>
1156 &process_answer,
1157 const MPI_Comm comm)
1158 {
1159 return Serial<RequestType, AnswerType>().run(
1160 targets, create_request, answer_request, process_answer, comm);
1161 }
1162
1163
1164
1165 template <typename RequestType>
1166 std::vector<unsigned int>
1167 serial(
1168 const std::vector<unsigned int> &targets,
1169 const std::function<RequestType(const unsigned int)> &create_request,
1170 const std::function<void(const unsigned int, const RequestType &)>
1171 &process_request,
1172 const MPI_Comm comm)
1173 {
1174 // TODO: For the moment, simply implement this special case by
1175 // forwarding to the other function with rewritten function
1176 // objects and using an empty type as answer type. This way,
1177 // we have the interface in place and can provide a more
1178 // efficient implementation later on.
1179 using EmptyType = std::tuple<>;
1180
1181 return serial<RequestType, EmptyType>(
1182 targets,
1183 create_request,
1184 // answer_request:
1185 [&process_request](const unsigned int source_rank,
1186 const RequestType &request) -> EmptyType {
1187 process_request(source_rank, request);
1188 // Return something. What it is is arbitrary here, except that
1189 // we want it to be as small an object as possible. Using
1190 // std::tuple<> is interpreted as an empty object that is packed
1191 // down to a zero-length char array.
1192 return {};
1193 },
1194 // process_answer:
1195 [](const unsigned int /*target_rank */,
1196 const EmptyType & /*answer*/) {},
1197 comm);
1198 }
1199
1200
1201
1202 template <typename RequestType, typename AnswerType>
1203 std::vector<unsigned int>
1204 selector(
1205 const std::vector<unsigned int> &targets,
1206 const std::function<RequestType(const unsigned int)> &create_request,
1207 const std::function<AnswerType(const unsigned int, const RequestType &)>
1208 &answer_request,
1209 const std::function<void(const unsigned int, const AnswerType &)>
1210 &process_answer,
1211 const MPI_Comm comm)
1212 {
1213 return Selector<RequestType, AnswerType>().run(
1214 targets, create_request, answer_request, process_answer, comm);
1215 }
1216
1217
1218
1219 template <typename RequestType>
1220 std::vector<unsigned int>
1221 selector(
1222 const std::vector<unsigned int> &targets,
1223 const std::function<RequestType(const unsigned int)> &create_request,
1224 const std::function<void(const unsigned int, const RequestType &)>
1225 &process_request,
1226 const MPI_Comm comm)
1227 {
1228 // TODO: For the moment, simply implement this special case by
1229 // forwarding to the other function with rewritten function
1230 // objects and using an empty type as answer type. This way,
1231 // we have the interface in place and can provide a more
1232 // efficient implementation later on.
1233 using EmptyType = std::tuple<>;
1234
1235 return selector<RequestType, EmptyType>(
1236 targets,
1237 create_request,
1238 // answer_request:
1239 [&process_request](const unsigned int source_rank,
1240 const RequestType &request) -> EmptyType {
1241 process_request(source_rank, request);
1242 // Return something. What it is is arbitrary here, except that
1243 // we want it to be as small an object as possible. Using
1244 // std::tuple<> is interpreted as an empty object that is packed
1245 // down to a zero-length char array.
1246 return {};
1247 },
1248 // process_answer:
1249 [](const unsigned int /*target_rank */,
1250 const EmptyType & /*answer*/) {},
1251 comm);
1252 }
1253
1254#endif
1255
1256
1257 } // namespace ConsensusAlgorithms
1258 } // end of namespace MPI
1259} // end of namespace Utilities
1260
1261
1262
1263#ifndef DOXYGEN
1264
1265// ----------------- Implementation of template functions
1266
1267namespace Utilities
1268{
1269 namespace MPI
1270 {
1271 namespace ConsensusAlgorithms
1272 {
1273 namespace
1274 {
1279 inline bool
1280 has_unique_elements(const std::vector<unsigned int> &targets)
1281 {
1282 std::vector<unsigned int> my_destinations = targets;
1283 std::sort(my_destinations.begin(), my_destinations.end());
1284 return (std::adjacent_find(my_destinations.begin(),
1285 my_destinations.end()) ==
1286 my_destinations.end());
1287 }
1288
1289
1290
1294 inline void
1295 handle_exception(std::exception_ptr &&exception, const MPI_Comm comm)
1296 {
1297# ifdef DEAL_II_WITH_MPI
1298 // an exception within a ConsensusAlgorithm likely causes an
1299 // MPI deadlock. Abort with a reasonable error message instead.
1300 try
1301 {
1302 std::rethrow_exception(exception);
1303 }
1304 catch (ExceptionBase &exc)
1305 {
1306 // report name of the deal.II exception:
1307 std::cerr
1308 << std::endl
1309 << std::endl
1310 << "----------------------------------------------------"
1311 << std::endl;
1312 std::cerr
1313 << "Exception '" << exc.get_exc_name() << "'"
1314 << " on rank " << Utilities::MPI::this_mpi_process(comm)
1315 << " on processing: " << std::endl
1316 << exc.what() << std::endl
1317 << "Aborting!" << std::endl
1318 << "----------------------------------------------------"
1319 << std::endl;
1320
1321 // Then bring down the whole MPI world
1322 MPI_Abort(comm, 255);
1323 }
1324 catch (std::exception &exc)
1325 {
1326 std::cerr
1327 << std::endl
1328 << std::endl
1329 << "----------------------------------------------------"
1330 << std::endl;
1331 std::cerr
1332 << "Exception within ConsensusAlgorithm"
1333 << " on rank " << Utilities::MPI::this_mpi_process(comm)
1334 << " on processing: " << std::endl
1335 << exc.what() << std::endl
1336 << "Aborting!" << std::endl
1337 << "----------------------------------------------------"
1338 << std::endl;
1339
1340 // Then bring down the whole MPI world
1341 MPI_Abort(comm, 255);
1342 }
1343 catch (...)
1344 {
1345 std::cerr
1346 << std::endl
1347 << std::endl
1348 << "----------------------------------------------------"
1349 << std::endl;
1350 std::cerr
1351 << "Unknown exception within ConsensusAlgorithm!" << std::endl
1352 << "Aborting!" << std::endl
1353 << "----------------------------------------------------"
1354 << std::endl;
1355
1356 // Then bring down the whole MPI world
1357 MPI_Abort(comm, 255);
1358 }
1359# else
1360 (void)comm;
1361
1362 // No need to be concerned about deadlocks without MPI.
1363 // Defer to exception handling further up the callstack.
1364 std::rethrow_exception(exception);
1365# endif
1366 }
1367 } // namespace
1368
1369
1370
1371 template <typename RequestType, typename AnswerType>
1372 void
 1373 Process<RequestType, AnswerType>::answer_request(const unsigned int,
 1374 const RequestType &,
1375 AnswerType &)
1376 {
1377 // nothing to do
1378 }
1379
1380
1381
1382 template <typename RequestType, typename AnswerType>
1383 void
 1384 Process<RequestType, AnswerType>::create_request(const unsigned int,
 1385 RequestType &)
1386 {
1387 // nothing to do
1388 }
1389
1390
1391
1392 template <typename RequestType, typename AnswerType>
1393 void
 1394 Process<RequestType, AnswerType>::read_answer(const unsigned int,
 1395 const AnswerType &)
1396 {
1397 // nothing to do
1398 }
1399
1400
1401
1402 template <typename RequestType, typename AnswerType>
1403 std::vector<unsigned int>
 1404 Interface<RequestType, AnswerType>::run(
 1405 Process<RequestType, AnswerType> &process,
1406 const MPI_Comm comm)
1407 {
1408 // Unpack the 'process' object and call the function that takes
1409 // function objects for all operations.
1410 return run(
1411 process.compute_targets(),
1412 /* create_request: */
1413 [&process](const unsigned int target) {
1414 RequestType request;
1415 process.create_request(target, request);
1416 return request;
1417 },
1418 /* answer_request: */
1419 [&process](const unsigned int source, const RequestType &request) {
1420 AnswerType answer;
1421 process.answer_request(source, request, answer);
1422 return answer;
1423 },
1424 /* process_answer: */
1425 [&process](const unsigned int target, const AnswerType &answer) {
1426 process.read_answer(target, answer);
1427 },
1428 comm);
1429 }
1430
1431
1432
1433 template <typename RequestType, typename AnswerType>
1434 std::vector<unsigned int>
 1435 NBX<RequestType, AnswerType>::run(
 1436 const std::vector<unsigned int> &targets,
1437 const std::function<RequestType(const unsigned int)> &create_request,
1438 const std::function<AnswerType(const unsigned int, const RequestType &)>
1439 &answer_request,
1440 const std::function<void(const unsigned int, const AnswerType &)>
1441 &process_answer,
1442 const MPI_Comm comm)
1443 {
1444 Assert(has_unique_elements(targets),
1445 ExcMessage("The consensus algorithms expect that each process "
1446 "only sends a single message to another process, "
1447 "but the targets provided include duplicates."));
1448
1449 static CollectiveMutex mutex;
1450 CollectiveMutex::ScopedLock lock(mutex, comm);
1451
1452 try
1453 {
1454 // 1) Send data to identified targets and start receiving
1455 // the answers from these very same processes.
1456 start_communication(targets, create_request, comm);
1457
1458 // 2) Until all posted receive operations are known to have
1459 // completed, answer requests and keep checking whether all
1460 // requests of this process have been answered.
1461 //
1462 // The requests that we catch in the answer_requests()
1463 // function originate elsewhere, that is, they are not in
1464 // response to our own messages
1465 //
1466 // Note also that we may not catch all incoming requests in
1467 // the following two lines: our own requests may have been
1468 // satisfied before we've dealt with all incoming requests.
1469 // That's ok: We will get around to dealing with all
 1470 // remaining messages later. We just want to move on to the
1471 // next step as early as possible.
1472 while (all_locally_originated_receives_are_completed(process_answer,
1473 comm) == false)
1474 maybe_answer_one_request(answer_request, comm);
1475
1476 // 3) Signal to all other processes that all requests of this
1477 // process have been answered
1478 signal_finish(comm);
1479
1480 // 4) Nevertheless, this process has to keep on answering
1481 // (potential) incoming requests until all processes have
1482 // received the answer to all requests
1483 while (all_remotely_originated_receives_are_completed() == false)
1484 maybe_answer_one_request(answer_request, comm);
1485
1486 // 5) process the answer to all requests
1487 clean_up_and_end_communication(comm);
1488 }
1489 catch (...)
1490 {
1491 handle_exception(std::current_exception(), comm);
1492 }
1493
1494 return std::vector<unsigned int>(requesting_processes.begin(),
1495 requesting_processes.end());
1496 }
1497
1498
1499
1500 template <typename RequestType, typename AnswerType>
1501 void
 1502 NBX<RequestType, AnswerType>::start_communication(
 1503 const std::vector<unsigned int> &targets,
1504 const std::function<RequestType(const unsigned int)> &create_request,
1505 const MPI_Comm comm)
1506 {
1507# ifdef DEAL_II_WITH_MPI
1508 // 1)
1509 const auto n_targets = targets.size();
1510
1511 const int tag_request = Utilities::MPI::internal::Tags::
 1512 consensus_algorithm_nbx_answer_request;
 1513
1514 // 2) allocate memory
1515 send_requests.resize(n_targets);
1516 send_buffers.resize(n_targets);
1517
1518 {
1519 // 4) send and receive
1520 for (unsigned int index = 0; index < n_targets; ++index)
1521 {
1522 const unsigned int rank = targets[index];
 1523 AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm));
 1524
1525 auto &send_buffer = send_buffers[index];
1526 send_buffer =
1527 (create_request ? Utilities::pack(create_request(rank), false) :
1528 std::vector<char>());
1529
1530 // Post a request to send data
1531 auto ierr = MPI_Isend(send_buffer.data(),
1532 send_buffer.size(),
1533 MPI_CHAR,
1534 rank,
1535 tag_request,
1536 comm,
1537 &send_requests[index]);
1538 AssertThrowMPI(ierr);
1539 }
1540
1541 // Also record that we expect an answer from each target we sent
1542 // a request to:
1543 n_outstanding_answers = n_targets;
1544 }
1545# else
1546 (void)targets;
1547 (void)create_request;
1548 (void)comm;
1549# endif
1550 }
1551
1552
1553
1554 template <typename RequestType, typename AnswerType>
1555 bool
 1556 NBX<RequestType,
 1557 AnswerType>::all_locally_originated_receives_are_completed(
 1558 const std::function<void(const unsigned int, const AnswerType &)>
1559 &process_answer,
1560 const MPI_Comm comm)
1561 {
1562# ifdef DEAL_II_WITH_MPI
1563 // We know that all requests have come in when we have pending
1564 // messages from all targets with the right tag (some of which we may
1565 // have already taken care of below, after discovering their existence).
 1566 // We can check for pending messages with MPI_Iprobe, which returns
1567 // immediately with a return code that indicates whether
1568 // it has found a message from any process with a given
1569 // tag.
1570 if (n_outstanding_answers == 0)
1571 return true;
1572 else
1573 {
1574 const int tag_deliver = Utilities::MPI::internal::Tags::
 1575 consensus_algorithm_nbx_process_deliver;
 1576
1577 int request_is_pending;
1578 MPI_Status status;
1579 const auto ierr = MPI_Iprobe(
1580 MPI_ANY_SOURCE, tag_deliver, comm, &request_is_pending, &status);
1581 AssertThrowMPI(ierr);
1582
1583 // If there is no pending message with this tag,
1584 // then we are clearly not done receiving everything
1585 // yet -- so return false.
1586 if (request_is_pending == 0)
1587 return false;
1588 else
1589 {
1590 // OK, so we have gotten a reply to our request from
1591 // one rank. Let us process it.
1592 const auto target = status.MPI_SOURCE;
1593
1594 // Then query the size of the message, allocate enough memory,
1595 // receive the data, and process it.
1596 int message_size;
1597 {
1598 const int ierr =
1599 MPI_Get_count(&status, MPI_CHAR, &message_size);
1600 AssertThrowMPI(ierr);
1601 }
1602 std::vector<char> recv_buffer(message_size);
1603
1604 {
1605 const int tag_deliver = Utilities::MPI::internal::Tags::
 1606 consensus_algorithm_nbx_process_deliver;
 1607
1608 const int ierr = MPI_Recv(recv_buffer.data(),
1609 recv_buffer.size(),
1610 MPI_CHAR,
1611 target,
1612 tag_deliver,
1613 comm,
1614 MPI_STATUS_IGNORE);
1615 AssertThrowMPI(ierr);
1616 }
1617
1618 if (process_answer)
1619 process_answer(target,
1620 Utilities::unpack<AnswerType>(recv_buffer,
1621 false));
1622
1623 // Finally, remove this rank from the list of outstanding
1624 // targets:
1625 --n_outstanding_answers;
1626
1627 // We could do another go-around from the top of this
1628 // else-branch to see whether there are actually other messages
1629 // that are currently pending. But that would mean spending
1630 // substantial time in receiving answers while we should also be
1631 // sending answers to requests we have received from other
1632 // places. So let it be enough for now. If there are outstanding
1633 // answers, we will get back to this function before long and
1634 // can take care of them then.
1635 return (n_outstanding_answers == 0);
1636 }
1637 }
1638
1639# else
1640 (void)process_answer;
1641 (void)comm;
1642
1643 return true;
1644# endif
1645 }
1646
1647
1648
1649 template <typename RequestType, typename AnswerType>
1650 void
 1651 NBX<RequestType, AnswerType>::maybe_answer_one_request(
 1652 const std::function<AnswerType(const unsigned int, const RequestType &)>
1653 &answer_request,
1654 const MPI_Comm comm)
1655 {
1656# ifdef DEAL_II_WITH_MPI
1657
1658 const int tag_request = Utilities::MPI::internal::Tags::
 1659 consensus_algorithm_nbx_answer_request;
 1660 const int tag_deliver = Utilities::MPI::internal::Tags::
 1661 consensus_algorithm_nbx_process_deliver;
 1662
1663 // Check if there is a request pending. By selecting the
1664 // tag_request tag, these are other processes asking for
1665 // our own replies, not these other processes' replies
1666 // to our own requests.
1667 //
1668 // There may be multiple such pending messages. We
1669 // only answer one.
1670 MPI_Status status;
1671 int request_is_pending;
1672 const auto ierr = MPI_Iprobe(
1673 MPI_ANY_SOURCE, tag_request, comm, &request_is_pending, &status);
1674 AssertThrowMPI(ierr);
1675
1676 if (request_is_pending != 0)
1677 {
1678 // Get the rank of the requesting process and add it to the
1679 // list of requesting processes (which may contain duplicates).
1680 const auto other_rank = status.MPI_SOURCE;
1681
1682 Assert(requesting_processes.find(other_rank) ==
1683 requesting_processes.end(),
1684 ExcMessage("Process is requesting a second time!"));
1685 requesting_processes.insert(other_rank);
1686
1687 // get size of incoming message
1688 int number_amount;
1689 auto ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount);
1690 AssertThrowMPI(ierr);
1691
1692 // allocate memory for incoming message
1693 std::vector<char> buffer_recv(number_amount);
1694 ierr = MPI_Recv(buffer_recv.data(),
1695 number_amount,
1696 MPI_CHAR,
1697 other_rank,
1698 tag_request,
1699 comm,
1700 MPI_STATUS_IGNORE);
1701 AssertThrowMPI(ierr);
1702
1703 // Allocate memory for an answer message to the current request,
1704 // and ask the 'process' object to produce an answer:
1705 request_buffers.emplace_back(std::make_unique<std::vector<char>>());
1706 auto &request_buffer = *request_buffers.back();
1707 if (answer_request)
1708 request_buffer =
1709 Utilities::pack(answer_request(other_rank,
1710 Utilities::unpack<RequestType>(
1711 buffer_recv, false)),
1712 false);
1713
1714 // Then initiate sending the answer back to the requester.
1715 request_requests.emplace_back(std::make_unique<MPI_Request>());
1716 ierr = MPI_Isend(request_buffer.data(),
1717 request_buffer.size(),
1718 MPI_CHAR,
1719 other_rank,
1720 tag_deliver,
1721 comm,
1722 request_requests.back().get());
1723 AssertThrowMPI(ierr);
1724 }
1725# else
1726 (void)answer_request;
1727 (void)comm;
1728# endif
1729 }
1730
1731
1732
1733 template <typename RequestType, typename AnswerType>
1734 void
 1735 NBX<RequestType, AnswerType>::signal_finish(const MPI_Comm comm)
 1736 {
1737# ifdef DEAL_II_WITH_MPI
1738 const auto ierr = MPI_Ibarrier(comm, &barrier_request);
1739 AssertThrowMPI(ierr);
1740# else
1741 (void)comm;
1742# endif
1743 }
1744
1745
1746
1747 template <typename RequestType, typename AnswerType>
1748 bool
1749 NBX<RequestType,
1750 AnswerType>::all_remotely_originated_receives_are_completed()
1751 {
1752# ifdef DEAL_II_WITH_MPI
1753 int all_ranks_reached_barrier;
1754 const auto ierr = MPI_Test(&barrier_request,
1755 &all_ranks_reached_barrier,
1756 MPI_STATUS_IGNORE);
1757 AssertThrowMPI(ierr);
1758 return all_ranks_reached_barrier != 0;
1759# else
1760 return true;
1761# endif
1762 }
1763
1764
1765
1766 template <typename RequestType, typename AnswerType>
1767 void
 1768 NBX<RequestType, AnswerType>::clean_up_and_end_communication(
 1769 const MPI_Comm comm)
1770 {
1771 (void)comm;
1772# ifdef DEAL_II_WITH_MPI
1773 // clean up
1774 {
1775 if (send_requests.size() > 0)
1776 {
1777 const int ierr = MPI_Waitall(send_requests.size(),
1778 send_requests.data(),
1779 MPI_STATUSES_IGNORE);
1780 AssertThrowMPI(ierr);
1781 }
1782
1783 int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
1784 AssertThrowMPI(ierr);
1785
1786 for (auto &i : request_requests)
1787 {
1788 ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
1789 AssertThrowMPI(ierr);
1790 }
1791
1792# ifdef DEBUG
 1793 // note: MPI_Ibarrier seems to cause problems during testing; this
 1794 // additional MPI_Barrier seems to help
1795 ierr = MPI_Barrier(comm);
1796 AssertThrowMPI(ierr);
1797# endif
1798 }
1799# endif
1800 }
1801
1802
1803
1804 template <typename RequestType, typename AnswerType>
1805 std::vector<unsigned int>
 1806 PEX<RequestType, AnswerType>::run(
 1807 const std::vector<unsigned int> &targets,
1808 const std::function<RequestType(const unsigned int)> &create_request,
1809 const std::function<AnswerType(const unsigned int, const RequestType &)>
1810 &answer_request,
1811 const std::function<void(const unsigned int, const AnswerType &)>
1812 &process_answer,
1813 const MPI_Comm comm)
1814 {
1815 Assert(has_unique_elements(targets),
1816 ExcMessage("The consensus algorithms expect that each process "
1817 "only sends a single message to another process, "
1818 "but the targets provided include duplicates."));
1819
1820 static CollectiveMutex mutex;
1821 CollectiveMutex::ScopedLock lock(mutex, comm);
1822
1823 try
1824 {
1825 // 1) Send requests and start receiving the answers.
1826 // In particular, determine how many requests we should expect
1827 // on the current process.
1828 const unsigned int n_requests =
1829 start_communication(targets, create_request, comm);
1830
1831 // 2) Answer requests:
1832 for (unsigned int request = 0; request < n_requests; ++request)
1833 answer_one_request(request, answer_request, comm);
1834
1835 // 3) Process answers:
1836 process_incoming_answers(targets.size(), process_answer, comm);
1837
1838 // 4) Make sure all sends have successfully terminated:
1839 clean_up_and_end_communication();
1840 }
1841 catch (...)
1842 {
1843 handle_exception(std::current_exception(), comm);
1844 }
1845
1846 return std::vector<unsigned int>(requesting_processes.begin(),
1847 requesting_processes.end());
1848 }
1849
1850
1851
1852 template <typename RequestType, typename AnswerType>
1853 unsigned int
 1854 PEX<RequestType, AnswerType>::start_communication(
 1855 const std::vector<unsigned int> &targets,
1856 const std::function<RequestType(const unsigned int)> &create_request,
1857 const MPI_Comm comm)
1858 {
1859# ifdef DEAL_II_WITH_MPI
1860 const int tag_request = Utilities::MPI::internal::Tags::
 1861 consensus_algorithm_pex_answer_request;
 1862
 1863 // 1) determine with which processes this process wants to
 1864 // communicate
1865 const unsigned int n_targets = targets.size();
1866
1867 // 2) determine who wants to communicate with this process
1868 const unsigned int n_sources =
 1869 compute_n_point_to_point_communications(comm, targets);
 1870
1871 // 2) allocate memory
1872 recv_buffers.resize(n_targets);
1873 send_buffers.resize(n_targets);
1874 send_request_requests.resize(n_targets);
1875
1876 send_answer_requests.resize(n_sources);
1877 requests_buffers.resize(n_sources);
1878
1879 // 4) send and receive
1880 for (unsigned int i = 0; i < n_targets; ++i)
1881 {
1882 const unsigned int rank = targets[i];
 1883 AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm));
 1884
1885 // pack data which should be sent
1886 auto &send_buffer = send_buffers[i];
1887 if (create_request)
1888 send_buffer = Utilities::pack(create_request(rank), false);
1889
1890 // start to send data
1891 auto ierr = MPI_Isend(send_buffer.data(),
1892 send_buffer.size(),
1893 MPI_CHAR,
1894 rank,
1895 tag_request,
1896 comm,
1897 &send_request_requests[i]);
1898 AssertThrowMPI(ierr);
1899 }
1900
1901 return n_sources;
1902# else
1903 (void)targets;
1904 (void)create_request;
1905 (void)comm;
1906 return 0;
1907# endif
1908 }
1909
1910
1911
1912 template <typename RequestType, typename AnswerType>
1913 void
 1914 PEX<RequestType, AnswerType>::answer_one_request(
 1915 const unsigned int index,
1916 const std::function<AnswerType(const unsigned int, const RequestType &)>
1917 &answer_request,
1918 const MPI_Comm comm)
1919 {
1920# ifdef DEAL_II_WITH_MPI
1921 const int tag_request = Utilities::MPI::internal::Tags::
 1922 consensus_algorithm_pex_answer_request;
 1923 const int tag_deliver = Utilities::MPI::internal::Tags::
 1924 consensus_algorithm_pex_process_deliver;
 1925
1926 // Wait until we have a message ready for retrieval, though we don't
1927 // care which process it is from.
1928 MPI_Status status;
1929 int ierr = MPI_Probe(MPI_ANY_SOURCE, tag_request, comm, &status);
1930 AssertThrowMPI(ierr);
1931
1932 // Get rank of incoming message and verify that it makes sense
1933 const unsigned int other_rank = status.MPI_SOURCE;
1934
1935 Assert(requesting_processes.find(other_rank) ==
1936 requesting_processes.end(),
1937 ExcMessage(
1938 "A process is sending a request after a request from "
1939 "the same process has previously already been "
1940 "received. This algorithm does not expect this to happen."));
1941 requesting_processes.insert(other_rank);
1942
1943 // Actually get the incoming message:
1944 int number_amount;
1945 ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount);
1946 AssertThrowMPI(ierr);
1947
1948 std::vector<char> buffer_recv(number_amount);
1949 ierr = MPI_Recv(buffer_recv.data(),
1950 number_amount,
1951 MPI_CHAR,
1952 other_rank,
1953 tag_request,
1954 comm,
1955 &status);
1956 AssertThrowMPI(ierr);
1957
1958 // Process request by asking the user-provided function for
1959 // the answer and post a send for it.
1960 auto &request_buffer = requests_buffers[index];
1961 request_buffer =
1962 (answer_request ?
1963 Utilities::pack(answer_request(other_rank,
1964 Utilities::unpack<RequestType>(
1965 buffer_recv, false)),
1966 false) :
1967 std::vector<char>());
1968
1969 ierr = MPI_Isend(request_buffer.data(),
1970 request_buffer.size(),
1971 MPI_CHAR,
1972 other_rank,
1973 tag_deliver,
1974 comm,
1975 &send_answer_requests[index]);
1976 AssertThrowMPI(ierr);
1977# else
1978 (void)answer_request;
1979 (void)comm;
1980 (void)index;
1981# endif
1982 }
1983
1984
1985
1986 template <typename RequestType, typename AnswerType>
1987 void
 1988 PEX<RequestType, AnswerType>::process_incoming_answers(
 1989 const unsigned int n_targets,
1990 const std::function<void(const unsigned int, const AnswerType &)>
1991 &process_answer,
1992 const MPI_Comm comm)
1993 {
1994# ifdef DEAL_II_WITH_MPI
1995 const int tag_deliver = Utilities::MPI::internal::Tags::
 1996 consensus_algorithm_pex_process_deliver;
 1997
1998 // We know how many targets we have sent requests to. These
1999 // targets will all eventually send us their responses, but
2000 // we need not process them in order -- rather, just see what
2001 // comes in and then look at message originators' ranks and
2002 // message sizes
2003 for (unsigned int i = 0; i < n_targets; ++i)
2004 {
2005 MPI_Status status;
2006 {
2007 const int ierr =
2008 MPI_Probe(MPI_ANY_SOURCE, tag_deliver, comm, &status);
2009 AssertThrowMPI(ierr);
2010 }
2011
2012 const auto other_rank = status.MPI_SOURCE;
2013 int message_size;
2014 {
2015 const int ierr = MPI_Get_count(&status, MPI_CHAR, &message_size);
2016 AssertThrowMPI(ierr);
2017 }
2018 std::vector<char> recv_buffer(message_size);
2019
2020 // Now actually receive the answer. Because the MPI_Probe
2021 // above blocks until we have a message, we know that the
2022 // following MPI_Recv call will immediately succeed.
2023 {
2024 const int ierr = MPI_Recv(recv_buffer.data(),
2025 recv_buffer.size(),
2026 MPI_CHAR,
2027 other_rank,
2028 tag_deliver,
2029 comm,
2030 MPI_STATUS_IGNORE);
2031 AssertThrowMPI(ierr);
2032 }
2033
2034 if (process_answer)
2035 process_answer(other_rank,
2036 Utilities::unpack<AnswerType>(recv_buffer, false));
2037 }
2038# else
2039 (void)n_targets;
2040 (void)process_answer;
2041 (void)comm;
2042# endif
2043 }
2044
2045
2046
2047 template <typename RequestType, typename AnswerType>
2048 void
 2049 PEX<RequestType, AnswerType>::clean_up_and_end_communication()
 2050 {
2051# ifdef DEAL_II_WITH_MPI
2052 // Finalize all MPI_Request objects for both the
2053 // send-request and receive-answer operations.
2054 if (send_request_requests.size() > 0)
2055 {
2056 const int ierr = MPI_Waitall(send_request_requests.size(),
2057 send_request_requests.data(),
2058 MPI_STATUSES_IGNORE);
2059 AssertThrowMPI(ierr);
2060 }
2061
2062 // Then also check the send-answer requests.
2063 if (send_answer_requests.size() > 0)
2064 {
2065 const int ierr = MPI_Waitall(send_answer_requests.size(),
2066 send_answer_requests.data(),
2067 MPI_STATUSES_IGNORE);
2068 AssertThrowMPI(ierr);
2069 }
2070# endif
2071 }
2072
2073
2074
2075 template <typename RequestType, typename AnswerType>
2076 std::vector<unsigned int>
 2077 Serial<RequestType, AnswerType>::run(
 2078 const std::vector<unsigned int> &targets,
2079 const std::function<RequestType(const unsigned int)> &create_request,
2080 const std::function<AnswerType(const unsigned int, const RequestType &)>
2081 &answer_request,
2082 const std::function<void(const unsigned int, const AnswerType &)>
2083 &process_answer,
2084 const MPI_Comm comm)
2085 {
2086 (void)comm;
 2087 Assert(Utilities::MPI::n_mpi_processes(comm) == 1,
 2088 ExcMessage("You shouldn't use the 'Serial' class on "
2089 "communicators that have more than one process "
2090 "associated with it."));
2091
2092 // The only valid target for a serial program is itself.
2093 if (targets.size() != 0)
2094 {
2095 Assert(targets.size() == 1,
2096 ExcMessage(
2097 "On a single process, the only valid target "
2098 "is process zero (the process itself), which can only be "
2099 "listed once."));
2100 AssertDimension(targets[0], 0);
2101
2102 // Since the caller indicates that there is a target, and since we
2103 // know that it is the current process, let the process send
2104 // something to itself.
2105 const RequestType request =
2106 (create_request ? create_request(0) : RequestType());
2107 const AnswerType answer =
2108 (answer_request ? answer_request(0, request) : AnswerType());
2109
2110 if (process_answer)
2111 process_answer(0, answer);
2112 }
2113
2114 return targets; // nothing to do
2115 }
2116
2117
2118
2119 template <typename RequestType, typename AnswerType>
2120 std::vector<unsigned int>
 2121 Selector<RequestType, AnswerType>::run(
 2122 const std::vector<unsigned int> &targets,
2123 const std::function<RequestType(const unsigned int)> &create_request,
2124 const std::function<AnswerType(const unsigned int, const RequestType &)>
2125 &answer_request,
2126 const std::function<void(const unsigned int, const AnswerType &)>
2127 &process_answer,
2128 const MPI_Comm comm)
2129 {
2130 // Depending on the number of processes we switch between
2131 // implementations. We reduce the threshold for debug mode to be
2132 // able to test also the non-blocking implementation. This feature
2133 // is tested by:
2134 // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=10.output
2135
2136 const unsigned int n_procs = (Utilities::MPI::job_supports_mpi() ?
 2137 Utilities::MPI::n_mpi_processes(comm) :
 2138 1);
2139# ifdef DEAL_II_WITH_MPI
2140# ifdef DEBUG
2141 if (n_procs > 10)
2142# else
2143 if (n_procs > 99)
2144# endif
2145 consensus_algo.reset(new NBX<RequestType, AnswerType>());
2146 else
2147# endif
2148 if (n_procs > 1)
2149 consensus_algo.reset(new PEX<RequestType, AnswerType>());
2150 else
2151 consensus_algo.reset(new Serial<RequestType, AnswerType>());
2152
2153 return consensus_algo->run(
2154 targets, create_request, answer_request, process_answer, comm);
2155 }
2156
2157
2158 } // namespace ConsensusAlgorithms
2159 } // end of namespace MPI
2160} // end of namespace Utilities
2161
2162#endif // DOXYGEN
2163
2164
2165DEAL_II_NAMESPACE_CLOSE
2166
2167#endif
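
The free functions declared above (nbx(), pex(), serial(), and the combining selector()) all share the same call signature, and selector() picks NBX, PEX, or Serial depending on the communicator size, as shown in Selector::run() in the listing. The following is a minimal, illustrative usage sketch that is not part of the header: the function name example_exchange and the choice of request/answer types (std::vector<double> and double) are assumptions made only for the example. Each process sends one request to every rank in 'targets', answers the requests it receives, and processes the answers that come back; the returned vector lists the ranks that sent requests to this process.

#include <deal.II/base/mpi_consensus_algorithms.h>

#include <vector>

// Hypothetical usage example (not part of deal.II).
std::vector<unsigned int>
example_exchange(const std::vector<unsigned int> &targets, const MPI_Comm comm)
{
  namespace CA = dealii::Utilities::MPI::ConsensusAlgorithms;

  return CA::selector<std::vector<double>, double>(
    targets,
    // create_request: build the message sent to a given target rank
    [](const unsigned int target) {
      return std::vector<double>{1.0, 2.0, static_cast<double>(target)};
    },
    // answer_request: compute the answer to a request received from 'source'
    [](const unsigned int /*source*/, const std::vector<double> &request) {
      double sum = 0.;
      for (const double value : request)
        sum += value;
      return sum;
    },
    // process_answer: consume the answer a target rank sent back
    [](const unsigned int /*target*/, const double & /*answer*/) {},
    comm);
}

Alternatively, one can derive from the Process class above, implement compute_targets(), create_request(), answer_request(), and read_answer(), and pass the object to the run(Process&, MPI_Comm) overload of Interface; the lambda-based free functions are simply a more compact way of expressing the same exchange.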