aligned_vector.h
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2014 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#ifndef dealii_aligned_vector_h
#define dealii_aligned_vector_h

#include <deal.II/base/config.h>

#include <deal.II/base/exceptions.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/utilities.h>

// boost::serialization::make_array used to be in array.hpp, but was
// moved to a different file in BOOST 1.64
#include <boost/version.hpp>
#if BOOST_VERSION >= 106400
#  include <boost/serialization/array_wrapper.hpp>
#else
#  include <boost/serialization/array.hpp>
#endif
#include <boost/serialization/split_member.hpp>

#include <cstring>
#include <memory>
#include <type_traits>


DEAL_II_NAMESPACE_OPEN

// A vector class much like std::vector, but whose memory is aligned to
// 64-byte boundaries as needed for vectorization. It supports only a subset
// of std::vector's operations.
template <class T>
class AlignedVector
{
public:
  // Standard container type aliases.
  using value_type      = T;
  using pointer         = value_type *;
  using const_pointer   = const value_type *;
  using iterator        = value_type *;
  using const_iterator  = const value_type *;
  using reference       = value_type &;
  using const_reference = const value_type &;
  using size_type       = std::size_t;

  // Empty constructor. Sets the vector size to zero.
  AlignedVector();

  // Constructor from a range of random-access iterators; copies the elements
  // of [begin, end).
  template <
    typename RandomAccessIterator,
    typename = std::enable_if_t<std::is_convertible_v<
      typename std::iterator_traits<RandomAccessIterator>::iterator_category,
      std::random_access_iterator_tag>>>
  AlignedVector(RandomAccessIterator begin, RandomAccessIterator end);

  // Set the vector size to the given size and initialize all elements with
  // @p init.
  explicit AlignedVector(const size_type size, const T &init = T());

  // Destructor.
  ~AlignedVector() = default;

  // Copy constructor.
  AlignedVector(const AlignedVector<T> &vec);

  // Move constructor. Creates a new vector by stealing the contents of
  // @p vec.
  AlignedVector(AlignedVector<T> &&vec) noexcept;

  // Assignment from the input vector @p vec.
  AlignedVector &
  operator=(const AlignedVector<T> &vec);

  // Move assignment operator.
  AlignedVector &
  operator=(AlignedVector<T> &&vec) noexcept;

  // Change the size of the vector. If the new size is larger than the
  // previous size, new elements are left uninitialized when T is trivially
  // default constructible; see resize() for the initializing variant.
  void
  resize_fast(const size_type new_size);

  // Change the size of the vector. Newly added elements are
  // default-initialized.
  void
  resize(const size_type new_size);

  // Change the size of the vector. Newly added elements are copies of
  // @p init.
  void
  resize(const size_type new_size, const T &init);

  // Reserve memory space for @p new_allocated_size elements.
  void
  reserve(const size_type new_allocated_size);

  // Release all memory that is not used by the currently stored elements.
  void
  shrink_to_fit();

  // Release all previously allocated memory and leave the vector in a state
  // equivalent to the one after default construction.
  void
  clear();

  // Insert an element at the end of the vector.
  void
  push_back(const T in_data);

  // Return the last element of the vector (read and write access).
  reference
  back();

  // Return the last element of the vector (read-only access).
  const_reference
  back() const;

  // Insert several elements at the end of the vector.
  template <typename ForwardIterator>
  void
  insert_back(ForwardIterator begin, ForwardIterator end);

  // Insert the range [begin, end) before @p position.
  template <
    typename RandomAccessIterator,
    typename = std::enable_if_t<std::is_convertible_v<
      typename std::iterator_traits<RandomAccessIterator>::iterator_category,
      std::random_access_iterator_tag>>>
  iterator
  insert(const_iterator       position,
         RandomAccessIterator begin,
         RandomAccessIterator end);

  // Fill the vector with size() copies of a default constructed object.
  void
  fill();

  // Fill the vector with size() copies of the given input.
  void
  fill(const T &element);

  // Replicate the state of the current object from the given root process to
  // all processes of the given MPI communicator, keeping only one copy of
  // the data per group of processes that can share memory.
  void
  replicate_across_communicator(const MPI_Comm     communicator,
                                const unsigned int root_process);

  // Swap the given vector with the calling vector.
  void
  swap(AlignedVector<T> &vec) noexcept;

  // Return whether the vector is empty, i.e., its size is zero.
  bool
  empty() const;

  // Return the size of the vector.
  size_type
  size() const;

  // Return the capacity of the vector, i.e., the size this vector can hold
  // without reallocation.
  size_type
  capacity() const;

  // Read-write access to entry @p index in the vector.
  reference
  operator[](const size_type index);

  // Read-only access to entry @p index in the vector.
  const_reference
  operator[](const size_type index) const;

  // Return a pointer to the underlying data buffer.
  pointer
  data();

  // Return a const pointer to the underlying data buffer.
  const_pointer
  data() const;

  // Return a read and write pointer to the beginning of the data array.
  iterator
  begin();

  // Return a read and write pointer to the end of the data array.
  iterator
  end();

  // Return a read-only pointer to the beginning of the data array.
  const_iterator
  begin() const;

  // Return a read-only pointer to the end of the data array.
  const_iterator
  end() const;

  // Return the memory consumption of the allocated memory in this class.
  size_type
  memory_consumption() const;

  // Write the data of this object to a stream for the purpose of
  // serialization using the BOOST serialization library.
  template <class Archive>
  void
  save(Archive &ar, const unsigned int version) const;

  // Read the data of this object from a stream for the purpose of
  // serialization using the BOOST serialization library.
  template <class Archive>
  void
  load(Archive &ar, const unsigned int version);

#ifdef DOXYGEN
  // Write and read the data of this object from a stream for the purpose of
  // serialization using the BOOST serialization library.
  template <class Archive>
  void
  serialize(Archive &archive, const unsigned int version);
#else
  // This macro defines the serialize() method that is compatible with
  // the templated save() and load() method that have been implemented.
  BOOST_SERIALIZATION_SPLIT_MEMBER()
#endif

  // Exception raised when the vector is changed after a call to
  // replicate_across_communicator().
  DeclExceptionMsg(ExcAlignedVectorChangeAfterReplication,
                   "Changing the vector after a call to "
                   "replicate_across_communicator() is not allowed.");

private:
  // Allocate a new array of the given size and move the existing elements
  // into it.
  void
  allocate_and_move(const size_t old_size,
                    const size_t new_size,
                    const size_t new_allocated_size);

  // A deleter class tailored to the std::unique_ptr that stores the array of
  // elements. Because the array may live in MPI shared memory, releasing it
  // can require more than a plain std::free().
  class Deleter
  {
  public:
    // Constructor. Installs the default action for "regular" memory
    // allocated via posix_memalign().
    Deleter(AlignedVector<T> *owning_object);

#ifdef DEAL_II_WITH_MPI
    // Constructor for arrays that live in MPI shared memory.
    Deleter(AlignedVector<T> *owning_object,
            const bool        is_shmem_root,
            T                *aligned_shmem_pointer,
            MPI_Comm          shmem_group_communicator,
            MPI_Win           shmem_window);
#endif

    // The operation that std::unique_ptr calls to release the memory.
    void
    operator()(T *ptr);

    // Reset the pointer to the owning AlignedVector object. This is used,
    // for example, when the owning object is moved or swapped.
    void
    reset_owning_object(const AlignedVector<T> *new_aligned_vector_ptr);

  private:
    // Base class for the action to be performed upon deletion.
    class DeleterActionBase
    {
    public:
      virtual ~DeleterActionBase() = default;

      // Perform the deletion of the array.
      virtual void
      delete_array(const AlignedVector<T> *owning_aligned_vector, T *ptr) = 0;
    };

#ifdef DEAL_II_WITH_MPI
    // The action to be performed for arrays that live in MPI shared memory.
    class MPISharedMemDeleterAction : public DeleterActionBase
    {
    public:
      MPISharedMemDeleterAction(const bool is_shmem_root,
                                T         *aligned_shmem_pointer,
                                MPI_Comm   shmem_group_communicator,
                                MPI_Win    shmem_window);

      virtual void
      delete_array(const AlignedVector<T> *aligned_vector, T *ptr);

    private:
      // Variables necessary to release the MPI shared-memory window.
      const bool is_shmem_root;
      T         *aligned_shmem_pointer;
      MPI_Comm   shmem_group_communicator;
      MPI_Win    shmem_window;
    };
#endif

    // A pointer to the object describing the action upon deletion; a nullptr
    // encodes the default posix_memalign()/std::free() case.
    std::unique_ptr<DeleterActionBase> deleter_action_object;

    // A pointer to the AlignedVector object that owns this deleter.
    const AlignedVector<T> *owning_aligned_vector;
  };

  // Pointer to the actual data array, together with its deleter.
  std::unique_ptr<T[], Deleter> elements;

  // Pointer to one past the last valid value.
  T *used_elements_end;

  // Pointer to one past the end of the allocated memory.
  T *allocated_elements_end;

  // Flag recording whether the vector has been replicated across an MPI
  // communicator; used in debug mode to prevent modification afterwards.
  bool replicated_across_communicator;
};

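// A minimal usage sketch of the container declared above (the values are
// illustrative, not part of the original header):
//
// @code
//   AlignedVector<double> v(8, 1.0); // eight elements, all equal to 1.0
//   v.push_back(2.0);                // grows the allocation as needed
//   v.resize(16);                    // new elements are zeroed for double
//   double *p = v.data();            // pointer aligned to 64 bytes
// @endcode
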

// ------------------------------- inline functions --------------------------


namespace internal
{
  // A helper class that copies a range of elements into uninitialized
  // memory, using std::memcpy() for trivially copyable types and
  // placement-new otherwise, possibly in parallel.
  template <typename RandomAccessIterator, typename T>
  class AlignedVectorCopyConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial. Copies the data from the
    // half-open interval between @p source_begin and @p source_end to the
    // array starting at @p destination.
    AlignedVectorCopyConstruct(RandomAccessIterator source_begin,
                               RandomAccessIterator source_end,
                               T *const             destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorCopyConstruct::apply_to_subrange(0, size);
      else
        AlignedVectorCopyConstruct::apply_parallel(
          0, size, minimum_parallel_grain_size);
    }

    // This method copies elements from the source to the destination given
    // in the constructor on a subrange given by two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // We can use memcpy() with trivially copyable objects.
      if constexpr (std::is_trivially_copyable_v<T> == true &&
                    (std::is_same_v<T *, RandomAccessIterator> ||
                     std::is_same_v<const T *, RandomAccessIterator>) == true)
        std::memcpy(destination_ + begin,
                    source_ + begin,
                    (end - begin) * sizeof(T));
      else
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(*(source_ + i));
    }

  private:
    RandomAccessIterator source_;
    T *const             destination_;
  };
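
  // A sketch of how the helper above is typically invoked: constructing the
  // object performs the copy, possibly in parallel. Here 'src' and 'dst' are
  // hypothetical buffers of length 'n', with 'dst' pointing to uninitialized
  // memory:
  //
  // @code
  //   internal::AlignedVectorCopyConstruct<const double *, double>(
  //     src, src + n, dst);
  // @endcode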

  // Like AlignedVectorCopyConstruct, but move-constructs the elements in the
  // destination instead of copying them, possibly in parallel.
  template <typename RandomAccessIterator, typename T>
  class AlignedVectorMoveConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial. Moves the data from the half-open
    // interval between @p source_begin and @p source_end to the array
    // starting at @p destination.
    AlignedVectorMoveConstruct(RandomAccessIterator source_begin,
                               RandomAccessIterator source_end,
                               T *const             destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorMoveConstruct::apply_to_subrange(0, size);
      else
        AlignedVectorMoveConstruct::apply_parallel(
          0, size, minimum_parallel_grain_size);
    }

    // This method moves elements from the source to the destination given in
    // the constructor on a subrange given by two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // We can use memcpy() with trivially copyable objects.
      if constexpr (std::is_trivially_copyable_v<T> == true &&
                    (std::is_same_v<T *, RandomAccessIterator> ||
                     std::is_same_v<const T *, RandomAccessIterator>) == true)
        std::memcpy(destination_ + begin,
                    source_ + begin,
                    (end - begin) * sizeof(T));
      else
        // For everything else just use the move constructor. The original
        // object remains alive and will be destroyed elsewhere.
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(std::move(*(source_ + i)));
    }

  private:
    RandomAccessIterator source_;
    T *const             destination_;
  };

  // A class that initializes a range of memory with copies of a given value,
  // using std::memset() when the value's bit pattern is all zeros and the
  // type is trivially default constructible, possibly in parallel. The
  // template argument initialize_memory selects between placement-new (for
  // uninitialized memory) and copy assignment (for memory that already holds
  // objects).
  template <typename T, bool initialize_memory>
  class AlignedVectorInitialize : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorInitialize(const std::size_t size,
                            const T          &element,
                            T *const          destination)
      : element_(element)
      , destination_(destination)
      , trivial_element(false)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      // do not use memcmp() for long double because on some systems it does
      // not completely fill its memory and may lead to false positives in
      // e.g. valgrind
      if constexpr (std::is_trivially_default_constructible_v<T> == true &&
                    std::is_same_v<T, long double> == false)
        {
          const unsigned char zero[sizeof(T)] = {};
          if (std::memcmp(zero, &element, sizeof(T)) == 0)
            trivial_element = true;
        }
      if (size < minimum_parallel_grain_size)
        AlignedVectorInitialize::apply_to_subrange(0, size);
      else
        AlignedVectorInitialize::apply_parallel(
          0, size, minimum_parallel_grain_size);
    }

    // This sets elements on a subrange given by two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Only use memset() with types whose default constructors don't do
      // anything.
      if constexpr (std::is_trivially_default_constructible_v<T> == true)
        if (trivial_element)
          {
            std::memset(destination_ + begin, 0, (end - begin) * sizeof(T));
            return;
          }

      copy_construct_or_assign(begin,
                               end,
                               std::bool_constant<initialize_memory>());
    }

  private:
    const T   &element_;
    mutable T *destination_;
    bool       trivial_element;

    // copy assignment operation
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::bool_constant<false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = element_;
    }

    // copy constructor (memory initialization)
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::bool_constant<true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T(element_);
    }
  };

  // Like AlignedVectorInitialize, but fills a range of memory with
  // default-constructed objects, using std::memset() for trivially default
  // constructible types, possibly in parallel. The template argument
  // initialize_memory again selects between placement-new and assignment.
  template <typename T, bool initialize_memory>
  class AlignedVectorDefaultInitialize
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorDefaultInitialize(const std::size_t size,
                                   T *const          destination)
      : destination_(destination)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      if (size < minimum_parallel_grain_size)
        AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
      else
        AlignedVectorDefaultInitialize::apply_parallel(
          0, size, minimum_parallel_grain_size);
    }

    // This initializes elements on a subrange given by two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Only use memset() with types whose default constructors don't do
      // anything.
      if constexpr (std::is_trivially_default_constructible_v<T> == true)
        std::memset(destination_ + begin, 0, (end - begin) * sizeof(T));
      else
        default_construct_or_assign(begin,
                                    end,
                                    std::bool_constant<initialize_memory>());
    }

  private:
    mutable T *destination_;

    // copy assignment operation
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::bool_constant<false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = std::move(T());
    }

    // copy constructor (memory initialization)
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::bool_constant<true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T;
    }
  };

} // end of namespace internal


#ifndef DOXYGEN


template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object)
  : deleter_action_object(nullptr) // encode default action by using a nullptr
  , owning_aligned_vector(owning_object)
{}


#  ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object,
                                          const bool        is_shmem_root,
                                          T *aligned_shmem_pointer,
                                          MPI_Comm shmem_group_communicator,
                                          MPI_Win  shmem_window)
  : deleter_action_object(
      std::make_unique<MPISharedMemDeleterAction>(is_shmem_root,
                                                  aligned_shmem_pointer,
                                                  shmem_group_communicator,
                                                  shmem_window))
  , owning_aligned_vector(owning_object)
{}
#  endif


template <typename T>
inline void
AlignedVector<T>::Deleter::operator()(T *ptr)
{
  // If no special action has been registered (i.e., if the action pointer is
  // nullptr), then just perform the default action right here.
  if (deleter_action_object == nullptr)
    {
      if (ptr != nullptr)
        {
          Assert(owning_aligned_vector->used_elements_end != nullptr,
                 ExcInternalError());

          if (std::is_trivially_destructible_v<T> == false)
            for (T *p = owning_aligned_vector->used_elements_end - 1; p >= ptr;
                 --p)
              p->~T();

          std::free(ptr);
        }
    }
  else
    // Otherwise, let the action object do what is necessary
    deleter_action_object->delete_array(owning_aligned_vector, ptr);
}



template <typename T>
inline void
AlignedVector<T>::Deleter::reset_owning_object(
  const AlignedVector<T> *new_aligned_vector_ptr)
{
  owning_aligned_vector = new_aligned_vector_ptr;
}


#  ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::MPISharedMemDeleterAction::
  MPISharedMemDeleterAction(const bool is_shmem_root,
                            T         *aligned_shmem_pointer,
                            MPI_Comm   shmem_group_communicator,
                            MPI_Win    shmem_window)
  : is_shmem_root(is_shmem_root)
  , aligned_shmem_pointer(aligned_shmem_pointer)
  , shmem_group_communicator(shmem_group_communicator)
  , shmem_window(shmem_window)
{}



template <typename T>
inline void
AlignedVector<T>::Deleter::MPISharedMemDeleterAction::delete_array(
  const AlignedVector<T> *aligned_vector,
  T                      *ptr)
{
  (void)ptr;
  // It would be nice to assert that aligned_vector->elements.get() equals ptr,
  // but it is not guaranteed to work: clang, for example, sets elements.get()
  // to nullptr and then calls the deleter on a previously made copy. Hence we
  // must assume here that elements.get() (which is managed by the unique_ptr)
  // may be nullptr at this point.
  //
  // used_elements_end is a member variable of AlignedVector (i.e., we control
  // it, not unique_ptr) so it is still set to its correct value.

  if (is_shmem_root)
    if (std::is_trivially_destructible_v<T> == false)
      for (T *p = aligned_vector->used_elements_end - 1; p >= ptr; --p)
        p->~T();

  int ierr;
  ierr = MPI_Win_free(&shmem_window);
  AssertThrowMPI(ierr);

  Utilities::MPI::free_communicator(shmem_group_communicator);
}

#  endif


template <class T>
inline AlignedVector<T>::AlignedVector()
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
#  ifdef DEBUG
  , replicated_across_communicator(false)
#  endif
{}



template <class T>
template <typename RandomAccessIterator, typename>
inline AlignedVector<T>::AlignedVector(RandomAccessIterator begin,
                                       RandomAccessIterator end)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
  , replicated_across_communicator(false)
{
  allocate_and_move(0u, end - begin, end - begin);
  used_elements_end = allocated_elements_end;
  dealii::internal::AlignedVectorCopyConstruct<RandomAccessIterator, T>(
    begin, end, data());
}


template <class T>
inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
#  ifdef DEBUG
  , replicated_across_communicator(false)
#  endif
{
  if (size > 0)
    resize(size, init);
}



template <class T>
inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
#  ifdef DEBUG
  , replicated_across_communicator(false)
#  endif
{
  // copy the data from vec
  reserve(vec.size());
  used_elements_end = allocated_elements_end;
  dealii::internal::AlignedVectorCopyConstruct<T *, T>(vec.elements.get(),
                                                       vec.used_elements_end,
                                                       elements.get());
}


template <class T>
inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
  : AlignedVector()
{
  // forward to the move operator
  *this = std::move(vec);
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(const AlignedVector<T> &vec)
{
  const size_type new_size = vec.used_elements_end - vec.elements.get();

  // First throw away everything and re-allocate memory but leave that
  // memory uninitialized for now:
  resize(0);
  reserve(new_size);

  // Then copy the elements over by using the copy constructor on these
  // elements:
  dealii::internal::AlignedVectorCopyConstruct<T *, T>(vec.elements.get(),
                                                       vec.used_elements_end,
                                                       elements.get());

  // Finally adjust the pointer to the end of the elements that are used:
  used_elements_end = elements.get() + new_size;

  return *this;
}


template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
{
  clear();

  // Move the actual data in the 'elements' object. One problem is that this
  // also moves the deleter object, but the deleter object
  // references 'this' (i.e., the 'this' pointer of the *moved-from*
  // object). The way this is implemented is that we have to move the
  // deleter as well, and then reset the pointer inside the deleter
  // that references the outer object.
  elements = std::move(vec.elements);
  elements.get_deleter().reset_owning_object(this);

  // Then also steal the other pointers and clear them in the original object:
  used_elements_end      = vec.used_elements_end;
  allocated_elements_end = vec.allocated_elements_end;

  vec.used_elements_end      = nullptr;
  vec.allocated_elements_end = nullptr;

  return *this;
}


template <class T>
inline void
AlignedVector<T>::resize_fast(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivially_destructible_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // Leave the new array entries as-is (with undefined values) unless T's
      // default constructor is nontrivial (i.e., it is not a no-op)
      if (std::is_trivially_default_constructible_v<T> == false)
        dealii::internal::AlignedVectorDefaultInitialize<T, true>(
          new_size - old_size, elements.get() + old_size);
    }
}
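
// The difference between resize_fast() and resize() in a short sketch: for a
// trivially default constructible type such as double, resize_fast() leaves
// newly added elements with undefined values, whereas resize() zeroes them.
//
// @code
//   AlignedVector<double> v;
//   v.resize_fast(100); // v[0]..v[99] have undefined values, but no
//                       // per-element initialization cost is paid
//   v.resize(200);      // v[100]..v[199] are set to 0.0
// @endcode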


template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivially_destructible_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the values to the default initializer
      dealii::internal::AlignedVectorDefaultInitialize<T, true>(
        new_size - old_size, elements.get() + old_size);
    }
}
1400
1401
1402
1403template <class T>
1404inline void
1405AlignedVector<T>::resize(const size_type new_size, const T &init)
1406{
1407 const size_type old_size = size();
1408
1409 if (new_size == 0)
1410 clear();
1411 else if (new_size == old_size)
1412 {
1413 } // nothing to do here
1414 else if (new_size < old_size)
1415 {
1416 // call destructor on fields that are released, if the type requires it.
1417 // doing it backward releases the elements in reverse order as compared to
1418 // how they were created
1419 if (std::is_trivially_destructible_v<T> == false)
1420 for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1421 p->~T();
1422 used_elements_end = elements.get() + new_size;
1423 }
1424 else // new_size > old_size
1425 {
1426 // Allocate more space, and claim that space as used
1427 reserve(new_size);
1428 used_elements_end = elements.get() + new_size;
1429
1430 // finally set the desired init values
1432 new_size - old_size, init, elements.get() + old_size);
1433 }
1434}
1435
1436
1437
1438template <class T>
1439inline void
1440AlignedVector<T>::allocate_and_move(const size_t old_size,
1441 const size_t new_size,
1442 const size_t new_allocated_size)
1443{
1444 // allocate and align along 64-byte boundaries (this is enough for all
1445 // levels of vectorization currently supported by deal.II)
1446 T *new_data_ptr;
1447 Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_data_ptr),
1448 64,
1449 new_size * sizeof(T));
1450
1451 // Now create a deleter that encodes what should happen when the object is
1452 // released: We need to destroy the objects that are currently alive (in
1453 // reverse order, and then release the memory. Note that we catch the
1454 // 'this' pointer because the number of elements currently alive might
1455 // change over time.
1456 Deleter deleter(this);
1457
1458 // copy whatever elements we need to retain
1459 if (new_allocated_size > 0)
1461 elements.get(), elements.get() + old_size, new_data_ptr);
1462
1463 // Now reset all the member variables of the current object
1464 // based on the allocation above. Assigning to a std::unique_ptr
1465 // object also releases the previously pointed to memory.
1466 //
1467 // Note that at the time of releasing the old memory, 'used_elements_end'
1468 // still points to its previous value, and this is important for the
1469 // deleter object of the previously allocated array (see how it loops over
1470 // the to-be-destroyed elements at the Deleter::DefaultDeleterAction
1471 // class).
1472 elements = decltype(elements)(new_data_ptr, std::move(deleter));
1473 used_elements_end = elements.get() + old_size;
1474 allocated_elements_end = elements.get() + new_size;
1475}


template <class T>
inline void
AlignedVector<T>::reserve(const size_type new_allocated_size)
{
  const size_type old_size           = used_elements_end - elements.get();
  const size_type old_allocated_size = allocated_elements_end - elements.get();
  if (new_allocated_size > old_allocated_size)
    {
      // if we continuously increase the size of the vector, we might be
      // reallocating a lot of times. therefore, try to increase the size more
      // aggressively
      const size_type new_size =
        std::max(new_allocated_size, 2 * old_allocated_size);

      allocate_and_move(old_size, new_size, new_allocated_size);
    }
  else if (new_allocated_size == 0)
    clear();
  else // 0 < new_allocated_size <= old_allocated_size
    {
    } // nothing to do here
}
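
// The doubling strategy above is what gives push_back() its amortized
// constant cost: growing a vector one element at a time to, say, n = 1000
// triggers reallocations only at capacities 16, 32, ..., 1024 (seven in
// total) rather than one per element, because each reallocation at least
// doubles the capacity; the initial capacity of 16 is requested by
// push_back() below.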


template <class T>
inline void
AlignedVector<T>::shrink_to_fit()
{
#  ifdef DEBUG
  Assert(replicated_across_communicator == false,
         ExcAlignedVectorChangeAfterReplication());
#  endif
  const size_type used_size      = used_elements_end - elements.get();
  const size_type allocated_size = allocated_elements_end - elements.get();
  if (allocated_size > used_size)
    allocate_and_move(used_size, used_size, used_size);
}



template <class T>
inline void
AlignedVector<T>::clear()
{
  // Just release the memory (which also calls the destructor of the elements),
  // and then set the auxiliary pointers to invalid values.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how it loops over
  // the to-be-destroyed elements a few lines above).
  elements.reset();
  used_elements_end      = nullptr;
  allocated_elements_end = nullptr;
}


template <class T>
inline void
AlignedVector<T>::push_back(const T in_data)
{
  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
  if (used_elements_end == allocated_elements_end)
    reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
  new (used_elements_end++) T(in_data);
}


template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::back()
{
  AssertIndexRange(0, size());
  T *field = used_elements_end - 1;
  return *field;
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::back() const
{
  AssertIndexRange(0, size());
  const T *field = used_elements_end - 1;
  return *field;
}


template <class T>
template <typename ForwardIterator>
inline void
AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
{
  const size_type old_size = size();
  reserve(old_size + (end - begin));
  for (; begin != end; ++begin, ++used_elements_end)
    new (used_elements_end) T(*begin);
}


template <class T>
template <typename RandomAccessIterator, typename>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::insert(const_iterator       position,
                         RandomAccessIterator begin,
                         RandomAccessIterator end)
{
  Assert(replicated_across_communicator == false,
         ExcAlignedVectorChangeAfterReplication());
  Assert(this->begin() <= position && position <= this->end(),
         ExcMessage("The position iterator is not valid."));
  const auto offset = position - this->begin();

  const size_type old_size   = size();
  const size_type range_size = end - begin;
  const size_type new_size   = old_size + range_size;
  if (range_size != 0)
    {
      // This is similar to allocate_and_move(), except that we need to move
      // whatever was before position and whatever is after it into two
      // different places
      T *new_data_ptr = nullptr;
      Utilities::System::posix_memalign(
        reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));

      // Correctly handle the case where the range is inside the present array
      // by creating a temporary.
      AlignedVector<T> temporary(begin, end);
      dealii::internal::AlignedVectorMoveConstruct<T *, T>(
        elements.get(), elements.get() + offset, new_data_ptr);
      dealii::internal::AlignedVectorMoveConstruct<T *, T>(
        temporary.begin(), temporary.end(), new_data_ptr + offset);
      dealii::internal::AlignedVectorMoveConstruct<T *, T>(
        elements.get() + offset,
        elements.get() + old_size,
        new_data_ptr + offset + range_size);

      Deleter deleter(this);
      elements          = decltype(elements)(new_data_ptr, std::move(deleter));
      used_elements_end = elements.get() + new_size;
      allocated_elements_end = elements.get() + new_size;
    }
  return this->begin() + offset;
}
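
// A usage sketch for insert() with illustrative values:
//
// @code
//   AlignedVector<int> v(3, 1);                // v = {1, 1, 1}
//   const int extra[2] = {2, 3};
//   v.insert(v.begin() + 1, extra, extra + 2); // v = {1, 2, 3, 1, 1}
// @endcode
//
// As implemented above, the inserted range may even point into 'v' itself;
// the temporary vector guards against overlap.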


template <class T>
inline void
AlignedVector<T>::fill()
{
  dealii::internal::AlignedVectorDefaultInitialize<T, false>(size(),
                                                             elements.get());
}



template <class T>
inline void
AlignedVector<T>::fill(const T &value)
{
  dealii::internal::AlignedVectorInitialize<T, false>(size(),
                                                      value,
                                                      elements.get());
}


template <class T>
inline void
AlignedVector<T>::replicate_across_communicator(const MPI_Comm communicator,
                                                const unsigned int root_process)
{
#  ifdef DEAL_II_WITH_MPI

  // Let the root process broadcast its size. If it is zero, then all
  // processes just clear() their memory and reset themselves to a non-shared
  // empty object -- there is no point to run through complicated MPI
  // calls if the end result is an empty array. Otherwise, we continue on.
  const size_type new_size =
    Utilities::MPI::broadcast(communicator, size(), root_process);
  if (new_size == 0)
    {
      clear();
      return;
    }


  // **** Step 0 ****
  // All but the root process no longer need their data, so release the memory
  // used to store the previous elements.
  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
    {
      elements.reset();
      used_elements_end      = nullptr;
      allocated_elements_end = nullptr;
    }

  // **** Step 1 ****
  // Create communicators for each group of processes that can use
  // shared memory areas. Within each of these groups, we don't care about
  // which rank each of the old processes gets except that we would like to
  // make sure that the (global) root process will have rank=0 within
  // its own sub-communicator. We can do that through the third argument of
  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
  // order of processes within the split communicators, and we should set it to
  // zero for the root processes and one for all others -- which means that
  // for all of these other processes, MPI can choose whatever order it
  // wants because they have the same key (MPI then documents that these ties
  // will be broken according to these processes' rank in the old group).
  //
  // At least that's the theory. In practice, the MPI implementation this
  // function was developed on does not seem to do that. (Bug report
  // is here: https://github.com/open-mpi/ompi/issues/8854)
  // We work around this by letting MPI_Comm_split_type choose whatever
  // rank it wants, and then reshuffle with MPI_Comm_split in a second
  // step -- not elegant, nor efficient, but seems to work:
  MPI_Comm shmem_group_communicator;
  {
    MPI_Comm shmem_group_communicator_temp;
    int      ierr = MPI_Comm_split_type(communicator,
                                        MPI_COMM_TYPE_SHARED,
                                        /* key */ 0,
                                        MPI_INFO_NULL,
                                        &shmem_group_communicator_temp);
    AssertThrowMPI(ierr);

    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
    ierr = MPI_Comm_split(shmem_group_communicator_temp,
                          /* color */ 0,
                          key,
                          &shmem_group_communicator);
    AssertThrowMPI(ierr);

    // Verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
             ExcInternalError());

    // And get rid of the temporary communicator
    Utilities::MPI::free_communicator(shmem_group_communicator_temp);
  }
  const bool is_shmem_root =
    Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;

  // **** Step 2 ****
  // We then have to send the state of the current object from the
  // root process to one exemplar in each shmem group. To this end,
  // we create another subcommunicator that includes the ranks zero
  // of all shmem groups, and because of the trick above, we know
  // that this also includes the original root process.
  //
  // There are different ways of creating a "shmem_roots_communicator".
  // The conceptually easiest way is to create an MPI_Group that only
  // includes the shmem roots and then create a communicator from this
  // via MPI_Comm_create or MPI_Comm_create_group. The problem
  // with this is that we would have to exchange among all processes
  // which ones are shmem roots and which are not. This is awkward.
  //
  // A simpler way is to use MPI_Comm_split that uses "colors" to
  // indicate which sub-communicator each process wants to be in.
  // We use color=0 to indicate the group of shmem roots, and color=1
  // for all other processes -- the latter will simply not ever do
  // anything among themselves with the communicator so created.
  //
  // Using MPI_Comm_split has the additional benefit that, just as above,
  // we can choose where each rank will end up in shmem_roots_communicator.
  // We again set key=0 for the original root_process, and key=1 for all other
  // ranks; then, the global root becomes rank=0 on the
  // shmem_roots_communicator. We don't care how the other processes are
  // ordered.
  MPI_Comm shmem_roots_communicator;
  {
    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);

    const int ierr = MPI_Comm_split(communicator,
                                    /*color=*/
                                    (is_shmem_root ? 0 : 1),
                                    key,
                                    &shmem_roots_communicator);
    AssertThrowMPI(ierr);

    // Again verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
             ExcInternalError());
  }

  const unsigned int shmem_roots_root_rank = 0;
  const bool         is_shmem_roots_root =
    (is_shmem_root && (Utilities::MPI::this_mpi_process(
                         shmem_roots_communicator) == shmem_roots_root_rank));

  // Now let the original root_process broadcast the current object to all
  // shmem roots. Because of the key chosen above, we know that the original
  // root process is rank zero on the shmem roots communicator and has all of
  // the data.
  if (is_shmem_root)
    {
      if (std::is_trivially_copyable_v<T> == true)
        {
          // The data is trivially copyable, i.e., we can copy things directly
          // without having to go through the serialization/deserialization
          // machinery of Utilities::MPI::broadcast.
          //
          // In that case, first tell all of the other shmem roots how many
          // elements we will have to deal with, and let them resize their
          // (non-shared) arrays.
          const size_type new_size =
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      size(),
                                      shmem_roots_root_rank);
          if (is_shmem_roots_root == false)
            resize(new_size);

          // Then directly copy from the root process into these buffers
          int ierr = MPI_Bcast(elements.get(),
                               sizeof(T) * new_size,
                               MPI_CHAR,
                               shmem_roots_root_rank,
                               shmem_roots_communicator);
          AssertThrowMPI(ierr);
        }
      else
        {
          // The objects to be sent around are not "trivial", and so we have
          // to go through the serialization/deserialization machinery. On all
          // but the sending process, overwrite the current state with the
          // vector just broadcast.
          //
          // On the root rank, this would lead to resetting the 'elements'
          // pointer, which would trigger the deleter which would lead to a
          // deadlock. So we just send the result of the broadcast() call to
          // nirvana on the root process and keep our current state.
          if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      *this,
                                      shmem_roots_root_rank);
          else
            *this = Utilities::MPI::broadcast(shmem_roots_communicator,
                                              *this,
                                              shmem_roots_root_rank);
        }
    }

  // We no longer need the shmem roots communicator, so get rid of it
  Utilities::MPI::free_communicator(shmem_roots_communicator);


  // **** Step 3 ****
  // At this point, all shmem groups have one shmem root process that has
  // a copy of the data. This is the point where each shmem group should
  // establish a shmem area to put the data into. As mentioned above,
  // we know that the shmem roots are rank zero in their respective
  // shmem_group_communicator.
  //
  // The process for all of this works as follows: While all processes in
  // the shmem group participate in the generation of the shmem memory window,
  // only the shmem root actually allocates any memory -- the rest just
  // allocate zero bytes of their own. We allocate space for exactly
  // size() elements (computed on the shmem_root that already has the data)
  // and add however many bytes are necessary so that we know that we can align
  // things to 64-byte boundaries. The worst case happens if the memory system
  // gives us a pointer to an address one byte past a desired alignment
  // boundary, and in that case aligning the memory will require us to waste
  // the first (align_by-1) bytes. So we have to ask for
  //   size() * sizeof(T) + (align_by - 1)
  // bytes.
  //
  // Before MPI 4.0, there was no way to specify that we want memory aligned to
  // a certain number of bytes. This is going to come back to bite us further
  // down below when we try to get a properly aligned pointer to our memory
  // region, see the commentary there. Starting with MPI 4.0, one can set a
  // flag in an MPI_Info structure that requests a desired alignment, so we do
  // this for forward compatibility; MPI implementations ignore flags they
  // don't know anything about, and so setting this flag is backward compatible
  // also to older MPI versions.
  MPI_Win        shmem_window;
  void          *base_ptr;
  const MPI_Aint align_by = 64;
  const MPI_Aint alloc_size =
    Utilities::MPI::broadcast(shmem_group_communicator,
                              (size() * sizeof(T) + (align_by - 1)),
                              0);

  {
    int ierr;

    MPI_Info mpi_info;
    ierr = MPI_Info_create(&mpi_info);
    AssertThrowMPI(ierr);
    ierr = MPI_Info_set(mpi_info,
                        "mpi_minimum_memory_alignment",
                        std::to_string(align_by).c_str());
    AssertThrowMPI(ierr);
    ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
                                   /* disp_unit = */ 1,
                                   mpi_info,
                                   shmem_group_communicator,
                                   &base_ptr,
                                   &shmem_window);
    AssertThrowMPI(ierr);

    ierr = MPI_Info_free(&mpi_info);
    AssertThrowMPI(ierr);
  }
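
  // A worked example of the allocation size above (illustrative numbers):
  // for T = double with size() = 1000 and align_by = 64, the shmem root asks
  // for 1000 * 8 + 63 = 8063 bytes, which is enough to find a 64-byte aligned
  // run of 8000 bytes no matter where the returned pointer happens to sit.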


  // **** Step 4 ****
  // The next step is to teach all non-shmem root processes what the pointer to
  // the array is that the shmem-root created. MPI has a nifty way for this
  // given that only a single process actually allocated memory in the window:
  // When calling MPI_Win_shared_query, the MPI documentation says that
  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
  // the pointer, disp_unit, and size of the memory segment belonging the
  // lowest rank that specified size > 0. If all processes in the group
  // attached to the window specified size = 0, then the call returns size = 0
  // and a baseptr as if MPI_ALLOC_MEM was called with size = 0."
  //
  // This will allow us to obtain the pointer to the shmem root's memory area,
  // which is the only one we care about. (None of the other processes have
  // even allocated any memory.)
  //
  // We don't need to do this on the shmem root process: This process has
  // already gotten its base_ptr correctly set above, and we can determine the
  // array size by just calling size().
  if (is_shmem_root == false)
    {
      int      disp_unit;
      MPI_Aint alloc_size; // not actually used
      const int ierr = MPI_Win_shared_query(
        shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
      AssertThrowMPI(ierr);

      // Make sure we actually got a pointer, and check that the disp_unit is
      // equal to 1 (as set above)
      Assert(base_ptr != nullptr, ExcInternalError());
      Assert(disp_unit == 1, ExcInternalError());
    }


  // **** Step 5 ****
  // Now that all processes know the address of the space that is visible to
  // everyone, we need to figure out whether it is properly aligned and if not,
  // find the next aligned address.
  //
  // std::align does that, but it also modifies its last two arguments. The
  // documentation of that function at
  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but
  // I *think* that the following should do given that we do not use base_ptr
  // and available_space any further after the call to std::align.
  std::size_t available_space       = alloc_size;
  void       *base_ptr_backup       = base_ptr;
  T          *aligned_shmem_pointer = static_cast<T *>(
    std::align(align_by, new_size * sizeof(T), base_ptr, available_space));
  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());

  // There is one step to guard against. It is *conceivable* that the base_ptr
  // we have previously obtained from MPI_Win_shared_query is mapped so
  // awkwardly into the different MPI processes' memory spaces that it is
  // aligned in one memory space, but not another. In that case, different
  // processes would align base_ptr differently, and adjust available_space
  // differently. We can check that by making sure that the max (or min) over
  // all processes is equal to every process's value. If that's not the case,
  // then the whole idea of aligning above is wrong and we need to rethink
  // what it means to align data in a shared memory space.
  //
  // One might be tempted to think that this is not how MPI implementations
  // actually arrange things. Alas, when developing this functionality in
  // 2021, this is really how at least OpenMPI ends up doing things. (This is
  // with an OpenMPI implementation of MPI 3.1, so it does not support the
  // flag we set in the MPI_Info structure above when allocating the memory
  // window.) Indeed, when running this code on three processes, one ends up
  // with base_ptr values of
  //     base_ptr=0x7f0842f02108
  //     base_ptr=0x7fc0a47881d0
  //     base_ptr=0x7f64872db108
  // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so
  // there is no common offset std::align could find that leads to a 64-byte
  // aligned memory address in all three memory spaces. That's a tremendous
  // nuisance and there is really nothing we can do about this other than just
  // fall back on the (unaligned) base_ptr in that case.
  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
      Utilities::MPI::max(available_space, shmem_group_communicator))
    aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);


  // **** Step 6 ****
  // If this is the shmem root process, we need to copy the data into the
  // shared memory space.
  if (is_shmem_root)
    {
      if (std::is_trivially_copyable_v<T> == true)
        std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
      else
        for (std::size_t i = 0; i < size(); ++i)
          new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
    }

  // Make sure that the shared memory host has copied the data before we try
  // to access it.
  const int ierr = MPI_Barrier(shmem_group_communicator);
  AssertThrowMPI(ierr);

  // **** Step 7 ****
  // Finally, we need to set the pointers of this object to what we just
  // learned. This also releases all memory that may have been in use
  // previously.
  //
  // The part that is a bit tricky is how to write the deleter of this
  // shared memory object. When we want to get rid of it, we need to
  // also release the MPI_Win object along with the shmem_group_communicator
  // object. That's because as long as we use the shared memory, we still need
  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
  // communicator. (The former is definitely true, the latter is not quite
  // clear from the MPI documentation, but seems reasonable.) So we need to
  // have a deleter for the pointer that ensures that upon release of the
  // memory, we not only call the destructor of these memory elements (but
  // only once, on the shmem root!) but also destroy the MPI_Win and the
  // communicator. All of that is encapsulated in the following call where the
  // deleter stores copies of the arguments it is given.
  elements = decltype(elements)(aligned_shmem_pointer,
                                Deleter(this,
                                        is_shmem_root,
                                        aligned_shmem_pointer,
                                        shmem_group_communicator,
                                        shmem_window));

  // We then also have to set the other two pointers that define the state of
  // the current object. Note that the new buffer size is exactly as large as
  // necessary, i.e., can store size() elements, regardless of the number of
  // allocated elements in the original objects.
  used_elements_end      = elements.get() + new_size;
  allocated_elements_end = used_elements_end;

  // **** Consistency check ****
  // At this point, each process should have a copy of the data.
  // Verify this in some sort of round-about way
#    ifdef DEBUG
  replicated_across_communicator      = true;
  const std::vector<char> packed_data = Utilities::pack(*this);
  const int               hash =
    std::accumulate(packed_data.begin(), packed_data.end(), int(0));
  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
#    endif

#  else
  // No MPI -> nothing to replicate
  (void)communicator;
  (void)root_process;
#  endif
}
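
// A sketch of the intended use of replicate_across_communicator(), assuming
// an MPI run in which rank 0 has computed a large lookup table (the function
// compute_lookup_table() is hypothetical):
//
// @code
//   AlignedVector<double> table;
//   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
//     table = compute_lookup_table();
//   table.replicate_across_communicator(MPI_COMM_WORLD, 0);
//   // Every process can now read 'table'; one copy exists per shared-memory
//   // node, and the vector must not be modified afterwards.
// @endcode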


template <class T>
inline void
AlignedVector<T>::swap(AlignedVector<T> &vec) noexcept
{
  // Swap the data in the 'elements' objects. Then also make sure that
  // their respective deleter objects point to the right place.
  std::swap(elements, vec.elements);
  elements.get_deleter().reset_owning_object(this);
  vec.elements.get_deleter().reset_owning_object(&vec);

  // Now also swap the remaining members.
  std::swap(used_elements_end, vec.used_elements_end);
  std::swap(allocated_elements_end, vec.allocated_elements_end);
}


template <class T>
inline bool
AlignedVector<T>::empty() const
{
  return used_elements_end == elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::size() const
{
  return used_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::capacity() const
{
  return allocated_elements_end - elements.get();
}


template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::operator[](const size_type index)
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::operator[](const size_type index) const
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <typename T>
inline typename AlignedVector<T>::pointer
AlignedVector<T>::data()
{
  return elements.get();
}



template <typename T>
inline typename AlignedVector<T>::const_pointer
AlignedVector<T>::data() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::begin()
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::end()
{
  return used_elements_end;
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::begin() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::end() const
{
  return used_elements_end;
}


template <class T>
template <class Archive>
inline void
AlignedVector<T>::save(Archive &ar, const unsigned int) const
{
  size_type vec_size = size();
  ar       &vec_size;
  if (vec_size > 0)
    ar &boost::serialization::make_array(elements.get(), vec_size);
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::load(Archive &ar, const unsigned int)
{
  size_type vec_size = 0;
  ar       &vec_size;

  if (vec_size > 0)
    {
      reserve(vec_size);
      ar &boost::serialization::make_array(elements.get(), vec_size);
      used_elements_end = elements.get() + vec_size;
    }
}
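
// A minimal round-trip sketch for the save()/load() pair above. The BOOST
// archive and <sstream> headers used here are not included by this file:
//
// @code
//   AlignedVector<double> v(4, 3.14);
//
//   std::ostringstream            out;
//   boost::archive::text_oarchive oa(out);
//   oa << v; // dispatches to save()
//
//   AlignedVector<double>         w;
//   std::istringstream            in(out.str());
//   boost::archive::text_iarchive ia(in);
//   ia >> w; // dispatches to load()
// @endcode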


template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::memory_consumption() const
{
  size_type memory = sizeof(*this);
  for (const T *t = elements.get(); t != used_elements_end; ++t)
    memory += MemoryConsumption::memory_consumption(*t);
  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
  return memory;
}


#endif // ifndef DOXYGEN


// Relational operator == for AlignedVector
template <class T>
bool
operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  if (lhs.size() != rhs.size())
    return false;
  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
                                                 rit = rhs.begin();
       lit != lhs.end();
       ++lit, ++rit)
    if (*lit != *rit)
      return false;
  return true;
}



// Relational operator != for AlignedVector
template <class T>
bool
operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  return !(operator==(lhs, rhs));
}


DEAL_II_NAMESPACE_CLOSE

#endif