aligned_vector.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2014 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#ifndef dealii_aligned_vector_h
17#define dealii_aligned_vector_h
18
19#include <deal.II/base/config.h>
20
21#include <deal.II/base/exceptions.h>
22#include <deal.II/base/memory_consumption.h>
23#include <deal.II/base/mpi.h>
24#include <deal.II/base/parallel.h>
25#include <deal.II/base/utilities.h>
26
27// boost::serialization::make_array used to be in array.hpp, but was
28// moved to a different file in BOOST 1.64
29#include <boost/version.hpp>
30#if BOOST_VERSION >= 106400
31# include <boost/serialization/array_wrapper.hpp>
32#else
33# include <boost/serialization/array.hpp>
34#endif
35#include <boost/serialization/split_member.hpp>
36
37#include <cstring>
38#include <memory>
39#include <type_traits>
40
41
42
43DEAL_II_NAMESPACE_OPEN
44
45
59template <class T>
60class AlignedVector
61{
62public:
67 using value_type = T;
68 using pointer = value_type *;
69 using const_pointer = const value_type *;
70 using iterator = value_type *;
71 using const_iterator = const value_type *;
72 using reference = value_type &;
73 using const_reference = const value_type &;
74 using size_type = std::size_t;
75
80 AlignedVector();
89 template <
90 typename RandomAccessIterator,
91 typename = std::enable_if_t<std::is_convertible_v<
92 typename std::iterator_traits<RandomAccessIterator>::iterator_category,
93 std::random_access_iterator_tag>>>
94 AlignedVector(RandomAccessIterator begin, RandomAccessIterator end);
95
102 explicit AlignedVector(const size_type size, const T &init = T());
103
107 ~AlignedVector() = default;
108
115 AlignedVector(const AlignedVector<T> &vec);
121 AlignedVector(AlignedVector<T> &&vec) noexcept;
128 AlignedVector &
129 operator=(const AlignedVector<T> &vec);
134 AlignedVector &
135 operator=(AlignedVector<T> &&vec) noexcept;
155 void
156 resize_fast(const size_type new_size);
157
170 void
171 resize(const size_type new_size);
172
188 void
189 resize(const size_type new_size, const T &init);
190
211 void
212 reserve(const size_type new_allocated_size);
213
217 void
218 shrink_to_fit();
219
224 void
225 clear();
226
232 void
233 push_back(const T in_data);
234
239 reference
240 back();
244 const_reference
245 back() const;
246
251 template <typename ForwardIterator>
252 void
253 insert_back(ForwardIterator begin, ForwardIterator end);
254
263 template <
264 typename RandomAccessIterator,
265 typename = std::enable_if_t<std::is_convertible_v<
266 typename std::iterator_traits<RandomAccessIterator>::iterator_category,
267 std::random_access_iterator_tag>>>
268 iterator
269 insert(const_iterator position,
270 RandomAccessIterator begin,
271 RandomAccessIterator end);
272
282 void
283 fill();
284
293 void
294 fill(const T &element);
295
383 void
384 replicate_across_communicator(const MPI_Comm communicator,
385 const unsigned int root_process);
386
390 void
391 swap(AlignedVector<T> &vec) noexcept;
392
396 bool
397 empty() const;
398
402 size_type
403 size() const;
404
409 size_type
410 capacity() const;
411
415 reference
416 operator[](const size_type index);
417
421 const_reference
422 operator[](const size_type index) const;
423
427 pointer
428 data();
429
433 const_pointer
434 data() const;
435
440 iterator
441 begin();
446 iterator
447 end();
451 const_iterator
452 begin() const;
453
457 const_iterator
458 end() const;
459
465 size_type
466 memory_consumption() const;
467
473 template <class Archive>
474 void
475 save(Archive &ar, const unsigned int version) const;
476
482 template <class Archive>
483 void
484 load(Archive &ar, const unsigned int version);
485
486#ifdef DOXYGEN
492 template <class Archive>
493 void
494 serialize(Archive &archive, const unsigned int version);
495#else
496 // This macro defines the serialize() method that is compatible with
497 // the templated save() and load() methods that have been implemented.
498 BOOST_SERIALIZATION_SPLIT_MEMBER()
499#endif
500
508 "Changing the vector after a call to "
509 "replicate_across_communicator() is not allowed.");
510
511private:
516 void
517 allocate_and_move(const std::size_t old_size,
518 const std::size_t new_size,
519 const std::size_t new_allocated_size);
520
612 class Deleter
613 {
614 public:
620 Deleter(AlignedVector<T> *owning_object);
621
622#ifdef DEAL_II_WITH_MPI
630 Deleter(AlignedVector<T> *owning_object,
631 const bool is_shmem_root,
632 T *aligned_shmem_pointer,
633 MPI_Comm shmem_group_communicator,
634 MPI_Win shmem_window);
635#endif
636
642 void
643 operator()(T *ptr);
644
652 void
653 reset_owning_object(const AlignedVector<T> *new_aligned_vector_ptr);
654
655 private:
659 class DeleterActionBase
660 {
661 public:
665 virtual ~DeleterActionBase() = default;
666
672 virtual void
673 delete_array(const AlignedVector<T> *owning_aligned_vector, T *ptr) = 0;
674 };
675
676#ifdef DEAL_II_WITH_MPI
677
682 class MPISharedMemDeleterAction : public DeleterActionBase
683 {
684 public:
689 MPISharedMemDeleterAction(const bool is_shmem_root,
690 T *aligned_shmem_pointer,
691 MPI_Comm shmem_group_communicator,
692 MPI_Win shmem_window);
693
699 virtual void
700 delete_array(const AlignedVector<T> *aligned_vector, T *ptr);
701
702 private:
707 const bool is_shmem_root;
708 T *aligned_shmem_pointer;
709 MPI_Comm shmem_group_communicator;
710 MPI_Win shmem_window;
711 };
712#endif
713
718 std::unique_ptr<DeleterActionBase> deleter_action_object;
719
724 const AlignedVector<T> *owning_aligned_vector;
725 };
726
730 std::unique_ptr<T[], Deleter> elements;
731
735 T *used_elements_end;
736
740 T *allocated_elements_end;
741
745 bool replicated_across_communicator;
746};
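// Editor's sketch (not part of the library): typical use of the interface
// declared above, relying only on members shown in this header. The vector
// behaves much like std::vector, but its storage is 64-byte aligned.
//
//   dealii::AlignedVector<double> v(8, 1.0); // eight elements, all 1.0
//   v.push_back(2.0);                        // grows the allocation if full
//   v.resize(16);                            // new built-in elements are zeroed
//   double *p = v.data();                    // 64-byte aligned pointer
//   for (const double &d : v)                // begin()/end() enable range-for
//     (void)d;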
747
748
749// ------------------------------- inline functions --------------------------
750
756namespace internal
757{
776 template <typename RandomAccessIterator, typename T>
777 class AlignedVectorCopyConstruct
778 : private dealii::parallel::ParallelForInteger
779 {
780 static const std::size_t minimum_parallel_grain_size =
781 160000 / sizeof(T) + 1;
782
783 public:
793 AlignedVectorCopyConstruct(RandomAccessIterator source_begin,
794 RandomAccessIterator source_end,
795 T *const destination)
796 : source_(source_begin)
797 , destination_(destination)
798 {
799 Assert(source_end >= source_begin, ExcInternalError());
800 Assert(source_end == source_begin || destination != nullptr,
801 ExcInternalError());
802 const std::size_t size = source_end - source_begin;
803 if (size < minimum_parallel_grain_size)
804 apply_to_subrange(0, size);
805 else
806 apply_parallel(0, size, minimum_parallel_grain_size);
807 }
808
813 virtual void
814 apply_to_subrange(const std::size_t begin,
815 const std::size_t end) const override
816 {
817 if (end == begin)
818 return;
819
820 // We can use memcpy() with trivially copyable objects.
821 if constexpr (std::is_trivially_copyable_v<T> == true &&
822 (std::is_same_v<T *, RandomAccessIterator> ||
823 std::is_same_v<const T *, RandomAccessIterator>) == true)
824 std::memcpy(destination_ + begin,
825 source_ + begin,
826 (end - begin) * sizeof(T));
827 else
828 for (std::size_t i = begin; i < end; ++i)
829 new (&destination_[i]) T(*(source_ + i));
830 }
831
832 private:
833 RandomAccessIterator source_;
834 T *const destination_;
835 };
836
837
844 template <typename RandomAccessIterator, typename T>
845 class AlignedVectorMoveConstruct
846 : private dealii::parallel::ParallelForInteger
847 {
848 static const std::size_t minimum_parallel_grain_size =
849 160000 / sizeof(T) + 1;
850
851 public:
861 AlignedVectorMoveConstruct(RandomAccessIterator source_begin,
862 RandomAccessIterator source_end,
863 T *const destination)
864 : source_(source_begin)
865 , destination_(destination)
866 {
867 Assert(source_end >= source_begin, ExcInternalError());
868 Assert(source_end == source_begin || destination != nullptr,
869 ExcInternalError());
870 const std::size_t size = source_end - source_begin;
871 if (size < minimum_parallel_grain_size)
872 apply_to_subrange(0, size);
873 else
874 apply_parallel(0, size, minimum_parallel_grain_size);
875 }
876
881 virtual void
882 apply_to_subrange(const std::size_t begin,
883 const std::size_t end) const override
884 {
885 if (end == begin)
886 return;
887
888 // We can use memcpy() with trivially copyable objects.
889 if constexpr (std::is_trivially_copyable_v<T> == true &&
890 (std::is_same_v<T *, RandomAccessIterator> ||
891 std::is_same_v<const T *, RandomAccessIterator>) == true)
892 std::memcpy(destination_ + begin,
893 source_ + begin,
894 (end - begin) * sizeof(T));
895 else
896 // For everything else just use the move constructor. The original
897 // object remains alive and will be destroyed elsewhere.
898 for (std::size_t i = begin; i < end; ++i)
899 new (&destination_[i]) T(std::move(*(source_ + i)));
900 }
901
902 private:
903 RandomAccessIterator source_;
904 T *const destination_;
905 };
906
907
925 template <typename T, bool initialize_memory>
926 class AlignedVectorInitialize : private dealii::parallel::ParallelForInteger
927 {
928 static const std::size_t minimum_parallel_grain_size =
929 160000 / sizeof(T) + 1;
930
931 public:
936 AlignedVectorInitialize(const std::size_t size,
937 const T &element,
938 T *const destination)
939 : element_(element)
940 , destination_(destination)
941 , trivial_element(false)
942 {
943 if (size == 0)
944 return;
945 Assert(destination != nullptr, ExcInternalError());
946
947 // do not use memcmp() for long double because on some systems it does not
948 // completely fill its memory and may lead to false positives in e.g.
949 // valgrind
950 if constexpr (std::is_trivially_default_constructible_v<T> == true &&
951 std::is_same_v<T, long double> == false)
952 {
953 const unsigned char zero[sizeof(T)] = {};
954 if (std::memcmp(zero, &element, sizeof(T)) == 0)
955 trivial_element = true;
956 }
957 if (size < minimum_parallel_grain_size)
958 apply_to_subrange(0, size);
959 else
960 apply_parallel(0, size, minimum_parallel_grain_size);
961 }
962
966 virtual void
967 apply_to_subrange(const std::size_t begin,
968 const std::size_t end) const override
969 {
970 // Only use memset() with types whose default constructors don't do
971 // anything.
972 if constexpr (std::is_trivially_default_constructible_v<T> == true)
973 if (trivial_element)
974 {
975 std::memset(destination_ + begin, 0, (end - begin) * sizeof(T));
976 return;
977 }
978
979 copy_construct_or_assign(begin,
980 end,
981 std::bool_constant<initialize_memory>());
982 }
983
984 private:
985 const T &element_;
986 mutable T *destination_;
987 bool trivial_element;
988
989 // copy assignment operation
990 void
991 copy_construct_or_assign(const std::size_t begin,
992 const std::size_t end,
993 std::bool_constant<false>) const
994 {
995 for (std::size_t i = begin; i < end; ++i)
996 destination_[i] = element_;
997 }
998
999 // copy constructor (memory initialization)
1000 void
1001 copy_construct_or_assign(const std::size_t begin,
1002 const std::size_t end,
1003 std::bool_constant<true>) const
1004 {
1005 for (std::size_t i = begin; i < end; ++i)
1006 new (&destination_[i]) T(element_);
1007 }
1008 };
1009
1010
1011
1024 template <typename T, bool initialize_memory>
1025 class AlignedVectorDefaultInitialize
1026 : private dealii::parallel::ParallelForInteger
1027 {
1028 static const std::size_t minimum_parallel_grain_size =
1029 160000 / sizeof(T) + 1;
1030
1031 public:
1036 AlignedVectorDefaultInitialize(const std::size_t size, T *const destination)
1037 : destination_(destination)
1038 {
1039 if (size == 0)
1040 return;
1041 Assert(destination != nullptr, ExcInternalError());
1042
1043 if (size < minimum_parallel_grain_size)
1044 apply_to_subrange(0, size);
1045 else
1046 apply_parallel(0, size, minimum_parallel_grain_size);
1047 }
1048
1052 virtual void
1053 apply_to_subrange(const std::size_t begin,
1054 const std::size_t end) const override
1055 {
1056 // Only use memset() with types whose default constructors don't do
1057 // anything.
1058 if constexpr (std::is_trivially_default_constructible_v<T> == true)
1059 std::memset(destination_ + begin, 0, (end - begin) * sizeof(T));
1060 else
1061 default_construct_or_assign(begin,
1062 end,
1063 std::bool_constant<initialize_memory>());
1064 }
1065
1066 private:
1067 mutable T *destination_;
1068
1069 // copy assignment operation
1070 void
1071 default_construct_or_assign(const std::size_t begin,
1072 const std::size_t end,
1073 std::bool_constant<false>) const
1074 {
1075 for (std::size_t i = begin; i < end; ++i)
1076 destination_[i] = std::move(T());
1077 }
1078
1079 // copy constructor (memory initialization)
1080 void
1081 default_construct_or_assign(const std::size_t begin,
1082 const std::size_t end,
1083 std::bool_constant<true>) const
1084 {
1085 for (std::size_t i = begin; i < end; ++i)
1086 new (&destination_[i]) T;
1087 }
1088 };
1089
1090} // end of namespace internal
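// Editor's note (sketch, not part of the library): each helper class above
// derives from dealii::parallel::ParallelForInteger and only splits the loop
// across tasks when the range is long enough to amortize scheduling overhead.
// The threshold 160000/sizeof(T)+1 corresponds to roughly 160 kB of data,
// e.g. for T = double:
//
//   constexpr std::size_t grain = 160000 / sizeof(double) + 1; // == 20001
//   // ranges with fewer than 'grain' elements are handled by a single
//   // apply_to_subrange() call; longer ones go through apply_parallel().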
1091
1092
1093#ifndef DOXYGEN
1094
1095
1096
1097template <typename T>
1098inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object)
1099 : deleter_action_object(nullptr) // encode default action by using a nullptr
1100 , owning_aligned_vector(owning_object)
1101{}
1102
1103
1104# ifdef DEAL_II_WITH_MPI
1105
1106template <typename T>
1107inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object,
1108 const bool is_shmem_root,
1109 T *aligned_shmem_pointer,
1110 MPI_Comm shmem_group_communicator,
1111 MPI_Win shmem_window)
1112 : deleter_action_object(
1113 std::make_unique<MPISharedMemDeleterAction>(is_shmem_root,
1114 aligned_shmem_pointer,
1115 shmem_group_communicator,
1116 shmem_window))
1117 , owning_aligned_vector(owning_object)
1118{}
1119# endif
1120
1121
1122template <typename T>
1123inline void
1124AlignedVector<T>::Deleter::operator()(T *ptr)
1125{
1126 // If no special action has been registered (i.e., if the action pointer is
1127 // nullptr), then just perform the default action right here.
1128 if (deleter_action_object == nullptr)
1129 {
1130 if (ptr != nullptr)
1131 {
1132 Assert(owning_aligned_vector->used_elements_end != nullptr,
1133 ExcInternalError());
1134
1135 if (std::is_trivially_destructible_v<T> == false)
1136 for (T *p = owning_aligned_vector->used_elements_end - 1; p >= ptr;
1137 --p)
1138 p->~T();
1139
1140 std::free(ptr);
1141 }
1142 }
1143 else
1144 // Otherwise, let the action object do what is necessary
1145 deleter_action_object->delete_array(owning_aligned_vector, ptr);
1146}
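// Editor's note (sketch): the deleter consults the owning AlignedVector to
// know how many elements are alive, since the unique_ptr stores only the raw
// pointer. That is also why the move operations below must call
// reset_owning_object(); conceptually:
//
//   dealii::AlignedVector<std::string> a(3);
//   dealii::AlignedVector<std::string> b = std::move(a);
//   // the deleter moved into 'b' must now consult b's used_elements_end,
//   // not the stale pointer into the moved-from object 'a'.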
1147
1148
1149
1150template <typename T>
1151inline void
1152AlignedVector<T>::Deleter::reset_owning_object(
1153 const AlignedVector<T> *new_aligned_vector_ptr)
1154{
1155 owning_aligned_vector = new_aligned_vector_ptr;
1156}
1157
1158
1159# ifdef DEAL_II_WITH_MPI
1160
1161template <typename T>
1162inline AlignedVector<T>::Deleter::MPISharedMemDeleterAction::
1163 MPISharedMemDeleterAction(const bool is_shmem_root,
1164 T *aligned_shmem_pointer,
1165 MPI_Comm shmem_group_communicator,
1166 MPI_Win shmem_window)
1167 : is_shmem_root(is_shmem_root)
1168 , aligned_shmem_pointer(aligned_shmem_pointer)
1169 , shmem_group_communicator(shmem_group_communicator)
1170 , shmem_window(shmem_window)
1171{}
1172
1173
1174
1175template <typename T>
1176inline void
1177AlignedVector<T>::Deleter::MPISharedMemDeleterAction::delete_array(
1178 const AlignedVector<T> *aligned_vector,
1179 T *ptr)
1180{
1181 (void)ptr;
1182 // It would be nice to assert that aligned_vector->elements.get() equals ptr,
1183 // but it is not guaranteed to work: clang, for example, sets elements.get()
1184 // to nullptr and then calls the deleter on a previously made copy. Hence we
1185 // must assume here that elements.get() (which is managed by the unique_ptr)
1186 // may be nullptr at this point.
1187 //
1188 // used_elements_end is a member variable of AlignedVector (i.e., we control
1189 // it, not unique_ptr) so it is still set to its correct value.
1190
1191 if (is_shmem_root)
1192 if (std::is_trivially_destructible_v<T> == false)
1193 for (T *p = aligned_vector->used_elements_end - 1; p >= ptr; --p)
1194 p->~T();
1195
1196 int ierr;
1197 ierr = MPI_Win_free(&shmem_window);
1198 AssertThrowMPI(ierr);
1199
1200 Utilities::MPI::free_communicator(shmem_group_communicator);
1201}
1202
1203# endif
1204
1205
1206template <class T>
1207inline AlignedVector<T>::AlignedVector()
1208 : elements(nullptr, Deleter(this))
1209 , used_elements_end(nullptr)
1210 , allocated_elements_end(nullptr)
1211 , replicated_across_communicator(false)
1212{}
1213
1214
1215
1216template <class T>
1217template <typename RandomAccessIterator, typename>
1218inline AlignedVector<T>::AlignedVector(RandomAccessIterator begin,
1219 RandomAccessIterator end)
1220 : elements(nullptr, Deleter(this))
1221 , used_elements_end(nullptr)
1222 , allocated_elements_end(nullptr)
1223 , replicated_across_communicator(false)
1224{
1225 allocate_and_move(0u, end - begin, end - begin);
1226 used_elements_end = allocated_elements_end;
1227 dealii::internal::AlignedVectorCopyConstruct<RandomAccessIterator, T>(begin,
1228 end,
1229 data());
1230}
1231
1232
1233template <class T>
1234inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
1235 : elements(nullptr, Deleter(this))
1236 , used_elements_end(nullptr)
1237 , allocated_elements_end(nullptr)
1238 , replicated_across_communicator(false)
1239{
1240 if (size > 0)
1241 resize(size, init);
1242}
1243
1244
1245
1246template <class T>
1247inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
1248 : elements(nullptr, Deleter(this))
1249 , used_elements_end(nullptr)
1250 , allocated_elements_end(nullptr)
1251 , replicated_across_communicator(false)
1252{
1253 // copy the data from vec
1254 reserve(vec.size());
1255 used_elements_end = allocated_elements_end;
1256 dealii::internal::AlignedVectorCopyConstruct<T *, T>(vec.elements.get(),
1257 vec.used_elements_end,
1258 elements.get());
1259}
1260
1261
1262
1263template <class T>
1264inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
1265 : AlignedVector()
1266{
1267 // forward to the move operator
1268 *this = std::move(vec);
1269}
1270
1271
1272
1273template <class T>
1274inline AlignedVector<T> &
1275AlignedVector<T>::operator=(const AlignedVector<T> &vec)
1276{
1277 const size_type new_size = vec.used_elements_end - vec.elements.get();
1278
1279 // First throw away everything and re-allocate memory but leave that
1280 // memory uninitialized for now:
1281 resize(0);
1282 reserve(new_size);
1283
1284 // Then copy the elements over by using the copy constructor on these
1285 // elements:
1286 dealii::internal::AlignedVectorCopyConstruct<T *, T>(vec.elements.get(),
1287 vec.used_elements_end,
1288 elements.get());
1289
1290 // Finally adjust the pointer to the end of the elements that are used:
1291 used_elements_end = elements.get() + new_size;
1292
1293 return *this;
1294}
1295
1296
1297
1298template <class T>
1299inline AlignedVector<T> &
1300AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
1301{
1302 clear();
1303
1304 // Move the actual data in the 'elements' object. One problem is that this
1305 // also moves the deleter object, but the deleter object
1306 // references 'this' (i.e., the 'this' pointer of the *moved-from*
1307 // object). The way this is implemented is that we have to move the
1308 // deleter as well, and then reset the pointer inside the deleter
1309 // that references the outer object.
1310 elements = std::move(vec.elements);
1311 elements.get_deleter().reset_owning_object(this);
1312
1313 // Then also steal the other pointers and clear them in the original object:
1314 used_elements_end = vec.used_elements_end;
1315 allocated_elements_end = vec.allocated_elements_end;
1316
1317 vec.used_elements_end = nullptr;
1318 vec.allocated_elements_end = nullptr;
1319
1320 return *this;
1321}
1322
1323
1324
1325template <class T>
1326inline void
1327AlignedVector<T>::resize_fast(const size_type new_size)
1328{
1329 const size_type old_size = size();
1330
1331 if (new_size == 0)
1332 clear();
1333 else if (new_size == old_size)
1334 {
1335 } // nothing to do here
1336 else if (new_size < old_size)
1337 {
1338 // call destructor on fields that are released, if the type requires it.
1339 // doing it backward releases the elements in reverse order as compared to
1340 // how they were created
1341 if (std::is_trivially_destructible_v<T> == false)
1342 for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1343 p->~T();
1344 used_elements_end = elements.get() + new_size;
1345 }
1346 else // new_size > old_size
1347 {
1348 // Allocate more space, and claim that space as used
1349 reserve(new_size);
1350 used_elements_end = elements.get() + new_size;
1351
1352 // Leave the new array entries as-is (with undefined values) unless T's
1353 // default constructor is nontrivial (i.e., it is not a no-op)
1354 if (std::is_trivially_default_constructible_v<T> == false)
1355 dealii::internal::AlignedVectorDefaultInitialize<T, true>(
1356 new_size - old_size, elements.get() + old_size);
1357 }
1358}
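// Editor's note (sketch): unlike resize(), resize_fast() skips the
// initialization step for trivially constructible types:
//
//   dealii::AlignedVector<double> a, b;
//   a.resize_fast(8); // a[i] is indeterminate: no initialization happens
//   b.resize(8);      // b[i] == 0.0: new built-in elements are zeroed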
1359
1360
1361
1362template <class T>
1363inline void
1364AlignedVector<T>::resize(const size_type new_size)
1365{
1366 const size_type old_size = size();
1367
1368 if (new_size == 0)
1369 clear();
1370 else if (new_size == old_size)
1371 {
1372 } // nothing to do here
1373 else if (new_size < old_size)
1374 {
1375 // call destructor on fields that are released, if the type requires it.
1376 // doing it backward releases the elements in reverse order as compared to
1377 // how they were created
1378 if (std::is_trivially_destructible_v<T> == false)
1379 for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1380 p->~T();
1381 used_elements_end = elements.get() + new_size;
1382 }
1383 else // new_size > old_size
1384 {
1385 // Allocate more space, and claim that space as used
1386 reserve(new_size);
1387 used_elements_end = elements.get() + new_size;
1388
1389 // finally set the values to the default initializer
1390 dealii::internal::AlignedVectorDefaultInitialize<T, true>(
1391 new_size - old_size, elements.get() + old_size);
1392 }
1393}
1394
1395
1396
1397template <class T>
1398inline void
1399AlignedVector<T>::resize(const size_type new_size, const T &init)
1400{
1401 const size_type old_size = size();
1402
1403 if (new_size == 0)
1404 clear();
1405 else if (new_size == old_size)
1406 {
1407 } // nothing to do here
1408 else if (new_size < old_size)
1409 {
1410 // call destructor on fields that are released, if the type requires it.
1411 // doing it backward releases the elements in reverse order as compared to
1412 // how they were created
1413 if (std::is_trivially_destructible_v<T> == false)
1414 for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1415 p->~T();
1416 used_elements_end = elements.get() + new_size;
1417 }
1418 else // new_size > old_size
1419 {
1420 // Allocate more space, and claim that space as used
1421 reserve(new_size);
1422 used_elements_end = elements.get() + new_size;
1423
1424 // finally set the desired init values
1425 dealii::internal::AlignedVectorInitialize<T, true>(
1426 new_size - old_size, init, elements.get() + old_size);
1427 }
1428}
1429
1430
1431
1432template <class T>
1433inline void
1434AlignedVector<T>::allocate_and_move(const std::size_t old_size,
1435 const std::size_t new_size,
1436 const std::size_t new_allocated_size)
1437{
1438 // allocate and align along 64-byte boundaries (this is enough for all
1439 // levels of vectorization currently supported by deal.II)
1440 T *new_data_ptr;
1441 Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_data_ptr),
1442 64,
1443 new_size * sizeof(T));
1444
1445 // Now create a deleter that encodes what should happen when the object is
1446 // released: We need to destroy the objects that are currently alive (in
1447 // reverse order), and then release the memory. Note that we capture the
1448 // 'this' pointer because the number of elements currently alive might
1449 // change over time.
1450 Deleter deleter(this);
1451
1452 // copy whatever elements we need to retain
1453 if (new_allocated_size > 0)
1454 dealii::internal::AlignedVectorMoveConstruct<T *, T>(
1455 elements.get(), elements.get() + old_size, new_data_ptr);
1456
1457 // Now reset all the member variables of the current object
1458 // based on the allocation above. Assigning to a std::unique_ptr
1459 // object also releases the previously pointed to memory.
1460 //
1461 // Note that at the time of releasing the old memory, 'used_elements_end'
1462 // still points to its previous value, and this is important for the
1463 // deleter object of the previously allocated array (see how it loops over
1464 // the to-be-destroyed elements at the Deleter::DefaultDeleterAction
1465 // class).
1466 elements = decltype(elements)(new_data_ptr, std::move(deleter));
1467 used_elements_end = elements.get() + old_size;
1468 allocated_elements_end = elements.get() + new_size;
1469}
1470
1471
1472
1473template <class T>
1474inline void
1475AlignedVector<T>::reserve(const size_type new_allocated_size)
1476{
1477 const size_type old_size = used_elements_end - elements.get();
1478 const size_type old_allocated_size = allocated_elements_end - elements.get();
1479 if (new_allocated_size > old_allocated_size)
1480 {
1481 // if we continuously increase the size of the vector, we might be
1482 // reallocating a lot of times. therefore, try to increase the size more
1483 // aggressively
1484 const size_type new_size =
1485 std::max(new_allocated_size, 2 * old_allocated_size);
1486
1487 allocate_and_move(old_size, new_size, new_allocated_size);
1488 }
1489 else if (new_allocated_size == 0)
1490 clear();
1491 else // 0 < new_allocated_size <= old_allocated_size
1492 {
1493 } // nothing to do here
1494}
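// Editor's note (sketch): the std::max(new_allocated_size,
// 2 * old_allocated_size) policy above yields geometric growth, so repeated
// push_back() calls trigger only O(log n) reallocations:
//
//   dealii::AlignedVector<int> v;
//   for (int i = 0; i < 100; ++i)
//     v.push_back(i); // capacity steps 16 -> 32 -> 64 -> 128: 4 allocations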
1495
1496
1497
1498template <class T>
1499inline void
1500AlignedVector<T>::shrink_to_fit()
1501{
1502 if constexpr (running_in_debug_mode())
1503 {
1504 Assert(replicated_across_communicator == false,
1505 ExcAlignedVectorChangeAfterReplication());
1506 }
1507 const size_type used_size = used_elements_end - elements.get();
1508 const size_type allocated_size = allocated_elements_end - elements.get();
1509 if (allocated_size > used_size)
1510 allocate_and_move(used_size, used_size, used_size);
1511}
1512
1513
1514
1515template <class T>
1516inline void
1517AlignedVector<T>::clear()
1518{
1519 // Just release the memory (which also calls the destructor of the elements),
1520 // and then set the auxiliary pointers to invalid values.
1521 //
1522 // Note that at the time of releasing the old memory, 'used_elements_end'
1523 // still points to its previous value, and this is important for the
1524 // deleter object of the previously allocated array (see how it loops over
1525 // the to-be-destroyed elements a few lines above).
1526 elements.reset();
1527 used_elements_end = nullptr;
1528 allocated_elements_end = nullptr;
1529}
1530
1531
1532
1533template <class T>
1534inline void
1535AlignedVector<T>::push_back(const T in_data)
1536{
1537 Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
1538 if (used_elements_end == allocated_elements_end)
1539 reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
1540 new (used_elements_end++) T(in_data);
1541}
1542
1543
1544
1545template <class T>
1546inline typename AlignedVector<T>::reference
1547AlignedVector<T>::back()
1548{
1549 AssertIndexRange(0, size());
1550 T *field = used_elements_end - 1;
1551 return *field;
1552}
1553
1554
1555
1556template <class T>
1557inline typename AlignedVector<T>::const_reference
1558AlignedVector<T>::back() const
1559{
1560 AssertIndexRange(0, size());
1561 const T *field = used_elements_end - 1;
1562 return *field;
1563}
1564
1565
1566
1567template <class T>
1568template <typename ForwardIterator>
1569inline void
1570AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
1571{
1572 const size_type old_size = size();
1573 reserve(old_size + (end - begin));
1574 for (; begin != end; ++begin, ++used_elements_end)
1575 new (used_elements_end) T(*begin);
1576}
1577
1578
1579
1580template <class T>
1581template <typename RandomAccessIterator, typename>
1582inline typename AlignedVector<T>::iterator
1583AlignedVector<T>::insert(const_iterator position,
1584 RandomAccessIterator begin,
1585 RandomAccessIterator end)
1586{
1587 Assert(replicated_across_communicator == false,
1588 ExcAlignedVectorChangeAfterReplication());
1589 Assert(this->begin() <= position && position <= this->end(),
1590 ExcMessage("The position iterator is not valid."));
1591 const auto offset = position - this->begin();
1592
1593 const size_type old_size = size();
1594 const size_type range_size = end - begin;
1595 const size_type new_size = old_size + range_size;
1596 if (range_size != 0)
1597 {
1598 // This is similar to allocate_and_move(), except that we need to move
1599 // whatever was before position and whatever is after it into two
1600 // different places
1601 T *new_data_ptr = nullptr;
1602 Utilities::System::posix_memalign(
1603 reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));
1604
1605 // Correctly handle the case where the range is inside the present array
1606 // by creating a temporary.
1607 AlignedVector<T> temporary(begin, end);
1608 dealii::internal::AlignedVectorMoveConstruct<T *, T>(
1609 elements.get(), elements.get() + offset, new_data_ptr);
1610 dealii::internal::AlignedVectorMoveConstruct<T *, T>(
1611 temporary.begin(), temporary.end(), new_data_ptr + offset);
1612 dealii::internal::AlignedVectorMoveConstruct<T *, T>(
1613 elements.get() + offset,
1614 elements.get() + old_size,
1615 new_data_ptr + offset + range_size);
1616
1617 Deleter deleter(this);
1618 elements = decltype(elements)(new_data_ptr, std::move(deleter));
1619 used_elements_end = elements.get() + new_size;
1620 allocated_elements_end = elements.get() + new_size;
1621 }
1622 return this->begin() + offset;
1623}
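// Editor's sketch: inserting a range, including one that aliases the vector
// itself, which the 'temporary' copy above makes safe:
//
//   dealii::AlignedVector<int> v(4, 1);          // 1 1 1 1
//   const int extra[2] = {2, 3};
//   v.insert(v.begin() + 2, extra, extra + 2);   // 1 1 2 3 1 1
//   v.insert(v.end(), v.begin(), v.begin() + 2); // appends 1 1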
1624
1625
1626
1627template <class T>
1628inline void
1629AlignedVector<T>::fill()
1630{
1631 dealii::internal::AlignedVectorDefaultInitialize<T, false>(size(),
1632 elements.get());
1633}
1634
1635
1636
1637template <class T>
1638inline void
1639AlignedVector<T>::fill(const T &value)
1640{
1641 dealii::internal::AlignedVectorInitialize<T, false>(size(),
1642 value,
1643 elements.get());
1644}
1645
1646
1647
1648template <class T>
1649inline void
1650AlignedVector<T>::replicate_across_communicator(const MPI_Comm communicator,
1651 const unsigned int root_process)
1652{
1653# ifdef DEAL_II_WITH_MPI
1654
1655 // Let the root process broadcast its size. If it is zero, then all
1656 // processes just clear() their memory and reset themselves to a non-shared
1657 // empty object -- there is no point in running through complicated MPI
1658 // calls if the end result is an empty array. Otherwise, we continue on.
1659 const size_type new_size =
1660 Utilities::MPI::broadcast(communicator, size(), root_process);
1661 if (new_size == 0)
1662 {
1663 clear();
1664 return;
1665 }
1666
1667
1668 // **** Step 0 ****
1669 // All but the root process no longer need their data, so release the memory
1670 // used to store the previous elements.
1671 if (Utilities::MPI::this_mpi_process(communicator) != root_process)
1672 {
1673 elements.reset();
1674 used_elements_end = nullptr;
1675 allocated_elements_end = nullptr;
1676 }
1677
1678 // **** Step 1 ****
1679 // Create communicators for each group of processes that can use
1680 // shared memory areas. Within each of these groups, we don't care about
1681 // which rank each of the old processes gets except that we would like to
1682 // make sure that the (global) root process will have rank=0 within
1683 // its own sub-communicator. We can do that through the third argument of
1684 // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
1685 // order of processes within the split communicators, and we should set it to
1686 // zero for the root processes and one for all others -- which means that
1687 // for all of these other processes, MPI can choose whatever order it
1688 // wants because they have the same key (MPI then documents that these ties
1689 // will be broken according to these processes' rank in the old group).
1690 //
1691 // At least that's the theory. In practice, the MPI implementation where
1692 // this function was developed on does not seem to do that. (Bug report
1693 // is here: https://github.com/open-mpi/ompi/issues/8854)
1694 // We work around this by letting MPI_Comm_split_type choose whatever
1695 // rank it wants, and then reshuffle with MPI_Comm_split in a second
1696 // step -- not elegant, nor efficient, but seems to work:
1697 MPI_Comm shmem_group_communicator;
1698 {
1699 MPI_Comm shmem_group_communicator_temp;
1700 int ierr = MPI_Comm_split_type(communicator,
1701 MPI_COMM_TYPE_SHARED,
1702 /* key */ 0,
1703 MPI_INFO_NULL,
1704 &shmem_group_communicator_temp);
1705 AssertThrowMPI(ierr);
1706
1707 const int key =
1708 (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
1709 ierr = MPI_Comm_split(shmem_group_communicator_temp,
1710 /* color */ 0,
1711 key,
1712 &shmem_group_communicator);
1713 AssertThrowMPI(ierr);
1714
1715 // Verify the explanation from above
1716 if (Utilities::MPI::this_mpi_process(communicator) == root_process)
1717 Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
1718 ExcInternalError());
1719
1720 // And get rid of the temporary communicator
1721 Utilities::MPI::free_communicator(shmem_group_communicator_temp);
1722 }
1723 const bool is_shmem_root =
1724 Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;
1725
1726 // **** Step 2 ****
1727 // We then have to send the state of the current object from the
1728 // root process to one exemplar in each shmem group. To this end,
1729 // we create another subcommunicator that includes the ranks zero
1730 // of all shmem groups, and because of the trick above, we know
1731 // that this also includes the original root process.
1732 //
1733 // There are different ways of creating a "shmem_roots_communicator".
1734 // The conceptually easiest way is to create an MPI_Group that only
1735 // includes the shmem roots and then create a communicator from this
1736 // via MPI_Comm_create or MPI_Comm_create_group. The problem
1737 // with this is that we would have to exchange among all processes
1738 // which ones are shmem roots and which are not. This is awkward.
1739 //
1740 // A simpler way is to use MPI_Comm_split that uses "colors" to
1741 // indicate which sub-communicator each process wants to be in.
1742 // We use color=0 to indicate the group of shmem roots, and color=1
1743 // for all other processes -- the latter will simply not ever do
1744 // anything among themselves with the communicator so created.
1745 //
1746 // Using MPI_Comm_split has the additional benefit that, just as above,
1747 // we can choose where each rank will end up in shmem_roots_communicator.
1748 // We again set key=0 for the original root_process, and key=1 for all other
1749 // ranks; then, the global root becomes rank=0 on the
1750 // shmem_roots_communicator. We don't care how the other processes are
1751 // ordered.
1752 MPI_Comm shmem_roots_communicator;
1753 {
1754 const int key =
1755 (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
1756
1757 const int ierr = MPI_Comm_split(communicator,
1758 /*color=*/
1759 (is_shmem_root ? 0 : 1),
1760 key,
1761 &shmem_roots_communicator);
1762 AssertThrowMPI(ierr);
1763
1764 // Again verify the explanation from above
1765 if (Utilities::MPI::this_mpi_process(communicator) == root_process)
1766 Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
1767 ExcInternalError());
1768 }
1769
1770 const unsigned int shmem_roots_root_rank = 0;
1771 const bool is_shmem_roots_root =
1772 (is_shmem_root && (Utilities::MPI::this_mpi_process(
1773 shmem_roots_communicator) == shmem_roots_root_rank));
1774
1775 // Now let the original root_process broadcast the current object to all
1776 // shmem roots. Thanks to the 'key' chosen above, rank zero of that
1777 // communicator is the original root process that has all of the data.
1778 if (is_shmem_root)
1779 {
1780 if (std::is_trivially_copyable_v<T> == true)
1781 {
1782 // The data is trivially copyable, i.e., we can copy things directly
1783 // without having to go through the serialization/deserialization
1784 // machinery of Utilities::MPI::broadcast.
1785 //
1786 // In that case, first tell all of the other shmem roots how many
1787 // elements we will have to deal with, and let them resize their
1788 // (non-shared) arrays.
1789 const size_type new_size =
1790 Utilities::MPI::broadcast(shmem_roots_communicator,
1791 size(),
1792 shmem_roots_root_rank);
1793 if (is_shmem_roots_root == false)
1794 resize(new_size);
1795
1796 // Then directly copy from the root process into these buffers
1797 int ierr = MPI_Bcast(elements.get(),
1798 sizeof(T) * new_size,
1799 MPI_CHAR,
1800 shmem_roots_root_rank,
1801 shmem_roots_communicator);
1802 AssertThrowMPI(ierr);
1803 }
1804 else
1805 {
1806 // The objects to be sent around are not "trivial", and so we have
1807 // to go through the serialization/deserialization machinery. On all
1808 // but the sending process, overwrite the current state with the
1809 // vector just broadcast.
1810 //
1811 // On the root rank, this would lead to resetting the 'elements'
1812 // pointer, which would trigger the deleter which would lead to a
1813 // deadlock. So we just send the result of the broadcast() call to
1814 // nirvana on the root process and keep our current state.
1815 if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
1816 Utilities::MPI::broadcast(shmem_roots_communicator,
1817 *this,
1818 shmem_roots_root_rank);
1819 else
1820 *this = Utilities::MPI::broadcast(shmem_roots_communicator,
1821 *this,
1822 shmem_roots_root_rank);
1823 }
1824 }
1825
1826 // We no longer need the shmem roots communicator, so get rid of it
1827 Utilities::MPI::free_communicator(shmem_roots_communicator);
1828
1829
1830 // **** Step 3 ****
1831 // At this point, all shmem groups have one shmem root process that has
1832 // a copy of the data. This is the point where each shmem group should
1833 // establish a shmem area to put the data into. As mentioned above,
1834 // we know that the shmem roots are rank zero in their respective
1835 // shmem_group_communicator.
1836 //
1837 // The process for all of this works as follows: While all processes in
1838 // the shmem group participate in the generation of the shmem memory window,
1839 // only the shmem root actually allocates any memory -- the rest just
1840 // allocate zero bytes of their own. We allocate space for exactly
1841 // size() elements (computed on the shmem_root that already has the data)
1842 // and add however many bytes are necessary so that we know that we can align
1843 // things to 64-byte boundaries. The worst case happens if the memory system
1844 // gives us a pointer to an address one byte past a desired alignment
1845 // boundary, and in that case aligning the memory will require us to waste the
1846 // first (align_by-1) bytes. So we have to ask for
1847 // size() * sizeof(T) + (align_by - 1)
1848 // bytes.
1849 //
1850 // Before MPI 4.0, there was no way to specify that we want memory aligned to
1851 // a certain number of bytes. This is going to come back to bite us further
1852 // down below when we try to get a properly aligned pointer to our memory
1853 // region, see the commentary there. Starting with MPI 4.0, one can set a
1854 // flag in an MPI_Info structure that requests a desired alignment, so we do
1855 // this for forward compatibility; MPI implementations ignore flags they don't
1856 // know anything about, and so setting this flag is backward compatible also
1857 // to older MPI versions.
1858 MPI_Win shmem_window;
1859 void *base_ptr;
1860 const MPI_Aint align_by = 64;
1861 const MPI_Aint alloc_size =
1862 Utilities::MPI::broadcast(shmem_group_communicator,
1863 (size() * sizeof(T) + (align_by - 1)),
1864 0);
1865
1866 {
1867 int ierr;
1868
1869 MPI_Info mpi_info;
1870 ierr = MPI_Info_create(&mpi_info);
1871 AssertThrowMPI(ierr);
1872 ierr = MPI_Info_set(mpi_info,
1873 "mpi_minimum_memory_alignment",
1874 std::to_string(align_by).c_str());
1875 AssertThrowMPI(ierr);
1876 ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
1877 /* disp_unit = */ 1,
1878 mpi_info,
1879 shmem_group_communicator,
1880 &base_ptr,
1881 &shmem_window);
1882 AssertThrowMPI(ierr);
1883
1884 ierr = MPI_Info_free(&mpi_info);
1885 AssertThrowMPI(ierr);
1886 }
1887
1888
1889 // **** Step 4 ****
1890 // The next step is to teach all non-shmem root processes what the pointer to
1891 // the array is that the shmem-root created. MPI has a nifty way for this
1892 // given that only a single process actually allocated memory in the window:
1893 // When calling MPI_Win_shared_query, the MPI documentation says that
1894 // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
1895 // the pointer, disp_unit, and size of the memory segment belonging the lowest
1896 // rank that specified size > 0. If all processes in the group attached to the
1897 // window specified size = 0, then the call returns size = 0 and a baseptr as
1898 // if MPI_ALLOC_MEM was called with size = 0."
1899 //
1900 // This will allow us to obtain the pointer to the shmem root's memory area,
1901 // which is the only one we care about. (None of the other processes have
1902 // even allocated any memory.)
1903 //
1904 // We don't need to do this on the shmem root process: This process has
1905 // already gotten its base_ptr correctly set above, and we can determine the
1906 // array size by just calling size().
1907 if (is_shmem_root == false)
1908 {
1909 int disp_unit;
1910 MPI_Aint alloc_size; // not actually used
1911 const int ierr = MPI_Win_shared_query(
1912 shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
1913 AssertThrowMPI(ierr);
1914
1915 // Make sure we actually got a pointer, and check that the disp_unit is
1916 // equal to 1 (as set above)
1917 Assert(base_ptr != nullptr, ExcInternalError());
1918 Assert(disp_unit == 1, ExcInternalError());
1919 }
1920
1921
1922 // **** Step 5 ****
1923 // Now that all processes know the address of the space that is visible to
1924 // everyone, we need to figure out whether it is properly aligned and if not,
1925 // find the next aligned address.
1926 //
1927 // std::align does that, but it also modifies its last two arguments. The
1928 // documentation of that function at
1929 // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but I
1930 // *think* that the following should do given that we do not use base_ptr and
1931 // available_space any further after the call to std::align.
1932 std::size_t available_space = alloc_size;
1933 void *base_ptr_backup = base_ptr;
1934 T *aligned_shmem_pointer = static_cast<T *>(
1935 std::align(align_by, new_size * sizeof(T), base_ptr, available_space));
1936 Assert(aligned_shmem_pointer != nullptr, ExcInternalError());
1937
1938 // There is one step to guard against. It is *conceivable* that the base_ptr
1939 // we have previously obtained from MPI_Win_shared_query is mapped so
1940 // awkwardly into the different MPI processes' memory spaces that it is
1941 // aligned in one memory space, but not another. In that case, different
1942 // processes would align base_ptr differently, and adjust available_space
1943 // differently. We can check that by making sure that the max (or min) over
1944 // all processes is equal to every process's value. If that's not the case,
1945 // then the whole idea of aligning above is wrong and we need to rethink what
1946 // it means to align data in a shared memory space.
1947 //
1948 // One might be tempted to think that this is not how MPI implementations
1949 // actually arrange things. Alas, when developing this functionality in 2021,
1950 // this is really how at least OpenMPI ends up doing things. (This is with an
1951 // OpenMPI implementation of MPI 3.1, so it does not support the flag we set
1952 // in the MPI_Info structure above when allocating the memory window.) Indeed,
1953 // when running this code on three processes, one ends up with base_ptr values
1954 // of
1955 // base_ptr=0x7f0842f02108
1956 // base_ptr=0x7fc0a47881d0
1957 // base_ptr=0x7f64872db108
1958 // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so there
1959 // is no common offset std::align could find that leads to a 64-byte
1960 // aligned memory address in all three memory spaces. That's a tremendous
1961 // nuisance and there is really nothing we can do about this other than just
1962 // fall back on the (unaligned) base_ptr in that case.
1963 if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
1964 Utilities::MPI::max(available_space, shmem_group_communicator))
1965 aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);
1966
1967
1968 // **** Step 6 ****
1969 // If this is the shmem root process, we need to copy the data into the
1970 // shared memory space.
1971 if (is_shmem_root)
1972 {
1973 if (std::is_trivially_copyable_v<T> == true)
1974 std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
1975 else
1976 for (std::size_t i = 0; i < size(); ++i)
1977 new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
1978 }
1979
1980 // Make sure that the shared memory host has copied the data before we try to
1981 // access it.
1982 const int ierr = MPI_Barrier(shmem_group_communicator);
1983 AssertThrowMPI(ierr);
1984
1985 // **** Step 7 ****
1986 // Finally, we need to set the pointers of this object to what we just
1987 // learned. This also releases all memory that may have been in use
1988 // previously.
1989 //
1990 // The part that is a bit tricky is how to write the deleter of this
1991 // shared memory object. When we want to get rid of it, we need to
1992 // also release the MPI_Win object along with the shmem_group_communicator
1993 // object. That's because as long as we use the shared memory, we still need
1994 // to hold on to the MPI_Win object, and the MPI_Win object is based on the
1995 // communicator. (The former is definitely true, the latter is not quite clear
1996 // from the MPI documentation, but seems reasonable.) So we need to have a
1997 // deleter for the pointer that ensures that upon release of the memory, we
1998 // not only call the destructor of these memory elements (but only once, on
1999 // the shmem root!) but also destroy the MPI_Win and the communicator. All of
2000 // that is encapsulated in the following call where the deleter makes copies
2001 // of the arguments in the lambda capture.
2002 elements = decltype(elements)(aligned_shmem_pointer,
2003 Deleter(this,
2004 is_shmem_root,
2005 aligned_shmem_pointer,
2006 shmem_group_communicator,
2007 shmem_window));
2008
2009 // We then also have to set the other two pointers that define the state of
2010 // the current object. Note that the new buffer size is exactly as large as
2011 // necessary, i.e., can store size() elements, regardless of the number of
2012 // allocated elements in the original objects.
2013 used_elements_end = elements.get() + new_size;
2014 allocated_elements_end = used_elements_end;
2015
2016 // **** Consistency check ****
2017 // At this point, each process should have a copy of the data.
2018 // Verify this in some sort of round-about way
2019 if constexpr (running_in_debug_mode())
2020 {
2021 replicated_across_communicator = true;
2022 const std::vector<char> packed_data = Utilities::pack(*this);
2023 const int hash =
2024 std::accumulate(packed_data.begin(), packed_data.end(), int(0));
2025 Assert(Utilities::MPI::max(hash, communicator) == hash,
2026 ExcInternalError());
2027 }
2028
2029# else
2030 // No MPI -> nothing to replicate
2031 (void)communicator;
2032 (void)root_process;
2033# endif
2034}
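// Editor's sketch (assumes an MPI build; 'comm' and the table contents are
// hypothetical): replicate a large read-only table so that each shared-memory
// node keeps a single copy instead of one per MPI rank:
//
//   dealii::AlignedVector<double> table;
//   if (dealii::Utilities::MPI::this_mpi_process(comm) == 0)
//     table.resize(1000000, 3.14);          // only the root fills the data
//   table.replicate_across_communicator(comm, /*root_process=*/0);
//   // every rank can now read table[i]; modifying the vector afterwards
//   // triggers ExcAlignedVectorChangeAfterReplication in debug mode.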
2035
2036
2037
2038template <class T>
2039inline void
2040AlignedVector<T>::swap(AlignedVector<T> &vec) noexcept
2041{
2042 // Swap the data in the 'elements' objects. Then also make sure that
2043 // their respective deleter objects point to the right place.
2044 std::swap(elements, vec.elements);
2045 elements.get_deleter().reset_owning_object(this);
2046 vec.elements.get_deleter().reset_owning_object(&vec);
2047
2048 // Now also swap the remaining members.
2049 std::swap(used_elements_end, vec.used_elements_end);
2050 std::swap(allocated_elements_end, vec.allocated_elements_end);
2051}
2052
2053
2054
2055template <class T>
2056inline bool
2057AlignedVector<T>::empty() const
2058{
2059 return used_elements_end == elements.get();
2060}
2061
2062
2063
2064template <class T>
2065inline typename AlignedVector<T>::size_type
2066AlignedVector<T>::size() const
2067{
2068 return used_elements_end - elements.get();
2069}
2070
2071
2072
2073template <class T>
2074inline typename AlignedVector<T>::size_type
2075AlignedVector<T>::capacity() const
2076{
2077 return allocated_elements_end - elements.get();
2078}
2079
2080
2081
2082template <class T>
2083inline typename AlignedVector<T>::reference
2084AlignedVector<T>::operator[](const size_type index)
2085{
2086 AssertIndexRange(index, size());
2087 return elements[index];
2088}
2089
2090
2091
2092template <class T>
2093inline typename AlignedVector<T>::const_reference
2094AlignedVector<T>::operator[](const size_type index) const
2095{
2096 AssertIndexRange(index, size());
2097 return elements[index];
2098}
2099
2100
2101
2102template <typename T>
2103inline typename AlignedVector<T>::pointer
2104AlignedVector<T>::data()
2105{
2106 return elements.get();
2107}
2108
2109
2110
2111template <typename T>
2112inline typename AlignedVector<T>::const_pointer
2113AlignedVector<T>::data() const
2114{
2115 return elements.get();
2116}
2117
2118
2119
2120template <class T>
2121inline typename AlignedVector<T>::iterator
2122AlignedVector<T>::begin()
2123{
2124 return elements.get();
2125}
2126
2127
2128
2129template <class T>
2130inline typename AlignedVector<T>::iterator
2131AlignedVector<T>::end()
2132{
2133 return used_elements_end;
2134}
2135
2136
2137
2138template <class T>
2139inline typename AlignedVector<T>::const_iterator
2140AlignedVector<T>::begin() const
2141{
2142 return elements.get();
2143}
2144
2145
2146
2147template <class T>
2148inline typename AlignedVector<T>::const_iterator
2149AlignedVector<T>::end() const
2150{
2151 return used_elements_end;
2152}
2153
2154
2155
2156template <class T>
2157template <class Archive>
2158inline void
2159AlignedVector<T>::save(Archive &ar, const unsigned int) const
2160{
2161 size_type vec_size = size();
2162 ar &vec_size;
2163 if (vec_size > 0)
2164 ar &boost::serialization::make_array(elements.get(), vec_size);
2165}
2166
2167
2168
2169template <class T>
2170template <class Archive>
2171inline void
2172AlignedVector<T>::load(Archive &ar, const unsigned int)
2173{
2174 size_type vec_size = 0;
2175 ar &vec_size;
2176
2177 if (vec_size > 0)
2178 {
2179 reserve(vec_size);
2180 ar &boost::serialization::make_array(elements.get(), vec_size);
2181 used_elements_end = elements.get() + vec_size;
2182 }
2183}
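// Editor's sketch: the split save()/load() members above plug into BOOST
// serialization in the usual way (the archive type and file name here are
// illustrative):
//
//   #include <boost/archive/text_oarchive.hpp>
//   #include <fstream>
//
//   dealii::AlignedVector<double> v(3, 1.0);
//   std::ofstream out("v.txt");
//   boost::archive::text_oarchive archive(out);
//   archive & v; // BOOST_SERIALIZATION_SPLIT_MEMBER() routes this to save()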
2184
2185
2186
2187template <class T>
2188inline typename AlignedVector<T>::size_type
2189AlignedVector<T>::memory_consumption() const
2190{
2191 size_type memory = sizeof(*this);
2192 for (const T *t = elements.get(); t != used_elements_end; ++t)
2193 memory += MemoryConsumption::memory_consumption(*t);
2194 memory += sizeof(T) * (allocated_elements_end - used_elements_end);
2195 return memory;
2196}
2197
2198
2199#endif // ifndef DOXYGEN
2200
2201
2207template <class T>
2208bool
2209operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
2210{
2211 if (lhs.size() != rhs.size())
2212 return false;
2213 for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
2214 rit = rhs.begin();
2215 lit != lhs.end();
2216 ++lit, ++rit)
2217 if (*lit != *rit)
2218 return false;
2219 return true;
2220}
2221
2222
2223
2229template <class T>
2230bool
2231operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
2232{
2233 return !(operator==(lhs, rhs));
2234}
2235
2236
2237DEAL_II_NAMESPACE_CLOSE
2238
2239#endif