aligned_vector.h
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2014 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#ifndef dealii_aligned_vector_h
#define dealii_aligned_vector_h

#include <deal.II/base/config.h>

#include <deal.II/base/exceptions.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/utilities.h>

// boost::serialization::make_array used to be in array.hpp, but was
// moved to a different file in BOOST 1.64
#include <boost/version.hpp>
#if BOOST_VERSION >= 106400
#  include <boost/serialization/array_wrapper.hpp>
#else
#  include <boost/serialization/array.hpp>
#endif
#include <boost/serialization/split_member.hpp>

#include <cstring>
#include <memory>
#include <type_traits>


DEAL_II_NAMESPACE_OPEN

// This class is a replacement for std::vector<T> whose memory is aligned to
// 64-byte boundaries, as required by the vectorized data types of deal.II.
template <class T>
class AlignedVector
{
public:
  // Standard container type declarations.
  using value_type      = T;
  using pointer         = value_type *;
  using const_pointer   = const value_type *;
  using iterator        = value_type *;
  using const_iterator  = const value_type *;
  using reference       = value_type &;
  using const_reference = const value_type &;
  using size_type       = std::size_t;

  // Empty constructor. Sets the vector size to zero.
  AlignedVector();

  // Range constructor.
  template <
    typename RandomAccessIterator,
    typename = std::enable_if_t<std::is_convertible_v<
      typename std::iterator_traits<RandomAccessIterator>::iterator_category,
      std::random_access_iterator_tag>>>
  AlignedVector(RandomAccessIterator begin, RandomAccessIterator end);

  // Set the vector size to @p size and initialize all elements with @p init.
  explicit AlignedVector(const size_type size, const T &init = T());

  ~AlignedVector() = default;

  // Copy and move constructors and assignment operators.
  AlignedVector(const AlignedVector<T> &vec);

  AlignedVector(AlignedVector<T> &&vec) noexcept;

  AlignedVector &
  operator=(const AlignedVector<T> &vec);

  AlignedVector &
  operator=(AlignedVector<T> &&vec) noexcept;

  // Change the size of the vector, leaving new elements of trivial types
  // uninitialized.
  void
  resize_fast(const size_type new_size);

  // Change the size of the vector, default-initializing new elements.
  void
  resize(const size_type new_size);

  // Change the size of the vector, initializing new elements with @p init.
  void
  resize(const size_type new_size, const T &init);

  // Reserve memory space for @p new_allocated_size elements.
  void
  reserve(const size_type new_allocated_size);

  // Release memory in excess of what is currently used.
  void
  shrink_to_fit();

  // Release all previously allocated memory and set the size to zero.
  void
  clear();

  // Insert an element at the end of the vector.
  void
  push_back(const T in_data);

  // Return the last element of the vector.
  reference
  back();

  const_reference
  back() const;

  // Insert several elements at the end of the vector.
  template <typename ForwardIterator>
  void
  insert_back(ForwardIterator begin, ForwardIterator end);

  // Insert the range [begin, end) before @p position.
  template <
    typename RandomAccessIterator,
    typename = std::enable_if_t<std::is_convertible_v<
      typename std::iterator_traits<RandomAccessIterator>::iterator_category,
      std::random_access_iterator_tag>>>
  iterator
  insert(const_iterator position,
         RandomAccessIterator begin,
         RandomAccessIterator end);

  // Fill the vector with default-constructed elements, resp. with copies of
  // the given element.
  void
  fill();

  void
  fill(const T &element);

  // Replicate the content of the vector across all processes of the given
  // communicator, storing only one copy per shared-memory domain.
  void
  replicate_across_communicator(const MPI_Comm communicator,
                                const unsigned int root_process);

  // Swap the contents of this vector with those of @p vec.
  void
  swap(AlignedVector<T> &vec) noexcept;

  bool
  empty() const;

  size_type
  size() const;

  size_type
  capacity() const;

  reference
  operator[](const size_type index);

  const_reference
  operator[](const size_type index) const;

  pointer
  data();

  const_pointer
  data() const;

  iterator
  begin();

  iterator
  end();

  const_iterator
  begin() const;

  const_iterator
  end() const;

  // Return an estimate of the memory consumption of this object in bytes.
  size_type
  memory_consumption() const;

  // Write the data of this object to a stream for the purpose of
  // serialization.
  template <class Archive>
  void
  save(Archive &ar, const unsigned int version) const;

  // Read the data of this object from a stream for the purpose of
  // serialization.
  template <class Archive>
  void
  load(Archive &ar, const unsigned int version);

#ifdef DOXYGEN
  template <class Archive>
  void
  serialize(Archive &archive, const unsigned int version);
#else
  // This macro defines the serialize() method that is compatible with
  // the templated save() and load() method that have been implemented.
  BOOST_SERIALIZATION_SPLIT_MEMBER()
#endif

  DeclExceptionMsg(ExcAlignedVectorChangeAfterReplication,
                   "Changing the vector after a call to "
                   "replicate_across_communicator() is not allowed.");

private:
  // Allocate a new array of size @p new_allocated_size and move the first
  // @p old_size elements of the current array into it.
  void
  allocate_and_move(const size_t old_size,
                    const size_t new_size,
                    const size_t new_allocated_size);

  // The deleter used by the std::unique_ptr holding the data below. It
  // either performs the default action (destroy the elements, free the
  // memory) or delegates to an action object, e.g., for memory allocated
  // in MPI shared-memory windows.
  class Deleter
  {
  public:
    Deleter(AlignedVector<T> *owning_object);

#ifdef DEAL_II_WITH_MPI
    Deleter(AlignedVector<T> *owning_object,
            const bool is_shmem_root,
            T *aligned_shmem_pointer,
            MPI_Comm shmem_group_communicator,
            MPI_Win shmem_window);
#endif

    // The operator called by std::unique_ptr to release the memory.
    void
    operator()(T *ptr);

    // Re-point the deleter at a new owning object (used after moves and
    // swaps, which carry the deleter along with the unique_ptr).
    void
    reset_owning_object(const AlignedVector<T> *new_aligned_vector_ptr);

  private:
    // Base class for the possible non-default deleter actions.
    class DeleterActionBase
    {
    public:
      virtual ~DeleterActionBase() = default;

      virtual void
      delete_array(const AlignedVector<T> *owning_aligned_vector, T *ptr) = 0;
    };

#ifdef DEAL_II_WITH_MPI

    // Deleter action for arrays allocated in an MPI shared-memory window.
    class MPISharedMemDeleterAction : public DeleterActionBase
    {
    public:
      MPISharedMemDeleterAction(const bool is_shmem_root,
                                T *aligned_shmem_pointer,
                                MPI_Comm shmem_group_communicator,
                                MPI_Win shmem_window);

      virtual void
      delete_array(const AlignedVector<T> *aligned_vector, T *ptr);

    private:
      // The information necessary to release the shared-memory window.
      const bool is_shmem_root;
      T *aligned_shmem_pointer;
      MPI_Comm shmem_group_communicator;
      MPI_Win shmem_window;
    };
#endif

    // Pointer to the object implementing the action of this deleter; a
    // nullptr encodes the default action.
    std::unique_ptr<DeleterActionBase> deleter_action_object;

    // Pointer to the vector that owns this deleter.
    const AlignedVector<T> *owning_aligned_vector;
  };

  // Pointer to the actual data array.
  std::unique_ptr<T[], Deleter> elements;

  // Pointer to one past the last valid value.
  T *used_elements_end;

  // Pointer to the end of the allocated memory.
  T *allocated_elements_end;

  // Flag recording whether replicate_across_communicator() has been called;
  // used in debug mode to catch subsequent modifications.
  bool replicated_across_communicator;
};
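
// Illustrative usage sketch (editorial addition, not part of the original
// header): AlignedVector intentionally mirrors the std::vector interface,
// so typical use looks like this, with the additional guarantee that
// data() points to 64-byte aligned storage:
//
// @code
//   AlignedVector<double> v(8, 1.0); // eight elements, all equal to 1.0
//   v.push_back(2.0);                // grows like std::vector
//   v.resize(16);                    // new elements are default-initialized
//   double *p = v.data();            // 64-byte aligned storage
// @endcode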


// ------------------------------- inline functions --------------------------


namespace internal
{
  // A class that, given a set of elements of type T, copies them into a
  // destination array using either memcpy or the copy constructor,
  // possibly in parallel.
  template <typename RandomAccessIterator, typename T>
  class AlignedVectorCopyConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorCopyConstruct(RandomAccessIterator source_begin,
                               RandomAccessIterator source_end,
                               T *const destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorCopyConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    // This method copies elements from the source to the destination given
    // in the constructor on a subrange given by two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // for trivial classes the assignment can use memcpy.
      if constexpr (std::is_trivial_v<T> == true &&
                    (std::is_same_v<T *, RandomAccessIterator> ||
                     std::is_same_v<const T *, RandomAccessIterator>) == true)
        std::memcpy(destination_ + begin,
                    source_ + begin,
                    (end - begin) * sizeof(T));
      else
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(*(source_ + i));
    }

  private:
    RandomAccessIterator source_;
    T *const destination_;
  };

  // Like AlignedVectorCopyConstruct, but use the move constructor of T
  // to create new elements from the source range.
  template <typename RandomAccessIterator, typename T>
  class AlignedVectorMoveConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorMoveConstruct(RandomAccessIterator source_begin,
                               RandomAccessIterator source_end,
                               T *const destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorMoveConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    // This method moves elements from the source to the destination given
    // in the constructor on a subrange given by two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // Classes with trivial assignment can use memcpy.
      if constexpr (std::is_trivial_v<T> == true &&
                    (std::is_same_v<T *, RandomAccessIterator> ||
                     std::is_same_v<const T *, RandomAccessIterator>) == true)
        std::memcpy(destination_ + begin,
                    source_ + begin,
                    (end - begin) * sizeof(T));
      else
        // For everything else just use the move constructor. The original
        // object remains alive and will be destroyed elsewhere.
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(std::move(*(source_ + i)));
    }

  private:
    RandomAccessIterator source_;
    T *const destination_;
  };

  // A class that initializes a memory range with copies of a given element,
  // either via copy assignment or copy construction, possibly in parallel.
  template <typename T, bool initialize_memory>
  class AlignedVectorInitialize : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorInitialize(const std::size_t size,
                            const T &element,
                            T *const destination)
      : element_(element)
      , destination_(destination)
      , trivial_element(false)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      // do not use memcmp for long double because on some systems it does not
      // completely fill its memory and may lead to false positives in
      // e.g. valgrind
      if constexpr (std::is_trivial_v<T> == true &&
                    std::is_same_v<T, long double> == false)
        {
          const unsigned char zero[sizeof(T)] = {};
          if (std::memcmp(zero, &element, sizeof(T)) == 0)
            trivial_element = true;
        }
      if (size < minimum_parallel_grain_size)
        AlignedVectorInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    // This sets elements on a subrange given by two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of zero can use memset.
      if constexpr (std::is_trivial_v<T> == true)
        if (trivial_element)
          {
            std::memset(destination_ + begin, 0, (end - begin) * sizeof(T));
            return;
          }

      copy_construct_or_assign(begin,
                               end,
                               std::bool_constant<initialize_memory>());
    }

  private:
    const T &element_;
    mutable T *destination_;
    bool trivial_element;

    // copy assignment operation
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::bool_constant<false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = element_;
    }

    // copy constructor (memory initialization)
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::bool_constant<true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T(element_);
    }
  };


  // Like AlignedVectorInitialize, but initialize the memory range with
  // default-constructed elements (or zeroed memory for trivial types).
  template <typename T, bool initialize_memory>
  class AlignedVectorDefaultInitialize
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorDefaultInitialize(const std::size_t size, T *const destination)
      : destination_(destination)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      if (size < minimum_parallel_grain_size)
        AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    // This initializes elements on a subrange given by two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of zero can use memset.
      if constexpr (std::is_trivial_v<T> == true)
        std::memset(destination_ + begin, 0, (end - begin) * sizeof(T));
      else
        default_construct_or_assign(begin,
                                    end,
                                    std::bool_constant<initialize_memory>());
    }

  private:
    mutable T *destination_;

    // copy assignment operation
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::bool_constant<false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = std::move(T());
    }

    // copy constructor (memory initialization)
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::bool_constant<true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T;
    }
  };

} // end of namespace internal
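
// Editorial note: the four helper classes above share one dispatch idea --
// trivially copyable types are handled with std::memcpy()/std::memset(),
// everything else element by element via placement-new or assignment. A
// minimal stand-alone sketch of that pattern (assuming only the C++
// standard library, not the deal.II internals used above):
//
// @code
//   template <typename T>
//   void copy_range(const T *source, const T *end, T *destination)
//   {
//     if constexpr (std::is_trivial_v<T>)
//       std::memcpy(destination, source, (end - source) * sizeof(T));
//     else
//       for (const T *p = source; p != end; ++p)
//         new (destination++) T(*p); // placement-new copy construction
//   }
// @endcode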


#ifndef DOXYGEN



template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object)
  : deleter_action_object(nullptr) // encode default action by using a nullptr
  , owning_aligned_vector(owning_object)
{}


# ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object,
                                          const bool is_shmem_root,
                                          T *aligned_shmem_pointer,
                                          MPI_Comm shmem_group_communicator,
                                          MPI_Win shmem_window)
  : deleter_action_object(
      std::make_unique<MPISharedMemDeleterAction>(is_shmem_root,
                                                  aligned_shmem_pointer,
                                                  shmem_group_communicator,
                                                  shmem_window))
  , owning_aligned_vector(owning_object)
{}
# endif


template <typename T>
inline void
AlignedVector<T>::Deleter::operator()(T *ptr)
{
  // If no special action has been registered (i.e., if the action pointer is
  // nullptr), then just perform the default action right here.
  if (deleter_action_object == nullptr)
    {
      if (ptr != nullptr)
        {
          Assert(owning_aligned_vector->used_elements_end != nullptr,
                 ExcInternalError());

          if (std::is_trivial_v<T> == false)
            for (T *p = owning_aligned_vector->used_elements_end - 1; p >= ptr;
                 --p)
              p->~T();

          std::free(ptr);
        }
    }
  else
    // Otherwise, let the action object do what is necessary
    deleter_action_object->delete_array(owning_aligned_vector, ptr);
}



template <typename T>
inline void
AlignedVector<T>::Deleter::reset_owning_object(
  const AlignedVector<T> *new_aligned_vector_ptr)
{
  owning_aligned_vector = new_aligned_vector_ptr;
}
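
// Editorial note: reset_owning_object() exists because the Deleter stores a
// pointer back to the AlignedVector that owns it, and moving or swapping the
// std::unique_ptr moves the deleter along with it. After such a move, the
// stored pointer would still reference the moved-from vector, so the move
// assignment and swap() implementations below re-point the deleter at its
// new owner.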


# ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::MPISharedMemDeleterAction::
  MPISharedMemDeleterAction(const bool is_shmem_root,
                            T *aligned_shmem_pointer,
                            MPI_Comm shmem_group_communicator,
                            MPI_Win shmem_window)
  : is_shmem_root(is_shmem_root)
  , aligned_shmem_pointer(aligned_shmem_pointer)
  , shmem_group_communicator(shmem_group_communicator)
  , shmem_window(shmem_window)
{}



template <typename T>
inline void
AlignedVector<T>::Deleter::MPISharedMemDeleterAction::delete_array(
  const AlignedVector<T> *aligned_vector,
  T *ptr)
{
  (void)ptr;
  // It would be nice to assert that aligned_vector->elements.get() equals ptr,
  // but it is not guaranteed to work: clang, for example, sets elements.get()
  // to nullptr and then calls the deleter on a previously made copy. Hence we
  // must assume here that elements.get() (which is managed by the unique_ptr)
  // may be nullptr at this point.
  //
  // used_elements_end is a member variable of AlignedVector (i.e., we control
  // it, not unique_ptr) so it is still set to its correct value.

  if (is_shmem_root)
    if (std::is_trivial_v<T> == false)
      for (T *p = aligned_vector->used_elements_end - 1; p >= ptr; --p)
        p->~T();

  int ierr;
  ierr = MPI_Win_free(&shmem_window);
  AssertThrowMPI(ierr);

  Utilities::MPI::free_communicator(shmem_group_communicator);
}

# endif


template <class T>
inline AlignedVector<T>::AlignedVector()
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
# ifdef DEBUG
  , replicated_across_communicator(false)
# endif
{}


template <class T>
template <typename RandomAccessIterator, typename>
inline AlignedVector<T>::AlignedVector(RandomAccessIterator begin,
                                       RandomAccessIterator end)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
  , replicated_across_communicator(false)
{
  allocate_and_move(0u, end - begin, end - begin);
  used_elements_end = allocated_elements_end;
  internal::AlignedVectorCopyConstruct<RandomAccessIterator, T>(begin,
                                                                end,
                                                                data());
}


template <class T>
inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
# ifdef DEBUG
  , replicated_across_communicator(false)
# endif
{
  if (size > 0)
    resize(size, init);
}



template <class T>
inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
# ifdef DEBUG
  , replicated_across_communicator(false)
# endif
{
  // copy the data from vec
  reserve(vec.size());
  used_elements_end = allocated_elements_end;
  internal::AlignedVectorCopyConstruct<T *, T>(vec.elements.get(),
                                               vec.used_elements_end,
                                               elements.get());
}



template <class T>
inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
  : AlignedVector()
{
  // forward to the move operator
  *this = std::move(vec);
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(const AlignedVector<T> &vec)
{
  const size_type new_size = vec.used_elements_end - vec.elements.get();

  // First throw away everything and re-allocate memory but leave that
  // memory uninitialized for now:
  resize(0);
  reserve(new_size);

  // Then copy the elements over by using the copy constructor on these
  // elements:
  internal::AlignedVectorCopyConstruct<T *, T>(vec.elements.get(),
                                               vec.used_elements_end,
                                               elements.get());

  // Finally adjust the pointer to the end of the elements that are used:
  used_elements_end = elements.get() + new_size;

  return *this;
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
{
  clear();

  // Move the actual data in the 'elements' object. One problem is that this
  // also moves the deleter object, but the deleter object
  // references 'this' (i.e., the 'this' pointer of the *moved-from*
  // object). The way this is implemented is that we have to move the
  // deleter as well, and then reset the pointer inside the deleter
  // that references the outer object.
  elements = std::move(vec.elements);
  elements.get_deleter().reset_owning_object(this);

  // Then also steal the other pointers and clear them in the original object:
  used_elements_end      = vec.used_elements_end;
  allocated_elements_end = vec.allocated_elements_end;

  vec.used_elements_end      = nullptr;
  vec.allocated_elements_end = nullptr;

  return *this;
}



template <class T>
inline void
AlignedVector<T>::resize_fast(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // need to still set the values in case the class is non-trivial because
      // virtual classes etc. need to run their (default) constructor
      if (std::is_trivial_v<T> == false)
        internal::AlignedVectorDefaultInitialize<T, true>(
          new_size - old_size, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the values to the default initializer
      internal::AlignedVectorDefaultInitialize<T, true>(
        new_size - old_size, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size, const T &init)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the desired init values
      internal::AlignedVectorInitialize<T, true>(
        new_size - old_size, init, elements.get() + old_size);
    }
}



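// Editorial note on the three resize variants above: they differ only in how
// newly created elements are treated when the vector grows. A hedged sketch
// of the observable difference:
//
// @code
//   AlignedVector<double> v;
//   v.resize(10);       // elements 0..9 are zero-initialized
//   v.resize(15, 42.0); // elements 10..14 are set to 42.0
//   v.resize_fast(20);  // elements 15..19 of this trivial type remain
//                       // uninitialized (non-trivial types are still
//                       // default-constructed)
// @endcode

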
template <class T>
inline void
AlignedVector<T>::allocate_and_move(const size_t old_size,
                                    const size_t new_size,
                                    const size_t new_allocated_size)
{
  // allocate and align along 64-byte boundaries (this is enough for all
  // levels of vectorization currently supported by deal.II)
  T *new_data_ptr;
  Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_data_ptr),
                                    64,
                                    new_size * sizeof(T));

  // Now create a deleter that encodes what should happen when the object is
  // released: We need to destroy the objects that are currently alive (in
  // reverse order), and then release the memory. Note that we catch the
  // 'this' pointer because the number of elements currently alive might
  // change over time.
  Deleter deleter(this);

  // copy whatever elements we need to retain
  if (new_allocated_size > 0)
    internal::AlignedVectorMoveConstruct<T *, T>(
      elements.get(), elements.get() + old_size, new_data_ptr);

  // Now reset all the member variables of the current object
  // based on the allocation above. Assigning to a std::unique_ptr
  // object also releases the previously pointed to memory.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how it loops over
  // the to-be-destroyed elements in Deleter::operator() above).
  elements               = decltype(elements)(new_data_ptr, std::move(deleter));
  used_elements_end      = elements.get() + old_size;
  allocated_elements_end = elements.get() + new_size;
}



template <class T>
inline void
AlignedVector<T>::reserve(const size_type new_allocated_size)
{
  const size_type old_size           = used_elements_end - elements.get();
  const size_type old_allocated_size = allocated_elements_end - elements.get();
  if (new_allocated_size > old_allocated_size)
    {
      // if we continuously increase the size of the vector, we might be
      // reallocating a lot of times. therefore, try to increase the size more
      // aggressively
      const size_type new_size =
        std::max(new_allocated_size, 2 * old_allocated_size);

      allocate_and_move(old_size, new_size, new_allocated_size);
    }
  else if (new_allocated_size == 0)
    clear();
  else // size_alloc < allocated_size
    {
    } // nothing to do here
}



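// Editorial note: growing the allocation to at least twice its previous size
// (rather than to exactly the requested size) is what makes push_back()
// amortized O(1): n consecutive insertions trigger only O(log n)
// reallocations, each of which moves the existing elements once.

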
template <class T>
inline void
AlignedVector<T>::shrink_to_fit()
{
# ifdef DEBUG
  Assert(replicated_across_communicator == false,
         ExcAlignedVectorChangeAfterReplication());
# endif
  const size_type used_size      = used_elements_end - elements.get();
  const size_type allocated_size = allocated_elements_end - elements.get();
  if (allocated_size > used_size)
    allocate_and_move(used_size, used_size, used_size);
}



template <class T>
inline void
AlignedVector<T>::clear()
{
  // Just release the memory (which also calls the destructor of the elements),
  // and then set the auxiliary pointers to invalid values.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how it loops over
  // the to-be-destroyed elements a few lines above).
  elements.reset();
  used_elements_end      = nullptr;
  allocated_elements_end = nullptr;
}



template <class T>
inline void
AlignedVector<T>::push_back(const T in_data)
{
  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
  if (used_elements_end == allocated_elements_end)
    reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
  if (std::is_trivial_v<T> == false)
    new (used_elements_end++) T(in_data);
  else
    *used_elements_end++ = in_data;
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::back()
{
  AssertIndexRange(0, size());
  T *field = used_elements_end - 1;
  return *field;
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::back() const
{
  AssertIndexRange(0, size());
  const T *field = used_elements_end - 1;
  return *field;
}



template <class T>
template <typename ForwardIterator>
inline void
AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
{
  const size_type old_size = size();
  reserve(old_size + (end - begin));
  for (; begin != end; ++begin, ++used_elements_end)
    {
      if (std::is_trivial_v<T> == false)
        new (used_elements_end) T;
      *used_elements_end = *begin;
    }
}



template <class T>
template <typename RandomAccessIterator, typename>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::insert(const_iterator position,
                         RandomAccessIterator begin,
                         RandomAccessIterator end)
{
  Assert(replicated_across_communicator == false,
         ExcAlignedVectorChangeAfterReplication());
  Assert(this->begin() <= position && position <= this->end(),
         ExcMessage("The position iterator is not valid."));
  const auto offset = position - this->begin();

  const size_type old_size   = size();
  const size_type range_size = end - begin;
  const size_type new_size   = old_size + range_size;
  if (range_size != 0)
    {
      // This is similar to allocate_and_move(), except that we need to move
      // whatever was before position and whatever is after it into two
      // different places
      T *new_data_ptr = nullptr;
      Utilities::System::posix_memalign(
        reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));

      // Correctly handle the case where the range is inside the present array
      // by creating a temporary.
      AlignedVector<T> temporary(begin, end);
      internal::AlignedVectorMoveConstruct<T *, T>(
        elements.get(), elements.get() + offset, new_data_ptr);
      internal::AlignedVectorMoveConstruct<T *, T>(
        temporary.begin(), temporary.end(), new_data_ptr + offset);
      internal::AlignedVectorMoveConstruct<T *, T>(
        elements.get() + offset,
        elements.get() + old_size,
        new_data_ptr + offset + range_size);

      Deleter deleter(this);
      elements               = decltype(elements)(new_data_ptr, std::move(deleter));
      used_elements_end      = elements.get() + new_size;
      allocated_elements_end = elements.get() + new_size;
    }
  return this->begin() + offset;
}



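// Illustrative sketch of insert() (editorial addition). Because the
// implementation above copies the input range into a temporary before
// reallocating, inserting a sub-range of a vector into itself is safe:
//
// @code
//   AlignedVector<int> v(4, 1);                  // {1, 1, 1, 1}
//   const int          extra[2] = {7, 8};
//   v.insert(v.begin() + 2, extra, extra + 2);   // {1, 1, 7, 8, 1, 1}
//   v.insert(v.end(), v.begin(), v.begin() + 2); // self-insertion is ok
// @endcode

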
template <class T>
inline void
AlignedVector<T>::fill()
{
  internal::AlignedVectorDefaultInitialize<T, false>(size(),
                                                     elements.get());
}



template <class T>
inline void
AlignedVector<T>::fill(const T &value)
{
  internal::AlignedVectorInitialize<T, false>(size(),
                                              value,
                                              elements.get());
}



template <class T>
inline void
AlignedVector<T>::replicate_across_communicator(const MPI_Comm communicator,
                                                const unsigned int root_process)
{
# ifdef DEAL_II_WITH_MPI

  // Let the root process broadcast its size. If it is zero, then all
  // processes just clear() their memory and reset themselves to a non-shared
  // empty object -- there is no point to run through complicated MPI
  // calls if the end result is an empty array. Otherwise, we continue on.
  const size_type new_size =
    Utilities::MPI::broadcast(communicator, size(), root_process);
  if (new_size == 0)
    {
      clear();
      return;
    }


  // **** Step 0 ****
  // All but the root process no longer need their data, so release the memory
  // used to store the previous elements.
  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
    {
      elements.reset();
      used_elements_end      = nullptr;
      allocated_elements_end = nullptr;
    }

  // **** Step 1 ****
  // Create communicators for each group of processes that can use
  // shared memory areas. Within each of these groups, we don't care about
  // which rank each of the old processes gets except that we would like to
  // make sure that the (global) root process will have rank=0 within
  // its own sub-communicator. We can do that through the third argument of
  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
  // order of processes within the split communicators, and we should set it to
  // zero for the root processes and one for all others -- which means that
  // for all of these other processes, MPI can choose whatever order it
  // wants because they have the same key (MPI then documents that these ties
  // will be broken according to these processes' rank in the old group).
  //
  // At least that's the theory. In practice, the MPI implementation where
  // this function was developed on does not seem to do that. (Bug report
  // is here: https://github.com/open-mpi/ompi/issues/8854)
  // We work around this by letting MPI_Comm_split_type choose whatever
  // rank it wants, and then reshuffle with MPI_Comm_split in a second
  // step -- not elegant, nor efficient, but seems to work:
  MPI_Comm shmem_group_communicator;
  {
    MPI_Comm shmem_group_communicator_temp;
    int ierr = MPI_Comm_split_type(communicator,
                                   MPI_COMM_TYPE_SHARED,
                                   /* key */ 0,
                                   MPI_INFO_NULL,
                                   &shmem_group_communicator_temp);
    AssertThrowMPI(ierr);

    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
    ierr = MPI_Comm_split(shmem_group_communicator_temp,
                          /* color */ 0,
                          key,
                          &shmem_group_communicator);
    AssertThrowMPI(ierr);

    // Verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
             ExcInternalError());

    // And get rid of the temporary communicator
    Utilities::MPI::free_communicator(shmem_group_communicator_temp);
  }
  const bool is_shmem_root =
    Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;

  // **** Step 2 ****
  // We then have to send the state of the current object from the
  // root process to one exemplar in each shmem group. To this end,
  // we create another subcommunicator that includes the ranks zero
  // of all shmem groups, and because of the trick above, we know
  // that this also includes the original root process.
  //
  // There are different ways of creating a "shmem_roots_communicator".
  // The conceptually easiest way is to create an MPI_Group that only
  // includes the shmem roots and then create a communicator from this
  // via MPI_Comm_create or MPI_Comm_create_group. The problem
  // with this is that we would have to exchange among all processes
  // which ones are shmem roots and which are not. This is awkward.
  //
  // A simpler way is to use MPI_Comm_split that uses "colors" to
  // indicate which sub-communicator each process wants to be in.
  // We use color=0 to indicate the group of shmem roots, and color=1
  // for all other processes -- the latter will simply not ever do
  // anything among themselves with the communicator so created.
  //
  // Using MPI_Comm_split has the additional benefit that, just as above,
  // we can choose where each rank will end up in shmem_roots_communicator.
  // We again set key=0 for the original root_process, and key=1 for all other
  // ranks; then, the global root becomes rank=0 on the
  // shmem_roots_communicator. We don't care how the other processes are
  // ordered.
  MPI_Comm shmem_roots_communicator;
  {
    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);

    const int ierr = MPI_Comm_split(communicator,
                                    /*color=*/
                                    (is_shmem_root ? 0 : 1),
                                    key,
                                    &shmem_roots_communicator);
    AssertThrowMPI(ierr);

    // Again verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
             ExcInternalError());
  }

  const unsigned int shmem_roots_root_rank = 0;
  const bool         is_shmem_roots_root =
    (is_shmem_root && (Utilities::MPI::this_mpi_process(
                         shmem_roots_communicator) == shmem_roots_root_rank));

  // Now let the original root_process broadcast the current object to all
  // shmem roots. We know from the construction above that rank 0 of
  // shmem_roots_communicator is the original root process that has all of
  // the data.
  if (is_shmem_root)
    {
      if (std::is_trivial_v<T>)
        {
          // The data is "trivial", i.e., we can copy things directly without
          // having to go through the serialization/deserialization machinery of
          // Utilities::MPI::broadcast.
          //
          // In that case, first tell all of the other shmem roots how many
          // elements we will have to deal with, and let them resize their
          // (non-shared) arrays.
          const size_type new_size =
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      size(),
                                      shmem_roots_root_rank);
          if (is_shmem_roots_root == false)
            resize(new_size);

          // Then directly copy from the root process into these buffers
          int ierr = MPI_Bcast(elements.get(),
                               sizeof(T) * new_size,
                               MPI_CHAR,
                               shmem_roots_root_rank,
                               shmem_roots_communicator);
          AssertThrowMPI(ierr);
        }
      else
        {
          // The objects to be sent around are not "trivial", and so we have
          // to go through the serialization/deserialization machinery. On all
          // but the sending process, overwrite the current state with the
          // vector just broadcast.
          //
          // On the root rank, this would lead to resetting the 'entries'
          // pointer, which would trigger the deleter which would lead to a
          // deadlock. So we just send the result of the broadcast() call to
          // nirvana on the root process and keep our current state.
          if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      *this,
                                      shmem_roots_root_rank);
          else
            *this = Utilities::MPI::broadcast(shmem_roots_communicator,
                                              *this,
                                              shmem_roots_root_rank);
        }
    }

  // We no longer need the shmem roots communicator, so get rid of it
  Utilities::MPI::free_communicator(shmem_roots_communicator);


  // **** Step 3 ****
  // At this point, all shmem groups have one shmem root process that has
  // a copy of the data. This is the point where each shmem group should
  // establish a shmem area to put the data into. As mentioned above,
  // we know that the shmem roots are rank 0 in their respective
  // shmem_group_communicator.
  //
  // The process for all of this works as follows: While all processes in
  // the shmem group participate in the generation of the shmem memory window,
  // only the shmem root actually allocates any memory -- the rest just
  // allocate zero bytes of their own. We allocate space for exactly
  // size() elements (computed on the shmem_root that already has the data)
  // and add however many bytes are necessary so that we know that we can align
  // things to 64-byte boundaries. The worst case happens if the memory system
  // gives us a pointer to an address one byte past a desired alignment
  // boundary, and in that case aligning the memory will require us to waste the
  // first (align_by-1) bytes. So we have to ask for
  //   size() * sizeof(T) + (align_by - 1)
  // bytes.
  //
  // Before MPI 4.0, there was no way to specify that we want memory aligned to
  // a certain number of bytes. This is going to come back to bite us further
  // down below when we try to get a properly aligned pointer to our memory
  // region, see the commentary there. Starting with MPI 4.0, one can set a
  // flag in an MPI_Info structure that requests a desired alignment, so we do
  // this for forward compatibility; MPI implementations ignore flags they don't
  // know anything about, and so setting this flag is backward compatible also
  // to older MPI versions.
  MPI_Win        shmem_window;
  void          *base_ptr;
  const MPI_Aint align_by = 64;
  const MPI_Aint alloc_size =
    Utilities::MPI::broadcast(shmem_group_communicator,
                              (size() * sizeof(T) + (align_by - 1)),
                              0);

  {
    int ierr;

    MPI_Info mpi_info;
    ierr = MPI_Info_create(&mpi_info);
    AssertThrowMPI(ierr);
    ierr = MPI_Info_set(mpi_info,
                        "mpi_minimum_memory_alignment",
                        std::to_string(align_by).c_str());
    AssertThrowMPI(ierr);
    ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
                                   /* disp_unit = */ 1,
                                   mpi_info,
                                   shmem_group_communicator,
                                   &base_ptr,
                                   &shmem_window);
    AssertThrowMPI(ierr);

    ierr = MPI_Info_free(&mpi_info);
    AssertThrowMPI(ierr);
  }


  // **** Step 4 ****
  // The next step is to teach all non-shmem root processes what the pointer to
  // the array is that the shmem-root created. MPI has a nifty way for this
  // given that only a single process actually allocated memory in the window:
  // When calling MPI_Win_shared_query, the MPI documentation says that
  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
  // the pointer, disp_unit, and size of the memory segment belonging the lowest
  // rank that specified size > 0. If all processes in the group attached to the
  // window specified size = 0, then the call returns size = 0 and a baseptr as
  // if MPI_ALLOC_MEM was called with size = 0."
  //
  // This will allow us to obtain the pointer to the shmem root's memory area,
  // which is the only one we care about. (None of the other processes have
  // even allocated any memory.)
  //
  // We don't need to do this on the shmem root process: This process has
  // already gotten its base_ptr correctly set above, and we can determine the
  // array size by just calling size().
  if (is_shmem_root == false)
    {
      int      disp_unit;
      MPI_Aint alloc_size; // not actually used
      const int ierr = MPI_Win_shared_query(
        shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
      AssertThrowMPI(ierr);

      // Make sure we actually got a pointer, and check that the disp_unit is
      // equal to 1 (as set above)
      Assert(base_ptr != nullptr, ExcInternalError());
      Assert(disp_unit == 1, ExcInternalError());
    }


  // **** Step 5 ****
  // Now that all processes know the address of the space that is visible to
  // everyone, we need to figure out whether it is properly aligned and if not,
  // find the next aligned address.
  //
  // std::align does that, but it also modifies its last two arguments. The
  // documentation of that function at
  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but I
  // *think* that the following should do given that we do not use base_ptr and
  // available_space any further after the call to std::align.
  std::size_t available_space       = alloc_size;
  void       *base_ptr_backup       = base_ptr;
  T          *aligned_shmem_pointer = static_cast<T *>(
    std::align(align_by, new_size * sizeof(T), base_ptr, available_space));
  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());

  // There is one step to guard against. It is *conceivable* that the base_ptr
  // we have previously obtained from MPI_Win_shared_query is mapped so
  // awkwardly into the different MPI processes' memory spaces that it is
  // aligned in one memory space, but not another. In that case, different
  // processes would align base_ptr differently, and adjust available_space
  // differently. We can check that by making sure that the max (or min) over
  // all processes is equal to every process's value. If that's not the case,
  // then the whole idea of aligning above is wrong and we need to rethink what
  // it means to align data in a shared memory space.
  //
  // One might be tempted to think that this is not how MPI implementations
  // actually arrange things. Alas, when developing this functionality in 2021,
  // this is really how at least OpenMPI ends up doing things. (This is with an
  // OpenMPI implementation of MPI 3.1, so it does not support the flag we set
  // in the MPI_Info structure above when allocating the memory window.) Indeed,
  // when running this code on three processes, one ends up with base_ptr values
  // of
  //   base_ptr=0x7f0842f02108
  //   base_ptr=0x7fc0a47881d0
  //   base_ptr=0x7f64872db108
  // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so there
  // is no common offset std::align could find that leads to a 64-byte
  // aligned memory address in all three memory spaces. That's a tremendous
  // nuisance and there is really nothing we can do about this other than just
  // fall back on the (unaligned) base_ptr in that case.
  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
      Utilities::MPI::max(available_space, shmem_group_communicator))
    aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);


  // **** Step 6 ****
  // If this is the shmem root process, we need to copy the data into the
  // shared memory space.
  if (is_shmem_root)
    {
      if (std::is_trivial_v<T> == true)
        std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
      else
        for (std::size_t i = 0; i < size(); ++i)
          new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
    }

  // Make sure that the shared memory host has copied the data before we try to
  // access it.
  const int ierr = MPI_Barrier(shmem_group_communicator);
  AssertThrowMPI(ierr);

  // **** Step 7 ****
  // Finally, we need to set the pointers of this object to what we just
  // learned. This also releases all memory that may have been in use
  // previously.
  //
  // The part that is a bit tricky is how to write the deleter of this
  // shared memory object. When we want to get rid of it, we need to
  // also release the MPI_Win object along with the shmem_group_communicator
  // object. That's because as long as we use the shared memory, we still need
  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
  // communicator. (The former is definitely true, the latter is not quite clear
  // from the MPI documentation, but seems reasonable.) So we need to have a
  // deleter for the pointer that ensures that upon release of the memory, we
  // not only call the destructor of these memory elements (but only once, on
  // the shmem root!) but also destroy the MPI_Win and the communicator. All of
  // that is encapsulated in the following call where the deleter makes copies
  // of the arguments it is given.
  elements = decltype(elements)(aligned_shmem_pointer,
                                Deleter(this,
                                        is_shmem_root,
                                        aligned_shmem_pointer,
                                        shmem_group_communicator,
                                        shmem_window));

  // We then also have to set the other two pointers that define the state of
  // the current object. Note that the new buffer size is exactly as large as
  // necessary, i.e., can store size() elements, regardless of the number of
  // allocated elements in the original objects.
  used_elements_end      = elements.get() + new_size;
  allocated_elements_end = used_elements_end;

  // **** Consistency check ****
  // At this point, each process should have a copy of the data.
  // Verify this in some sort of round-about way
# ifdef DEBUG
  replicated_across_communicator      = true;
  const std::vector<char> packed_data = Utilities::pack(*this);
  const int               hash =
    std::accumulate(packed_data.begin(), packed_data.end(), int(0));
  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
# endif

# else
  // No MPI -> nothing to replicate
  (void)communicator;
  (void)root_process;
# endif
}



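// Hedged usage sketch for replicate_across_communicator() (editorial
// addition): fill the vector on one designated process, then keep a single
// shared copy per shared-memory domain instead of one copy per MPI rank.
//
// @code
//   const unsigned int    root = 0;
//   AlignedVector<double> table;
//   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == root)
//     table.resize(1000000, 3.14); // only the root needs to fill the data
//   table.replicate_across_communicator(MPI_COMM_WORLD, root);
//   // All ranks may now read table[i]; the vector must no longer be
//   // modified (see ExcAlignedVectorChangeAfterReplication above).
// @endcode

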
template <class T>
inline void
AlignedVector<T>::swap(AlignedVector<T> &vec) noexcept
{
  // Swap the data in the 'elements' objects. Then also make sure that
  // their respective deleter objects point to the right place.
  std::swap(elements, vec.elements);
  elements.get_deleter().reset_owning_object(this);
  vec.elements.get_deleter().reset_owning_object(&vec);

  // Now also swap the remaining members.
  std::swap(used_elements_end, vec.used_elements_end);
  std::swap(allocated_elements_end, vec.allocated_elements_end);
}



template <class T>
inline bool
AlignedVector<T>::empty() const
{
  return used_elements_end == elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::size() const
{
  return used_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::capacity() const
{
  return allocated_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::operator[](const size_type index)
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::operator[](const size_type index) const
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <typename T>
inline typename AlignedVector<T>::pointer
AlignedVector<T>::data()
{
  return elements.get();
}



template <typename T>
inline typename AlignedVector<T>::const_pointer
AlignedVector<T>::data() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::begin()
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::end()
{
  return used_elements_end;
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::begin() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::end() const
{
  return used_elements_end;
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::save(Archive &ar, const unsigned int) const
{
  size_type vec_size = size();
  ar &vec_size;
  if (vec_size > 0)
    ar &boost::serialization::make_array(elements.get(), vec_size);
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::load(Archive &ar, const unsigned int)
{
  size_type vec_size = 0;
  ar &vec_size;

  if (vec_size > 0)
    {
      reserve(vec_size);
      ar &boost::serialization::make_array(elements.get(), vec_size);
      used_elements_end = elements.get() + vec_size;
    }
}



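// Hedged serialization sketch (editorial addition), using the
// BOOST_SERIALIZATION_SPLIT_MEMBER() mechanism declared above together with
// standard BOOST archive types:
//
// @code
//   #include <boost/archive/text_oarchive.hpp>
//   #include <boost/archive/text_iarchive.hpp>
//   #include <sstream>
//
//   AlignedVector<double> v(4, 2.5);
//   std::ostringstream    out;
//   {
//     boost::archive::text_oarchive oa(out);
//     oa << v; // invokes save()
//   }
//
//   AlignedVector<double> w;
//   std::istringstream in(out.str());
//   {
//     boost::archive::text_iarchive ia(in);
//     ia >> w; // invokes load()
//   }
// @endcode

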
template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::memory_consumption() const
{
  size_type memory = sizeof(*this);
  for (const T *t = elements.get(); t != used_elements_end; ++t)
    memory += dealii::MemoryConsumption::memory_consumption(*t);
  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
  return memory;
}


#endif // ifndef DOXYGEN


// Relational operator == for AlignedVector
template <class T>
bool
operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  if (lhs.size() != rhs.size())
    return false;
  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
                                                 rit = rhs.begin();
       lit != lhs.end();
       ++lit, ++rit)
    if (*lit != *rit)
      return false;
  return true;
}



// Relational operator != for AlignedVector
template <class T>
bool
operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  return !(operator==(lhs, rhs));
}


DEAL_II_NAMESPACE_CLOSE

#endif