aligned_vector.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#ifndef dealii_aligned_vector_h
#define dealii_aligned_vector_h

#include <deal.II/base/config.h>

#include <deal.II/base/exceptions.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/utilities.h>

// boost::serialization::make_array used to be in array.hpp, but was
// moved to a different file in BOOST 1.64
#include <boost/version.hpp>
#if BOOST_VERSION >= 106400
#  include <boost/serialization/array_wrapper.hpp>
#else
#  include <boost/serialization/array.hpp>
#endif
#include <boost/serialization/split_member.hpp>

#include <cstring>
#include <memory>
#include <numeric>
#include <type_traits>



DEAL_II_NAMESPACE_OPEN
// A class almost identical in interface to std::vector<T>, but whose memory
// is aligned to 64-byte boundaries as required by deal.II's vectorization
// facilities.
template <class T>
class AlignedVector
{
public:
  // Standard container types.
  using value_type      = T;
  using pointer         = value_type *;
  using const_pointer   = const value_type *;
  using iterator        = value_type *;
  using const_iterator  = const value_type *;
  using reference       = value_type &;
  using const_reference = const value_type &;
  using size_type       = std::size_t;

  // Empty constructor. Sets the vector size to zero.
  AlignedVector();

  // Set the vector size to the given size and initialize all elements with
  // the given value.
  explicit AlignedVector(const size_type size, const T &init = T());

  // Destructor.
  ~AlignedVector() = default;

  // Copy constructor.
  AlignedVector(const AlignedVector<T> &vec);

  // Move constructor.
  AlignedVector(AlignedVector<T> &&vec) noexcept;

  // Copy assignment operation.
  AlignedVector &
  operator=(const AlignedVector<T> &vec);

  // Move assignment operator.
  AlignedVector &
  operator=(AlignedVector<T> &&vec) noexcept;

  // Change the size of the vector without initializing newly created
  // elements of trivial type.
  void
  resize_fast(const size_type new_size);

  // Change the size of the vector, default-initializing newly created
  // elements.
  void
  resize(const size_type new_size);

  // Change the size of the vector, copying the given value into newly
  // created elements.
  void
  resize(const size_type new_size, const T &init);

  // Reserve memory space for the given number of elements.
  void
  reserve(const size_type new_allocated_size);

  // Release all memory that is not needed to store the current elements.
  void
  shrink_to_fit();

  // Release all previously allocated memory and leave the vector empty.
  void
  clear();

  // Insert an element at the end of the vector.
  void
  push_back(const T in_data);

  // Return the last element of the vector.
  reference
  back();

  const_reference
  back() const;

  // Insert several elements at the end of the vector.
  template <typename ForwardIterator>
  void
  insert_back(ForwardIterator begin, ForwardIterator end);

  // Fill the vector with default-constructed elements, or with copies of
  // the given element, respectively.
  void
  fill();

  void
  fill(const T &element);

  // Replicate the content of this vector across all processes of the given
  // communicator, using MPI-3 shared memory within each node.
  void
  replicate_across_communicator(const MPI_Comm     communicator,
                                const unsigned int root_process);

  // Swap the contents of this vector with the given vector.
  void
  swap(AlignedVector<T> &vec);

  // Return whether the vector is empty.
  bool
  empty() const;

  // Return the size of the vector.
  size_type
  size() const;

  // Return the capacity of the vector.
  size_type
  capacity() const;

  // Read-write and read-only element access.
  reference
  operator[](const size_type index);

  const_reference
  operator[](const size_type index) const;

  // Return pointers to the underlying data.
  pointer
  data();

  const_pointer
  data() const;

  // Iterators to the beginning and end of the used elements.
  iterator
  begin();

  iterator
  end();

  const_iterator
  begin() const;

  const_iterator
  end() const;

  // Return the memory consumption of this object in bytes.
  size_type
  memory_consumption() const;

  // Write and read the data of this object to/from a stream for
  // serialization purposes.
  template <class Archive>
  void
  save(Archive &ar, const unsigned int version) const;

  template <class Archive>
  void
  load(Archive &ar, const unsigned int version);

#ifdef DOXYGEN
  template <class Archive>
  void
  serialize(Archive &archive, const unsigned int version);
#else
  // This macro defines the serialize() method that is compatible with
  // the templated save() and load() method that have been implemented.
  BOOST_SERIALIZATION_SPLIT_MEMBER()
#endif

  // Exception raised when the vector is changed after a call to
  // replicate_across_communicator().
  DeclExceptionMsg(ExcAlignedVectorChangeAfterReplication,
                   "Changing the vector after a call to "
                   "replicate_across_communicator() is not allowed.");

private:
  // Allocate a new array of the given size and move the currently used
  // elements into it.
  void
  allocate_and_move(const size_t old_size,
                    const size_t new_size,
                    const size_t new_allocated_size);

  // Deleter class for the unique_ptr that stores the elements. It either
  // performs the default action (destroy elements, then free the memory),
  // or defers to an "action" object that knows how to release MPI
  // shared-memory windows.
  class Deleter
  {
  public:
    // Constructor for ordinary, process-local memory.
    Deleter(AlignedVector<T> *owning_object);

#ifdef DEAL_II_WITH_MPI
    // Constructor for memory allocated as an MPI shared-memory window.
    Deleter(AlignedVector<T> *owning_object,
            const bool        is_shmem_root,
            T                *aligned_shmem_pointer,
            MPI_Comm          shmem_group_communicator,
            MPI_Win           shmem_window);
#endif

    // The function called by the unique_ptr to release the memory.
    void
    operator()(T *ptr);

    // Record that a different AlignedVector object now owns this deleter
    // (used when vectors are moved or swapped).
    void
    reset_owning_object(const AlignedVector<T> *new_aligned_vector_ptr);

  private:
    // Base class for objects that encode non-default delete actions.
    class DeleterActionBase
    {
    public:
      virtual ~DeleterActionBase() = default;

      // Release the memory pointed to by ptr.
      virtual void
      delete_array(const AlignedVector<T> *owning_aligned_vector, T *ptr) = 0;
    };

#ifdef DEAL_II_WITH_MPI

    // Delete action for arrays allocated as MPI shared-memory windows.
    class MPISharedMemDeleterAction : public DeleterActionBase
    {
    public:
      MPISharedMemDeleterAction(const bool is_shmem_root,
                                T         *aligned_shmem_pointer,
                                MPI_Comm   shmem_group_communicator,
                                MPI_Win    shmem_window);

      virtual void
      delete_array(const AlignedVector<T> *aligned_vector, T *ptr);

    private:
      // The variables necessary to destroy the shared-memory window.
      const bool is_shmem_root;
      T         *aligned_shmem_pointer;
      MPI_Comm   shmem_group_communicator;
      MPI_Win    shmem_window;
    };
#endif

    // Pointer to the object encoding a non-default delete action; a
    // nullptr encodes the default action.
    std::unique_ptr<DeleterActionBase> deleter_action_object;

    // Pointer to the AlignedVector object that owns this deleter.
    const AlignedVector<T> *owning_aligned_vector;
  };

  // Pointer to the actual data array, released via the deleter above.
  std::unique_ptr<T[], Deleter> elements;

  // Pointer to one past the last constructed element.
  T *used_elements_end;

  // Pointer to the end of the allocated memory.
  T *allocated_elements_end;

  // Flag recording whether the vector has been replicated across an MPI
  // communicator (used in debug mode to assert that it is not changed
  // afterwards).
  bool replicated_across_communicator;
};

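/*
 * Usage sketch (illustration only, not part of the library header):
 * AlignedVector mirrors the std::vector interface, but the first element is
 * guaranteed to sit on a 64-byte boundary because allocate_and_move() below
 * obtains memory via posix_memalign() with an alignment of 64.
 *
 * @code
 *   AlignedVector<double> v(16, 1.0); // 16 elements, all set to 1.0
 *   v.push_back(2.0);                 // grows like std::vector
 *   Assert(reinterpret_cast<std::uintptr_t>(v.data()) % 64 == 0,
 *          ExcInternalError());       // data() is 64-byte aligned
 * @endcode
 */
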

// ------------------------------- inline functions --------------------------

namespace internal
{
  // A class that, given a range of memory locations, calls the placement-new
  // copy constructor on them -- in parallel if there are enough elements to
  // make this worthwhile.
  template <typename T>
  class AlignedVectorCopyConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorCopyConstruct(const T *const source_begin,
                               const T *const source_end,
                               T *const       destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorCopyConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    // Copy elements from the source to the destination given in the
    // constructor, on the subrange given by the two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // For trivial classes, assignment can use memcpy. Cast the elements to
      // (void*) to silence a compiler warning for virtual classes (they will
      // never arrive here because they are non-trivial).
      if (std::is_trivial_v<T> == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<const void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(source_[i]);
    }

  private:
    const T *const source_;
    T *const       destination_;
  };

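  // To illustrate the grain size above (a note added for exposition, not
  // part of the original file): minimum_parallel_grain_size = 160000 /
  // sizeof(T) + 1 makes roughly 160 kB of data the break-even point. For
  // T = double (8 bytes) the parallel path is taken only for 20001 or more
  // elements; smaller copies run serially to avoid the overhead of
  // spawning tasks.
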

  // Like AlignedVectorCopyConstruct, but using the move constructor to
  // transfer elements from the source to the destination.
  template <typename T>
  class AlignedVectorMoveConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorMoveConstruct(T *const source_begin,
                               T *const source_end,
                               T *const destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorMoveConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    // Move elements from the source to the destination given in the
    // constructor, on the subrange given by the two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // Classes with trivial assignment can use memcpy. Cast the elements to
      // (void*) to silence a compiler warning for virtual classes (they will
      // never arrive here because they are non-trivial).
      if (std::is_trivial_v<T> == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        // For everything else just use the move constructor. The original
        // object remains alive and will be destroyed elsewhere.
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(std::move(source_[i]));
    }

  private:
    T *const source_;
    T *const destination_;
  };


  // A class that, given a range of memory locations, either calls the
  // placement-new copy constructor on them (if initialize_memory is true)
  // or assigns a given initializer to them (if initialize_memory is false).
  template <typename T, bool initialize_memory>
  class AlignedVectorInitialize
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorInitialize(const std::size_t size,
                            const T          &element,
                            T *const          destination)
      : element_(element)
      , destination_(destination)
      , trivial_element(false)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      // do not use memcmp for long double because on some systems it does not
      // completely fill its memory and may lead to false positives in
      // e.g. valgrind
      if (std::is_trivial_v<T> == true &&
          std::is_same_v<T, long double> == false)
        {
          const unsigned char zero[sizeof(T)] = {};
          // cast element to (void*) to silence compiler warning for virtual
          // classes (they will never arrive here because they are
          // non-trivial).
          if (std::memcmp(zero,
                          static_cast<const void *>(&element),
                          sizeof(T)) == 0)
            trivial_element = true;
        }
      if (size < minimum_parallel_grain_size)
        AlignedVectorInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    // Initialize the subrange given by the two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of zero can use memset. Cast the
      // elements to (void*) to silence a compiler warning for virtual
      // classes (they will never arrive here because they are non-trivial).
      if (std::is_trivial_v<T> == true && trivial_element)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        copy_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    const T   &element_;
    mutable T *destination_;
    bool       trivial_element;

    // copy assignment operation
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = element_;
    }

    // copy constructor (memory initialization)
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T(element_);
    }
  };



  // Like AlignedVectorInitialize, but using default-constructed objects as
  // initializers.
  template <typename T, bool initialize_memory>
  class AlignedVectorDefaultInitialize
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    // Constructor. Issues a parallel call if there are sufficiently many
    // elements, otherwise works in serial.
    AlignedVectorDefaultInitialize(const std::size_t size,
                                   T *const          destination)
      : destination_(destination)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      if (size < minimum_parallel_grain_size)
        AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    // Initialize the subrange given by the two integers.
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of zero can use memset. Cast the
      // elements to (void*) to silence a compiler warning for virtual
      // classes (they will never arrive here because they are non-trivial).
      if (std::is_trivial_v<T> == true)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        default_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    mutable T *destination_;

    // copy assignment operation
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = std::move(T());
    }

    // copy constructor (memory initialization)
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T;
    }
  };

} // end of namespace internal


#ifndef DOXYGEN



template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object)
  : deleter_action_object(nullptr) // encode default action by using a nullptr
  , owning_aligned_vector(owning_object)
{}


#  ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object,
                                          const bool        is_shmem_root,
                                          T *aligned_shmem_pointer,
                                          MPI_Comm shmem_group_communicator,
                                          MPI_Win  shmem_window)
  : deleter_action_object(
      std::make_unique<MPISharedMemDeleterAction>(is_shmem_root,
                                                  aligned_shmem_pointer,
                                                  shmem_group_communicator,
                                                  shmem_window))
  , owning_aligned_vector(owning_object)
{}
#  endif


template <typename T>
inline void
AlignedVector<T>::Deleter::operator()(T *ptr)
{
  // If no special action has been registered (i.e., if the action pointer is
  // nullptr), then just perform the default action right here.
  if (deleter_action_object == nullptr)
    {
      if (ptr != nullptr)
        {
          Assert(owning_aligned_vector->used_elements_end != nullptr,
                 ExcInternalError());

          if (std::is_trivial_v<T> == false)
            for (T *p = owning_aligned_vector->used_elements_end - 1; p >= ptr;
                 --p)
              p->~T();

          std::free(ptr);
        }
    }
  else
    // Otherwise, let the action object do what is necessary
    deleter_action_object->delete_array(owning_aligned_vector, ptr);
}



template <typename T>
inline void
AlignedVector<T>::Deleter::reset_owning_object(
  const AlignedVector<T> *new_aligned_vector_ptr)
{
  owning_aligned_vector = new_aligned_vector_ptr;
}


#  ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::MPISharedMemDeleterAction::
  MPISharedMemDeleterAction(const bool is_shmem_root,
                            T         *aligned_shmem_pointer,
                            MPI_Comm   shmem_group_communicator,
                            MPI_Win    shmem_window)
  : is_shmem_root(is_shmem_root)
  , aligned_shmem_pointer(aligned_shmem_pointer)
  , shmem_group_communicator(shmem_group_communicator)
  , shmem_window(shmem_window)
{}



template <typename T>
inline void
AlignedVector<T>::Deleter::MPISharedMemDeleterAction::delete_array(
  const AlignedVector<T> *aligned_vector,
  T                      *ptr)
{
  (void)ptr;
  // It would be nice to assert that aligned_vector->elements.get() equals ptr,
  // but it is not guaranteed to work: clang, for example, sets elements.get()
  // to nullptr and then calls the deleter on a previously made copy. Hence we
  // must assume here that elements.get() (which is managed by the unique_ptr)
  // may be nullptr at this point.
  //
  // used_elements_end is a member variable of AlignedVector (i.e., we control
  // it, not unique_ptr) so it is still set to its correct value.

  if (is_shmem_root)
    if (std::is_trivial_v<T> == false)
      for (T *p = aligned_vector->used_elements_end - 1; p >= ptr; --p)
        p->~T();

  int ierr;
  ierr = MPI_Win_free(&shmem_window);
  AssertThrowMPI(ierr);

  Utilities::MPI::free_communicator(shmem_group_communicator);
}

#  endif

template <class T>
inline AlignedVector<T>::AlignedVector()
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
#  ifdef DEBUG
  , replicated_across_communicator(false)
#  endif
{}



template <class T>
inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
#  ifdef DEBUG
  , replicated_across_communicator(false)
#  endif
{
  if (size > 0)
    resize(size, init);
}



template <class T>
inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
#  ifdef DEBUG
  , replicated_across_communicator(false)
#  endif
{
  // copy the data from vec
  reserve(vec.size());
  used_elements_end = allocated_elements_end;
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());
}



template <class T>
inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
  : AlignedVector<T>()
{
  // forward to the move operator
  *this = std::move(vec);
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(const AlignedVector<T> &vec)
{
  const size_type new_size = vec.used_elements_end - vec.elements.get();

  // First throw away everything and re-allocate memory but leave that
  // memory uninitialized for now:
  resize(0);
  reserve(new_size);

  // Then copy the elements over by using the copy constructor on these
  // elements:
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());

  // Finally adjust the pointer to the end of the elements that are used:
  used_elements_end = elements.get() + new_size;

  return *this;
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
{
  clear();

  // Move the actual data in the 'elements' object. One problem is that this
  // also moves the deleter object, but the deleter object
  // references 'this' (i.e., the 'this' pointer of the *moved-from*
  // object). The way this is implemented is that we have to move the
  // deleter as well, and then reset the pointer inside the deleter
  // that references the outer object.
  elements = std::move(vec.elements);
  elements.get_deleter().reset_owning_object(this);

  // Then also steal the other pointers and clear them in the original object:
  used_elements_end      = vec.used_elements_end;
  allocated_elements_end = vec.allocated_elements_end;

  vec.used_elements_end      = nullptr;
  vec.allocated_elements_end = nullptr;

  return *this;
}



template <class T>
inline void
AlignedVector<T>::resize_fast(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // need to still set the values in case the class is non-trivial because
      // virtual classes etc. need to run their (default) constructor
      if (std::is_trivial_v<T> == false)
        dealii::internal::AlignedVectorDefaultInitialize<T, true>(
          new_size - old_size, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the values to the default initializer
      dealii::internal::AlignedVectorDefaultInitialize<T, true>(
        new_size - old_size, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size, const T &init)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the desired init values
      dealii::internal::AlignedVectorInitialize<T, true>(
        new_size - old_size, init, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::allocate_and_move(const size_t old_size,
                                    const size_t new_size,
                                    const size_t new_allocated_size)
{
  // allocate and align along 64-byte boundaries (this is enough for all
  // levels of vectorization currently supported by deal.II)
  T *new_data_ptr;
  Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_data_ptr),
                                    64,
                                    new_size * sizeof(T));

  // Now create a deleter that encodes what should happen when the object is
  // released: We need to destroy the objects that are currently alive (in
  // reverse order), and then release the memory. Note that we catch the
  // 'this' pointer because the number of elements currently alive might
  // change over time.
  Deleter deleter(this);

  // copy whatever elements we need to retain
  if (new_allocated_size > 0)
    dealii::internal::AlignedVectorMoveConstruct<T>(elements.get(),
                                                    elements.get() + old_size,
                                                    new_data_ptr);

  // Now reset all the member variables of the current object
  // based on the allocation above. Assigning to a std::unique_ptr
  // object also releases the previously pointed-to memory.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how it loops over
  // the to-be-destroyed elements in the default action of
  // Deleter::operator()).
  elements = decltype(elements)(new_data_ptr, std::move(deleter));
  used_elements_end      = elements.get() + old_size;
  allocated_elements_end = elements.get() + new_size;
}



template <class T>
inline void
AlignedVector<T>::reserve(const size_type new_allocated_size)
{
  const size_type old_size = used_elements_end - elements.get();
  const size_type old_allocated_size = allocated_elements_end - elements.get();
  if (new_allocated_size > old_allocated_size)
    {
      // if we continuously increase the size of the vector, we might be
      // reallocating a lot of times. therefore, try to increase the size more
      // aggressively
      const size_type new_size =
        std::max(new_allocated_size, 2 * old_allocated_size);

      allocate_and_move(old_size, new_size, new_allocated_size);
    }
  else if (new_allocated_size == 0)
    clear();
  else // new_allocated_size <= old_allocated_size
    {
    } // nothing to do here
}

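// A worked example of the growth policy above (a note added for exposition,
// not part of the original file): starting from an empty vector, repeated
// push_back() calls below reserve max(2 * capacity(), 16) elements, i.e.
// capacities of 16, 32, 64, ... This geometric growth keeps the amortized
// cost of push_back() constant, at the price of allocating up to twice the
// memory actually used; shrink_to_fit() can reclaim the excess.
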

template <class T>
inline void
AlignedVector<T>::shrink_to_fit()
{
#  ifdef DEBUG
  Assert(replicated_across_communicator == false,
         ExcAlignedVectorChangeAfterReplication());
#  endif
  const size_type used_size      = used_elements_end - elements.get();
  const size_type allocated_size = allocated_elements_end - elements.get();
  if (allocated_size > used_size)
    allocate_and_move(used_size, used_size, used_size);
}



template <class T>
inline void
AlignedVector<T>::clear()
{
  // Just release the memory (which also calls the destructor of the elements),
  // and then set the auxiliary pointers to invalid values.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how it loops over
  // the to-be-destroyed elements a few lines above).
  elements.reset();
  used_elements_end      = nullptr;
  allocated_elements_end = nullptr;
}



template <class T>
inline void
AlignedVector<T>::push_back(const T in_data)
{
  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
  if (used_elements_end == allocated_elements_end)
    reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
  if (std::is_trivial_v<T> == false)
    new (used_elements_end++) T(in_data);
  else
    *used_elements_end++ = in_data;
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::back()
{
  AssertIndexRange(0, size());
  T *field = used_elements_end - 1;
  return *field;
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::back() const
{
  AssertIndexRange(0, size());
  const T *field = used_elements_end - 1;
  return *field;
}



template <class T>
template <typename ForwardIterator>
inline void
AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
{
  const size_type old_size = size();
  reserve(old_size + (end - begin));
  for (; begin != end; ++begin, ++used_elements_end)
    {
      if (std::is_trivial_v<T> == false)
        new (used_elements_end) T;
      *used_elements_end = *begin;
    }
}



template <class T>
inline void
AlignedVector<T>::fill()
{
  dealii::internal::AlignedVectorDefaultInitialize<T, false>(size(),
                                                             elements.get());
}



template <class T>
inline void
AlignedVector<T>::fill(const T &value)
{
  dealii::internal::AlignedVectorInitialize<T, false>(size(),
                                                      value,
                                                      elements.get());
}

template <class T>
inline void
AlignedVector<T>::replicate_across_communicator(const MPI_Comm communicator,
                                                const unsigned int root_process)
{
#  ifdef DEAL_II_WITH_MPI

  // Let the root process broadcast its size. If it is zero, then all
  // processes just clear() their memory and reset themselves to a non-shared
  // empty object -- there is no point to run through complicated MPI
  // calls if the end result is an empty array. Otherwise, we continue on.
  const size_type new_size =
    Utilities::MPI::broadcast(communicator, size(), root_process);
  if (new_size == 0)
    {
      clear();
      return;
    }


  // **** Step 0 ****
  // All but the root process no longer need their data, so release the memory
  // used to store the previous elements.
  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
    {
      elements.reset();
      used_elements_end      = nullptr;
      allocated_elements_end = nullptr;
    }

  // **** Step 1 ****
  // Create communicators for each group of processes that can use
  // shared memory areas. Within each of these groups, we don't care about
  // which rank each of the old processes gets except that we would like to
  // make sure that the (global) root process will have rank=0 within
  // its own sub-communicator. We can do that through the third argument of
  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
  // order of processes within the split communicators, and we should set it to
  // zero for the root processes and one for all others -- which means that
  // for all of these other processes, MPI can choose whatever order it
  // wants because they have the same key (MPI then documents that these ties
  // will be broken according to these processes' rank in the old group).
  //
  // At least that's the theory. In practice, the MPI implementation this
  // function was developed with does not seem to do that. (Bug report
  // is here: https://github.com/open-mpi/ompi/issues/8854)
  // We work around this by letting MPI_Comm_split_type choose whatever
  // rank it wants, and then reshuffle with MPI_Comm_split in a second
  // step -- not elegant, nor efficient, but seems to work:
  MPI_Comm shmem_group_communicator;
  {
    MPI_Comm shmem_group_communicator_temp;
    int      ierr = MPI_Comm_split_type(communicator,
                                        MPI_COMM_TYPE_SHARED,
                                        /* key */ 0,
                                        MPI_INFO_NULL,
                                        &shmem_group_communicator_temp);
    AssertThrowMPI(ierr);

    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
    ierr = MPI_Comm_split(shmem_group_communicator_temp,
                          /* color */ 0,
                          key,
                          &shmem_group_communicator);
    AssertThrowMPI(ierr);

    // Verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
             ExcInternalError());

    // And get rid of the temporary communicator
    Utilities::MPI::free_communicator(shmem_group_communicator_temp);
  }
  const bool is_shmem_root =
    Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;

  // **** Step 2 ****
  // We then have to send the state of the current object from the
  // root process to one exemplar in each shmem group. To this end,
  // we create another subcommunicator that includes the ranks zero
  // of all shmem groups, and because of the trick above, we know
  // that this also includes the original root process.
  //
  // There are different ways of creating a "shmem_roots_communicator".
  // The conceptually easiest way is to create an MPI_Group that only
  // includes the shmem roots and then create a communicator from this
  // via MPI_Comm_create or MPI_Comm_create_group. The problem
  // with this is that we would have to exchange among all processes
  // which ones are shmem roots and which are not. This is awkward.
  //
  // A simpler way is to use MPI_Comm_split that uses "colors" to
  // indicate which sub-communicator each process wants to be in.
  // We use color=0 to indicate the group of shmem roots, and color=1
  // for all other processes -- the latter will simply not ever do
  // anything among themselves with the communicator so created.
  //
  // Using MPI_Comm_split has the additional benefit that, just as above,
  // we can choose where each rank will end up in shmem_roots_communicator.
  // We again set key=0 for the original root_process, and key=1 for all other
  // ranks; then, the global root becomes rank=0 on the
  // shmem_roots_communicator. We don't care how the other processes are
  // ordered.
  MPI_Comm shmem_roots_communicator;
  {
    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);

    const int ierr = MPI_Comm_split(communicator,
                                    /*color=*/
                                    (is_shmem_root ? 0 : 1),
                                    key,
                                    &shmem_roots_communicator);
    AssertThrowMPI(ierr);

    // Again verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
             ExcInternalError());
  }

  const unsigned int shmem_roots_root_rank = 0;
  const bool         is_shmem_roots_root =
    (is_shmem_root && (Utilities::MPI::this_mpi_process(
                         shmem_roots_communicator) == shmem_roots_root_rank));

  // Now let the original root_process broadcast the current object to all
  // shmem roots. Because of the reordering above, the original root process
  // is rank zero on shmem_roots_communicator and has all of the data.
  if (is_shmem_root)
    {
      if (std::is_trivial_v<T>)
        {
          // The data is "trivial", i.e., we can copy things directly without
          // having to go through the serialization/deserialization machinery of
          // Utilities::MPI::broadcast.
          //
          // In that case, first tell all of the other shmem roots how many
          // elements we will have to deal with, and let them resize their
          // (non-shared) arrays.
          const size_type new_size =
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      size(),
                                      shmem_roots_root_rank);
          if (is_shmem_roots_root == false)
            resize(new_size);

          // Then directly copy from the root process into these buffers
          int ierr = MPI_Bcast(elements.get(),
                               sizeof(T) * new_size,
                               MPI_CHAR,
                               shmem_roots_root_rank,
                               shmem_roots_communicator);
          AssertThrowMPI(ierr);
        }
      else
        {
          // The objects to be sent around are not "trivial", and so we have
          // to go through the serialization/deserialization machinery. On all
          // but the sending process, overwrite the current state with the
          // vector just broadcast.
          //
          // On the root rank, this would lead to resetting the 'entries'
          // pointer, which would trigger the deleter which would lead to a
          // deadlock. So we just send the result of the broadcast() call to
          // nirvana on the root process and keep our current state.
          if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      *this,
                                      shmem_roots_root_rank);
          else
            *this = Utilities::MPI::broadcast(shmem_roots_communicator,
                                              *this,
                                              shmem_roots_root_rank);
        }
    }

  // We no longer need the shmem roots communicator, so get rid of it
  Utilities::MPI::free_communicator(shmem_roots_communicator);


  // **** Step 3 ****
  // At this point, all shmem groups have one shmem root process that has
  // a copy of the data. This is the point where each shmem group should
  // establish a shmem area to put the data into. As mentioned above,
  // we know that the shmem roots are rank zero in their respective
  // shmem_group_communicator.
  //
  // The process for all of this works as follows: While all processes in
  // the shmem group participate in the generation of the shmem memory window,
  // only the shmem root actually allocates any memory -- the rest just
  // allocate zero bytes of their own. We allocate space for exactly
  // size() elements (computed on the shmem_root that already has the data)
  // and add however many bytes are necessary so that we know that we can align
  // things to 64-byte boundaries. The worst case happens if the memory system
  // gives us a pointer to an address one byte past a desired alignment
  // boundary, and in that case aligning the memory will require us to waste the
  // first (align_by-1) bytes. So we have to ask for
  //   size() * sizeof(T) + (align_by - 1)
  // bytes.
  //
  // Before MPI 4.0, there was no way to specify that we want memory aligned to
  // a certain number of bytes. This is going to come back to bite us further
  // down below when we try to get a properly aligned pointer to our memory
  // region, see the commentary there. Starting with MPI 4.0, one can set a
  // flag in an MPI_Info structure that requests a desired alignment, so we do
  // this for forward compatibility; MPI implementations ignore flags they don't
  // know anything about, and so setting this flag is backward compatible also
  // to older MPI versions.
  MPI_Win        shmem_window;
  void          *base_ptr;
  const MPI_Aint align_by = 64;
  const MPI_Aint alloc_size =
    Utilities::MPI::broadcast(shmem_group_communicator,
                              (size() * sizeof(T) + (align_by - 1)),
                              0);

  {
    int ierr;

    MPI_Info mpi_info;
    ierr = MPI_Info_create(&mpi_info);
    AssertThrowMPI(ierr);
    ierr = MPI_Info_set(mpi_info,
                        "mpi_minimum_memory_alignment",
                        std::to_string(align_by).c_str());
    AssertThrowMPI(ierr);
    ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
                                   /* disp_unit = */ 1,
                                   mpi_info,
                                   shmem_group_communicator,
                                   &base_ptr,
                                   &shmem_window);
    AssertThrowMPI(ierr);

    ierr = MPI_Info_free(&mpi_info);
    AssertThrowMPI(ierr);
  }


  // **** Step 4 ****
  // The next step is to teach all non-shmem root processes what the pointer to
  // the array is that the shmem-root created. MPI has a nifty way for this
  // given that only a single process actually allocated memory in the window:
  // When calling MPI_Win_shared_query, the MPI documentation says that
  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
  // the pointer, disp_unit, and size of the memory segment belonging the lowest
  // rank that specified size > 0. If all processes in the group attached to the
  // window specified size = 0, then the call returns size = 0 and a baseptr as
  // if MPI_ALLOC_MEM was called with size = 0."
  //
  // This will allow us to obtain the pointer to the shmem root's memory area,
  // which is the only one we care about. (None of the other processes have
  // even allocated any memory.)
  //
  // We don't need to do this on the shmem root process: This process has
  // already gotten its base_ptr correctly set above, and we can determine the
  // array size by just calling size().
  if (is_shmem_root == false)
    {
      int      disp_unit;
      MPI_Aint alloc_size; // not actually used
      const int ierr = MPI_Win_shared_query(
        shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
      AssertThrowMPI(ierr);

      // Make sure we actually got a pointer, and check that the disp_unit is
      // equal to 1 (as set above)
      Assert(base_ptr != nullptr, ExcInternalError());
      Assert(disp_unit == 1, ExcInternalError());
    }


  // **** Step 5 ****
  // Now that all processes know the address of the space that is visible to
  // everyone, we need to figure out whether it is properly aligned and if not,
  // find the next aligned address.
  //
  // std::align does that, but it also modifies its last two arguments. The
  // documentation of that function at
  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but I
  // *think* that the following should do given that we do not use base_ptr and
  // available_space any further after the call to std::align.
  std::size_t available_space       = alloc_size;
  void       *base_ptr_backup       = base_ptr;
  T          *aligned_shmem_pointer = static_cast<T *>(
    std::align(align_by, new_size * sizeof(T), base_ptr, available_space));
  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());

  // There is one step to guard against. It is *conceivable* that the base_ptr
  // we have previously obtained from MPI_Win_shared_query is mapped so
  // awkwardly into the different MPI processes' memory spaces that it is
  // aligned in one memory space, but not another. In that case, different
  // processes would align base_ptr differently, and adjust available_space
  // differently. We can check that by making sure that the max (or min) over
  // all processes is equal to every process's value. If that's not the case,
  // then the whole idea of aligning above is wrong and we need to rethink what
  // it means to align data in a shared memory space.
  //
  // One might be tempted to think that this is not how MPI implementations
  // actually arrange things. Alas, when developing this functionality in 2021,
  // this is really how at least OpenMPI ends up doing things. (This is with an
  // OpenMPI implementation of MPI 3.1, so it does not support the flag we set
  // in the MPI_Info structure above when allocating the memory window.) Indeed,
  // when running this code on three processes, one ends up with base_ptr values
  // of
  //   base_ptr=0x7f0842f02108
  //   base_ptr=0x7fc0a47881d0
  //   base_ptr=0x7f64872db108
  // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so there
  // is no common offset std::align could find that leads to a 64-byte
  // aligned memory address in all three memory spaces. That's a tremendous
  // nuisance and there is really nothing we can do about this other than just
  // fall back on the (unaligned) base_ptr in that case.
  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
      Utilities::MPI::max(available_space, shmem_group_communicator))
    aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);


  // **** Step 6 ****
  // If this is the shmem root process, we need to copy the data into the
  // shared memory space.
  if (is_shmem_root)
    {
      if (std::is_trivial_v<T> == true)
        std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
      else
        for (std::size_t i = 0; i < size(); ++i)
          new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
    }

  // Make sure that the shared memory host has copied the data before we try to
  // access it.
  const int ierr = MPI_Barrier(shmem_group_communicator);
  AssertThrowMPI(ierr);

  // **** Step 7 ****
  // Finally, we need to set the pointers of this object to what we just
  // learned. This also releases all memory that may have been in use
  // previously.
  //
  // The part that is a bit tricky is how to write the deleter of this
  // shared memory object. When we want to get rid of it, we need to
  // also release the MPI_Win object along with the shmem_group_communicator
  // object. That's because as long as we use the shared memory, we still need
  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
  // communicator. (The former is definitely true, the latter is not quite clear
  // from the MPI documentation, but seems reasonable.) So we need to have a
  // deleter for the pointer that ensures that upon release of the memory, we
  // not only call the destructor of these memory elements (but only once, on
  // the shmem root!) but also destroy the MPI_Win and the communicator. All of
  // that is encapsulated in the following call where the Deleter object
  // stores copies of the arguments it is given.
  elements = decltype(elements)(aligned_shmem_pointer,
                                Deleter(this,
                                        is_shmem_root,
                                        aligned_shmem_pointer,
                                        shmem_group_communicator,
                                        shmem_window));

  // We then also have to set the other two pointers that define the state of
  // the current object. Note that the new buffer size is exactly as large as
  // necessary, i.e., can store size() elements, regardless of the number of
  // allocated elements in the original objects.
  used_elements_end      = elements.get() + new_size;
  allocated_elements_end = used_elements_end;

  // **** Consistency check ****
  // At this point, each process should have a copy of the data.
  // Verify this in some sort of round-about way
#    ifdef DEBUG
  replicated_across_communicator       = true;
  const std::vector<char> packed_data  = Utilities::pack(*this);
  const int               hash =
    std::accumulate(packed_data.begin(), packed_data.end(), int(0));
  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
#    endif

#  else
  // No MPI -> nothing to replicate
  (void)communicator;
  (void)root_process;
#  endif
}

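/*
 * Usage sketch for the function above (illustration only, not part of the
 * library header): fill the vector on one process, then let every process
 * on each shared-memory node read the same copy. After the call the vector
 * must be treated as read-only; see
 * ExcAlignedVectorChangeAfterReplication.
 *
 * @code
 *   AlignedVector<double> table;
 *   const unsigned int    root = 0;
 *   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == root)
 *     table.resize(1000, 3.14); // only the root holds data initially
 *   table.replicate_across_communicator(MPI_COMM_WORLD, root);
 *   // Now table[i] is valid on every process, backed by a single
 *   // allocation per shared-memory node.
 * @endcode
 */
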

template <class T>
inline void
AlignedVector<T>::swap(AlignedVector<T> &vec)
{
  // Swap the data in the 'elements' objects. Then also make sure that
  // their respective deleter objects point to the right place.
  std::swap(elements, vec.elements);
  elements.get_deleter().reset_owning_object(this);
  vec.elements.get_deleter().reset_owning_object(&vec);

  // Now also swap the remaining members.
  std::swap(used_elements_end, vec.used_elements_end);
  std::swap(allocated_elements_end, vec.allocated_elements_end);
}



template <class T>
inline bool
AlignedVector<T>::empty() const
{
  return used_elements_end == elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::size() const
{
  return used_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::capacity() const
{
  return allocated_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::operator[](const size_type index)
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::operator[](const size_type index) const
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <typename T>
inline typename AlignedVector<T>::pointer
AlignedVector<T>::data()
{
  return elements.get();
}



template <typename T>
inline typename AlignedVector<T>::const_pointer
AlignedVector<T>::data() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::begin()
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::end()
{
  return used_elements_end;
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::begin() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::end() const
{
  return used_elements_end;
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::save(Archive &ar, const unsigned int) const
{
  size_type vec_size = size();
  ar       &vec_size;
  if (vec_size > 0)
    ar &boost::serialization::make_array(elements.get(), vec_size);
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::load(Archive &ar, const unsigned int)
{
  size_type vec_size = 0;
  ar       &vec_size;

  if (vec_size > 0)
    {
      reserve(vec_size);
      ar &boost::serialization::make_array(elements.get(), vec_size);
      used_elements_end = elements.get() + vec_size;
    }
}

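/*
 * Serialization sketch (illustration only, not part of the library header):
 * save() and load() above are stitched into a single serialize() member by
 * BOOST_SERIALIZATION_SPLIT_MEMBER(), so an AlignedVector can be streamed
 * through a BOOST archive like any other serializable type:
 *
 * @code
 *   #include <boost/archive/text_oarchive.hpp>
 *
 *   std::ostringstream            stream;
 *   boost::archive::text_oarchive archive(stream);
 *   AlignedVector<double>         v(4, 1.0);
 *   archive << v; // calls save(): the size first, then the raw elements
 * @endcode
 */
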

template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::memory_consumption() const
{
  size_type memory = sizeof(*this);
  for (const T *t = elements.get(); t != used_elements_end; ++t)
    memory += dealii::MemoryConsumption::memory_consumption(*t);
  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
  return memory;
}


#endif // ifndef DOXYGEN


// Relational operator == for AlignedVector.
template <class T>
bool
operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  if (lhs.size() != rhs.size())
    return false;
  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
                                                 rit = rhs.begin();
       lit != lhs.end();
       ++lit, ++rit)
    if (*lit != *rit)
      return false;
  return true;
}



// Relational operator != for AlignedVector.
template <class T>
bool
operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  return !(operator==(lhs, rhs));
}


DEAL_II_NAMESPACE_CLOSE

#endif