aligned_vector.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#ifndef dealii_aligned_vector_h
#define dealii_aligned_vector_h

#include <deal.II/base/config.h>

#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/utilities.h>

// boost::serialization::make_array used to be in array.hpp, but was
// moved to a different file in BOOST 1.64
#include <boost/version.hpp>
#if BOOST_VERSION >= 106400
#  include <boost/serialization/array_wrapper.hpp>
#else
#  include <boost/serialization/array.hpp>
#endif
#include <boost/serialization/split_member.hpp>

#include <cstring>
#include <memory>
#include <type_traits>


DEAL_II_NAMESPACE_OPEN
/**
 * This class is a specialized version of std::vector<T> whose memory is
 * aligned to 64-byte boundaries, as required by the vectorized data types
 * used in deal.II. It supports the subset of the std::vector interface
 * declared below.
 */
template <class T>
class AlignedVector
{
public:
  /**
   * Declare standard types used in all containers.
   */
  using value_type      = T;
  using pointer         = value_type *;
  using const_pointer   = const value_type *;
  using iterator        = value_type *;
  using const_iterator  = const value_type *;
  using reference       = value_type &;
  using const_reference = const value_type &;
  using size_type       = std::size_t;

  /**
   * Empty constructor. Sets the vector size to zero.
   */
  AlignedVector();

  /**
   * Set the vector size to the given size and initialize all elements with
   * @p init.
   */
  explicit AlignedVector(const size_type size, const T &init = T());

  /**
   * Destructor.
   */
  ~AlignedVector() = default;

  /**
   * Copy constructor.
   */
  AlignedVector(const AlignedVector<T> &vec);

  /**
   * Move constructor.
   */
  AlignedVector(AlignedVector<T> &&vec) noexcept;

  /**
   * Copy assignment operator.
   */
  AlignedVector &
  operator=(const AlignedVector<T> &vec);

  /**
   * Move assignment operator.
   */
  AlignedVector &
  operator=(AlignedVector<T> &&vec) noexcept;

  /**
   * Change the size of the vector. Unlike resize(), elements added beyond
   * the old size are left uninitialized if T is a trivial type.
   */
  void
  resize_fast(const size_type new_size);

  /**
   * Change the size of the vector. Newly added elements are
   * default-initialized.
   */
  void
  resize(const size_type new_size);

  /**
   * Change the size of the vector. Newly added elements are copies of
   * @p init.
   */
  void
  resize(const size_type new_size, const T &init);

  /**
   * Reserve memory space for @p new_allocated_size elements.
   */
  void
  reserve(const size_type new_allocated_size);

  /**
   * Release all previously allocated memory and set the vector to a state
   * equivalent to that after the default constructor has been called.
   */
  void
  clear();

  /**
   * Insert an element at the end of the vector, increasing the size by one.
   */
  void
  push_back(const T in_data);

  /**
   * Return the last element of the vector (read and write access).
   */
  reference
  back();

  /**
   * Return the last element of the vector (read-only access).
   */
  const_reference
  back() const;

  /**
   * Insert several elements, given by a range of iterators, at the end of
   * the vector.
   */
  template <typename ForwardIterator>
  void
  insert_back(ForwardIterator begin, ForwardIterator end);

  /**
   * Fill the vector with size() copies of a default constructed object.
   */
  void
  fill();

  /**
   * Fill the vector with size() copies of the given element.
   */
  void
  fill(const T &element);

  /**
   * Replicate the state of the vector on the root process across all
   * processes of the given communicator, using MPI shared memory so that
   * only one copy is stored per shared-memory domain where possible.
   */
  void
  replicate_across_communicator(const MPI_Comm &   communicator,
                                const unsigned int root_process);

  /**
   * Swap the contents of the given vector with those of the calling vector.
   */
  void
  swap(AlignedVector<T> &vec);

  /**
   * Return whether the vector is empty, i.e., its size is zero.
   */
  bool
  empty() const;

  /**
   * Return the size of the vector.
   */
  size_type
  size() const;

  /**
   * Return the capacity of the vector, i.e., the size this vector can hold
   * without reallocation.
   */
  size_type
  capacity() const;

  /**
   * Read-write access to entry @p index in the vector.
   */
  reference
  operator[](const size_type index);

  /**
   * Read-only access to entry @p index in the vector.
   */
  const_reference
  operator[](const size_type index) const;

  /**
   * Return a pointer to the underlying data buffer.
   */
  pointer
  data();

  /**
   * Return a read-only pointer to the underlying data buffer.
   */
  const_pointer
  data() const;

  /**
   * Return a read and write pointer to the beginning of the data array.
   */
  iterator
  begin();

  /**
   * Return a read and write pointer to the end of the data array.
   */
  iterator
  end();

  /**
   * Return a read-only pointer to the beginning of the data array.
   */
  const_iterator
  begin() const;

  /**
   * Return a read-only pointer to the end of the data array.
   */
  const_iterator
  end() const;

  /**
   * Return the memory consumption of this class in bytes.
   */
  size_type
  memory_consumption() const;

  /**
   * Write the data of this object to a stream for the purpose of
   * serialization using BOOST's serialization facilities.
   */
  template <class Archive>
  void
  save(Archive &ar, const unsigned int version) const;

  /**
   * Read the data of this object from a stream for the purpose of
   * serialization using BOOST's serialization facilities.
   */
  template <class Archive>
  void
  load(Archive &ar, const unsigned int version);

#ifdef DOXYGEN
  /**
   * Write and read the data of this object from a stream for the purpose
   * of serialization.
   */
  template <class Archive>
  void
  serialize(Archive &archive, const unsigned int version);
#else
  // This macro defines the serialize() method that is compatible with
  // the templated save() and load() method that have been implemented.
  BOOST_SERIALIZATION_SPLIT_MEMBER()
#endif

private:
  /**
   * A deleter class for the std::unique_ptr that stores the elements of
   * this vector. It knows how to destroy the elements that are still alive
   * and how to release the memory, possibly by way of a registered
   * "action" object.
   */
  class Deleter
  {
  public:
    /**
     * Constructor, taking a pointer to the owning AlignedVector object.
     */
    Deleter(AlignedVector<T> *owning_object);

#ifdef DEAL_II_WITH_MPI
    /**
     * Constructor for arrays that live in MPI shared memory windows.
     */
    Deleter(AlignedVector<T> *owning_object,
            const bool        is_shmem_root,
            T *               aligned_shmem_pointer,
            MPI_Comm          shmem_group_communicator,
            MPI_Win           shmem_window);
#endif

    /**
     * The operation performed when the std::unique_ptr releases its memory.
     */
    void
    operator()(T *ptr);

    /**
     * Reset the pointer to the owning AlignedVector object, for example
     * after a move or swap operation.
     */
    void
    reset_owning_object(const AlignedVector<T> *new_aligned_vector_ptr);

  private:
    /**
     * Base class for objects that encode a specific action to be taken
     * upon deletion.
     */
    class DeleterActionBase
    {
    public:
      /**
       * Destructor, made virtual to allow polymorphism.
       */
      virtual ~DeleterActionBase() = default;

      /**
       * The function that implements the deletion of the array.
       */
      virtual void
      delete_array(const AlignedVector<T> *owning_aligned_vector, T *ptr) = 0;
    };

#ifdef DEAL_II_WITH_MPI
    /**
     * A deleter action for arrays that have been allocated in MPI shared
     * memory windows.
     */
    class MPISharedMemDeleterAction : public DeleterActionBase
    {
    public:
      /**
       * Constructor, storing the information necessary to release the
       * shared memory window later on.
       */
      MPISharedMemDeleterAction(const bool is_shmem_root,
                                T *        aligned_shmem_pointer,
                                MPI_Comm   shmem_group_communicator,
                                MPI_Win    shmem_window);

      /**
       * Destroy the elements (on the shared memory root only) and release
       * the MPI window and communicator.
       */
      virtual void
      delete_array(const AlignedVector<T> *aligned_vector, T *ptr);

    private:
      /**
       * Variables that describe the MPI shared memory window and this
       * process's role with regard to it.
       */
      const bool is_shmem_root;
      T *        aligned_shmem_pointer;
      MPI_Comm   shmem_group_communicator;
      MPI_Win    shmem_window;
    };
#endif

    /**
     * A pointer to the object that encodes the action to be performed upon
     * deletion. A nullptr encodes the default action (destroy the elements,
     * then free the memory).
     */
    std::unique_ptr<DeleterActionBase> deleter_action_object;

    /**
     * A pointer to the AlignedVector object whose memory this deleter is
     * responsible for.
     */
    const AlignedVector<T> *owning_aligned_vector;
  };

  /**
   * Pointer to actual data array, together with its deleter.
   */
  std::unique_ptr<T[], Deleter> elements;

  /**
   * Pointer to one past the last valid value.
   */
  T *used_elements_end;

  /**
   * Pointer to the end of the allocated memory.
   */
  T *allocated_elements_end;
};

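// A minimal usage sketch of the interface declared above; the element type
// double and the sizes are arbitrary choices for illustration:
//
//   AlignedVector<double> v(8, 1.0); // eight entries, all equal to 1.0
//   v.push_back(2.0);                // now v.size() == 9
//   v.resize(16);                    // added entries are default-initialized
//   for (double *p = v.begin(); p != v.end(); ++p)
//     *p *= 2.0;                     // iterable just like a std::vector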

// ------------------------------- inline functions --------------------------

/**
 * This namespace defines the copy and set functions used in AlignedVector.
 * These functions operate in parallel if there are enough elements in the
 * vector.
 */
namespace internal
{
  /**
   * A class that, given a range of memory locations, calls the copy
   * constructor on these elements in parallel if the range is sufficiently
   * large.
   */
  template <typename T>
  class AlignedVectorCopyConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    /**
     * Constructor. Issues a parallel call if there are sufficiently many
     * elements, otherwise works in serial. Copies the data from the
     * half-open interval between @p source_begin and @p source_end to the
     * array starting at @p destination.
     */
    AlignedVectorCopyConstruct(const T *const source_begin,
                               const T *const source_end,
                               T *const       destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorCopyConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    /**
     * This method copies elements from the source to the destination given
     * in the constructor on a subrange given by two integers.
     */
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // For classes with trivial assignment, we can use memcpy. Cast the
      // addresses to (void*) to silence the compiler warning for virtual
      // classes (they will never arrive here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<const void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(source_[i]);
    }

  private:
    const T *const source_;
    T *const       destination_;
  };

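  // To make the grain size above concrete: for T = double (sizeof(T) == 8),
  // minimum_parallel_grain_size evaluates to 160000 / 8 + 1 == 20001
  // elements, i.e., ranges smaller than roughly 160 kB are handled serially.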

  /**
   * Like AlignedVectorCopyConstruct, but move-constructs the elements in
   * the destination from the given source range.
   */
  template <typename T>
  class AlignedVectorMoveConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    /**
     * Constructor. Issues a parallel call if there are sufficiently many
     * elements, otherwise works in serial.
     */
    AlignedVectorMoveConstruct(T *const source_begin,
                               T *const source_end,
                               T *const destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorMoveConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    /**
     * This method moves elements from the source to the destination given
     * in the constructor on a subrange given by two integers.
     */
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // Classes with trivial assignment can use memcpy. Cast the addresses
      // to (void*) to silence the compiler warning for virtual classes
      // (they will never arrive here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        // For everything else just use the move constructor. The original
        // object remains alive and will be destroyed elsewhere.
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(std::move(source_[i]));
    }

  private:
    T *const source_;
    T *const destination_;
  };


  /**
   * A class that, given a range of memory locations, sets these elements
   * to a given value, in parallel if the range is sufficiently large. The
   * template argument initialize_memory selects whether the elements are
   * copy-constructed in place (placement new) or copy-assigned.
   */
  template <typename T, bool initialize_memory>
  class AlignedVectorInitialize : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    /**
     * Constructor. Issues a parallel call if there are sufficiently many
     * elements, otherwise works in serial.
     */
    AlignedVectorInitialize(const std::size_t size,
                            const T &         element,
                            T *const          destination)
      : element_(element)
      , destination_(destination)
      , trivial_element(false)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      // do not use memcmp for long double because on some systems it does not
      // completely fill its memory and may lead to false positives in
      // e.g. valgrind
      if (std::is_trivial<T>::value == true &&
          std::is_same<T, long double>::value == false)
        {
          const unsigned char zero[sizeof(T)] = {};
          // cast element to (void*) to silence compiler warning for virtual
          // classes (they will never arrive here because they are
          // non-trivial).
          if (std::memcmp(zero,
                          static_cast<const void *>(&element),
                          sizeof(T)) == 0)
            trivial_element = true;
        }
      if (size < minimum_parallel_grain_size)
        AlignedVectorInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    /**
     * This sets the elements on a subrange given by two integers.
     */
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of a zero value can use memset.
      // Cast the address to (void*) to silence the compiler warning for
      // virtual classes (they will never arrive here because they are
      // non-trivial).
      if (std::is_trivial<T>::value == true && trivial_element)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        copy_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    const T &  element_;
    mutable T *destination_;
    bool       trivial_element;

    // copy assignment operation
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = element_;
    }

    // copy constructor (memory initialization)
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T(element_);
    }
  };


  /**
   * Like AlignedVectorInitialize, but sets the elements to the value of a
   * default-constructed T. The template argument initialize_memory again
   * selects between placement new and assignment.
   */
  template <typename T, bool initialize_memory>
  class AlignedVectorDefaultInitialize
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    /**
     * Constructor. Issues a parallel call if there are sufficiently many
     * elements, otherwise works in serial.
     */
    AlignedVectorDefaultInitialize(const std::size_t size,
                                   T *const          destination)
      : destination_(destination)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      if (size < minimum_parallel_grain_size)
        AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    /**
     * This initializes the elements on a subrange given by two integers.
     */
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of a zero value can use memset.
      // Cast the address to (void*) to silence the compiler warning for
      // virtual classes (they will never arrive here because they are
      // non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        default_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    mutable T *destination_;

    // default assignment operation
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = std::move(T());
    }

    // default constructor (memory initialization)
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T;
    }
  };

} // end of namespace internal


#ifndef DOXYGEN



template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object)
  : deleter_action_object(nullptr) // encode default action by using a nullptr
  , owning_aligned_vector(owning_object)
{}


#  ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object,
                                          const bool        is_shmem_root,
                                          T *aligned_shmem_pointer,
                                          MPI_Comm shmem_group_communicator,
                                          MPI_Win  shmem_window)
  : deleter_action_object(
      std::make_unique<MPISharedMemDeleterAction>(is_shmem_root,
                                                  aligned_shmem_pointer,
                                                  shmem_group_communicator,
                                                  shmem_window))
  , owning_aligned_vector(owning_object)
{}
#  endif


template <typename T>
inline void
AlignedVector<T>::Deleter::operator()(T *ptr)
{
  // If no special action has been registered (i.e., if the action pointer is
  // nullptr), then just perform the default action right here.
  if (deleter_action_object == nullptr)
    {
      if (ptr != nullptr)
        {
          Assert(owning_aligned_vector->used_elements_end != nullptr,
                 ExcInternalError());

          if (std::is_trivial<T>::value == false)
            for (T *p = owning_aligned_vector->used_elements_end - 1; p >= ptr;
                 --p)
              p->~T();

          std::free(ptr);
        }
    }
  else
    // Otherwise, let the action object do what is necessary
    deleter_action_object->delete_array(owning_aligned_vector, ptr);
}



template <typename T>
inline void
AlignedVector<T>::Deleter::reset_owning_object(
  const AlignedVector<T> *new_aligned_vector_ptr)
{
  owning_aligned_vector = new_aligned_vector_ptr;
}


#  ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::MPISharedMemDeleterAction::
  MPISharedMemDeleterAction(const bool is_shmem_root,
                            T *        aligned_shmem_pointer,
                            MPI_Comm   shmem_group_communicator,
                            MPI_Win    shmem_window)
  : is_shmem_root(is_shmem_root)
  , aligned_shmem_pointer(aligned_shmem_pointer)
  , shmem_group_communicator(shmem_group_communicator)
  , shmem_window(shmem_window)
{}



template <typename T>
inline void
AlignedVector<T>::Deleter::MPISharedMemDeleterAction::delete_array(
  const AlignedVector<T> *aligned_vector,
  T *                     ptr)
{
  (void)ptr;
  Assert(aligned_vector->elements.get() == ptr, ExcInternalError());

  if (is_shmem_root)
    if (std::is_trivial<T>::value == false)
      for (T *p = aligned_vector->used_elements_end - 1;
           p >= aligned_vector->elements.get();
           --p)
        p->~T();

  int ierr;
  ierr = MPI_Win_free(&shmem_window);
  AssertThrowMPI(ierr);

  Utilities::MPI::free_communicator(shmem_group_communicator);
}

#  endif


template <class T>
inline AlignedVector<T>::AlignedVector()
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{}



template <class T>
inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{
  if (size > 0)
    resize(size, init);
}



template <class T>
inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{
  // copy the data from vec
  reserve(vec.size());
  used_elements_end = allocated_elements_end;
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());
}



template <class T>
inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
  : AlignedVector<T>()
{
  // forward to the move operator
  *this = std::move(vec);
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(const AlignedVector<T> &vec)
{
  const size_type new_size = vec.used_elements_end - vec.elements.get();

  // First throw away everything and re-allocate memory but leave that
  // memory uninitialized for now:
  resize(0);
  reserve(new_size);

  // Then copy the elements over by using the copy constructor on these
  // elements:
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());

  // Finally adjust the pointer to the end of the elements that are used:
  used_elements_end = elements.get() + new_size;

  return *this;
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
{
  clear();

  // Move the actual data in the 'elements' object. One problem is that this
  // also moves the deleter object, but the deleter object
  // references 'this' (i.e., the 'this' pointer of the *moved-from*
  // object). The way this is implemented is that we have to move the
  // deleter as well, and then reset the pointer inside the deleter
  // that references the outer object.
  elements = std::move(vec.elements);
  elements.get_deleter().reset_owning_object(this);

  // Then also steal the other pointers and clear them in the original object:
  used_elements_end      = vec.used_elements_end;
  allocated_elements_end = vec.allocated_elements_end;

  vec.used_elements_end      = nullptr;
  vec.allocated_elements_end = nullptr;

  return *this;
}


template <class T>
inline void
AlignedVector<T>::resize_fast(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial<T>::value == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // need to still set the values in case the class is non-trivial because
      // virtual classes etc. need to run their (default) constructor
      if (std::is_trivial<T>::value == false)
        dealii::internal::AlignedVectorDefaultInitialize<T, true>(
          new_size - old_size, elements.get() + old_size);
    }
}


template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial<T>::value == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the values to the default initializer
      dealii::internal::AlignedVectorDefaultInitialize<T, true>(
        new_size - old_size, elements.get() + old_size);
    }
}


template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size, const T &init)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial<T>::value == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the desired init values
      dealii::internal::AlignedVectorInitialize<T, true>(
        new_size - old_size, init, elements.get() + old_size);
    }
}


template <class T>
inline void
AlignedVector<T>::reserve(const size_type new_allocated_size)
{
  const size_type old_size           = used_elements_end - elements.get();
  const size_type old_allocated_size = allocated_elements_end - elements.get();
  if (new_allocated_size > old_allocated_size)
    {
      // if we continuously increase the size of the vector, we might be
      // reallocating a lot of times. therefore, try to increase the size more
      // aggressively
      const size_type new_size =
        std::max(new_allocated_size, 2 * old_allocated_size);

      // allocate and align along 64-byte boundaries (this is enough for all
      // levels of vectorization currently supported by deal.II)
      T *new_data_ptr;
      Utilities::posix_memalign(
        reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));

      // Now create a deleter that encodes what should happen when the object
      // is released: We need to destroy the objects that are currently alive
      // (in reverse order), and then release the memory. Note that we catch
      // the 'this' pointer because the number of elements currently alive
      // might change over time.
      Deleter deleter(this);

      // copy whatever elements we need to retain
      if (new_allocated_size > 0)
        dealii::internal::AlignedVectorMoveConstruct<T>(
          elements.get(), elements.get() + old_size, new_data_ptr);

      // Now reset all of the member variables of the current object
      // based on the allocation above. Assigning to a std::unique_ptr
      // object also releases the previously pointed to memory.
      //
      // Note that at the time of releasing the old memory, 'used_elements_end'
      // still points to its previous value, and this is important for the
      // deleter object of the previously allocated array (see how the default
      // action in Deleter::operator() loops over the to-be-destroyed
      // elements).
      elements = decltype(elements)(new_data_ptr, std::move(deleter));
      used_elements_end      = elements.get() + old_size;
      allocated_elements_end = elements.get() + new_size;
    }
  else if (new_allocated_size == 0)
    clear();
  else // size_alloc < allocated_size
    {} // nothing to do here
}

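// To illustrate the growth policy in reserve(): a sequence of push_back()
// calls on an initially empty vector allocates capacities 16, 32, 64, ...,
// so appending n elements triggers only O(log n) reallocations.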


template <class T>
inline void
AlignedVector<T>::clear()
{
  // Just release the memory (which also calls the destructor of the elements),
  // and then set the auxiliary pointers to invalid values.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how it loops over
  // the to-be-destroyed elements a few lines above).
  elements.reset();
  used_elements_end      = nullptr;
  allocated_elements_end = nullptr;
}



template <class T>
inline void
AlignedVector<T>::push_back(const T in_data)
{
  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
  if (used_elements_end == allocated_elements_end)
    reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
  if (std::is_trivial<T>::value == false)
    new (used_elements_end++) T(in_data);
  else
    *used_elements_end++ = in_data;
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::back()
{
  AssertIndexRange(0, size());
  T *field = used_elements_end - 1;
  return *field;
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::back() const
{
  AssertIndexRange(0, size());
  const T *field = used_elements_end - 1;
  return *field;
}



template <class T>
template <typename ForwardIterator>
inline void
AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
{
  const size_type old_size = size();
  reserve(old_size + (end - begin));
  for (; begin != end; ++begin, ++used_elements_end)
    {
      if (std::is_trivial<T>::value == false)
        new (used_elements_end) T;
      *used_elements_end = *begin;
    }
}

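// A usage sketch for insert_back(); the std::vector source below is an
// arbitrary choice, any forward iterator range works:
//
//   std::vector<float>   src = {1.f, 2.f, 3.f};
//   AlignedVector<float> dst;
//   dst.insert_back(src.begin(), src.end()); // dst.size() == 3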


template <class T>
inline void
AlignedVector<T>::fill()
{
  dealii::internal::AlignedVectorDefaultInitialize<T, false>(size(),
                                                             elements.get());
}



template <class T>
inline void
AlignedVector<T>::fill(const T &value)
{
  dealii::internal::AlignedVectorInitialize<T, false>(size(),
                                                      value,
                                                      elements.get());
}


template <class T>
inline void
AlignedVector<T>::replicate_across_communicator(const MPI_Comm &   communicator,
                                                const unsigned int root_process)
{
#  ifdef DEAL_II_WITH_MPI
#    if DEAL_II_MPI_VERSION_GTE(3, 0)

  // **** Step 0 ****
  // All but the root process no longer need their data, so release the memory
  // used to store the previous elements.
  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
    {
      elements.reset();
      used_elements_end      = nullptr;
      allocated_elements_end = nullptr;
    }

  // **** Step 1 ****
  // Create communicators for each group of processes that can use
  // shared memory areas. Within each of these groups, we don't care about
  // which rank each of the old processes gets except that we would like to
  // make sure that the (global) root process will have rank=0 within
  // its own sub-communicator. We can do that through the third argument of
  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
  // order of processes within the split communicators, and we should set it to
  // zero for the root processes and one for all others -- which means that
  // for all of these other processes, MPI can choose whatever order it
  // wants because they have the same key (MPI then documents that these ties
  // will be broken according to these processes' rank in the old group).
  //
  // At least that's the theory. In practice, the MPI implementation with
  // which this function was developed does not seem to do that. (Bug report
  // is here: https://github.com/open-mpi/ompi/issues/8854)
  // We work around this by letting MPI_Comm_split_type choose whatever
  // rank it wants, and then reshuffle with MPI_Comm_split in a second
  // step -- not elegant, nor efficient, but seems to work:
  MPI_Comm shmem_group_communicator;
  {
    MPI_Comm shmem_group_communicator_temp;
    int      ierr = MPI_Comm_split_type(communicator,
                                   MPI_COMM_TYPE_SHARED,
                                   /* key */ 0,
                                   MPI_INFO_NULL,
                                   &shmem_group_communicator_temp);
    AssertThrowMPI(ierr);

    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
    ierr = MPI_Comm_split(shmem_group_communicator_temp,
                          /* color */ 0,
                          key,
                          &shmem_group_communicator);
    AssertThrowMPI(ierr);

    // Verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
             ExcInternalError());

    // And get rid of the temporary communicator
    Utilities::MPI::free_communicator(shmem_group_communicator_temp);
  }
  const bool is_shmem_root =
    Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;

  // **** Step 2 ****
  // We then have to send the state of the current object from the
  // root process to one exemplar in each shmem group. To this end,
  // we create another subcommunicator that includes the ranks zero
  // of all shmem groups, and because of the trick above, we know
  // that this also includes the original root process.
  //
  // There are different ways of creating a "shmem_roots_communicator".
  // The conceptually easiest way is to create an MPI_Group that only
  // includes the shmem roots and then create a communicator from this
  // via MPI_Comm_create or MPI_Comm_create_group. The problem
  // with this is that we would have to exchange among all processes
  // which ones are shmem roots and which are not. This is awkward.
  //
  // A simpler way is to use MPI_Comm_split that uses "colors" to
  // indicate which sub-communicator each process wants to be in.
  // We use color=0 to indicate the group of shmem roots, and color=1
  // for all other processes -- the latter will simply not ever do
  // anything among themselves with the communicator so created.
  //
  // Using MPI_Comm_split has the additional benefit that, just as above,
  // we can choose where each rank will end up in shmem_roots_communicator.
  // We again set key=0 for the original root_process, and key=1 for all other
  // ranks; then, the global root becomes rank=0 on the
  // shmem_roots_communicator. We don't care how the other processes are
  // ordered.
  MPI_Comm shmem_roots_communicator;
  {
    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);

    const int ierr = MPI_Comm_split(communicator,
                                    /*color=*/
                                    (is_shmem_root ? 0 : 1),
                                    key,
                                    &shmem_roots_communicator);
    AssertThrowMPI(ierr);

    // Again verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
             ExcInternalError());
  }

  const unsigned int shmem_roots_root_rank = 0;
  const bool         is_shmem_roots_root =
    (is_shmem_root && (Utilities::MPI::this_mpi_process(
                         shmem_roots_communicator) == shmem_roots_root_rank));

  // Now let the original root_process broadcast the current object to all
  // shmem roots. By the construction above, we know that the original root
  // process has rank zero in shmem_roots_communicator, and that it is the
  // one that has all of the data.
  if (is_shmem_root)
    {
      if (std::is_trivial<T>::value)
        {
          // The data is "trivial", i.e., we can copy things directly without
          // having to go through the serialization/deserialization machinery
          // of Utilities::MPI::broadcast.
          //
          // In that case, first tell all of the other shmem roots how many
          // elements we will have to deal with, and let them resize their
          // (non-shared) arrays.
          const size_type new_size =
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      size(),
                                      shmem_roots_root_rank);
          if (is_shmem_roots_root == false)
            resize(new_size);

          // Then directly copy from the root process into these buffers
          int ierr = MPI_Bcast(elements.get(),
                               sizeof(T) * new_size,
                               MPI_CHAR,
                               shmem_roots_root_rank,
                               shmem_roots_communicator);
          AssertThrowMPI(ierr);
        }
      else
        {
          // The objects to be sent around are not "trivial", and so we have
          // to go through the serialization/deserialization machinery. On all
          // but the sending process, overwrite the current state with the
          // vector just broadcast.
          //
          // On the root rank, this would lead to resetting the 'entries'
          // pointer, which would trigger the deleter which would lead to a
          // deadlock. So we just send the result of the broadcast() call to
          // nirvana on the root process and keep our current state.
          if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      *this,
                                      shmem_roots_root_rank);
          else
            *this = Utilities::MPI::broadcast(shmem_roots_communicator,
                                              *this,
                                              shmem_roots_root_rank);
        }
    }

  // We no longer need the shmem roots communicator, so get rid of it. (Note
  // that it is 'shmem_roots_communicator' that is released here;
  // 'shmem_group_communicator' is still needed below and is eventually
  // released by the Deleter object.)
  Utilities::MPI::free_communicator(shmem_roots_communicator);


  // **** Step 3 ****
  // At this point, all shmem groups have one shmem root process that has
  // a copy of the data. This is the point where each shmem group should
  // establish a shmem area to put the data into. As explained above, we
  // know that the shmem roots are rank zero in their respective
  // shmem_group_communicator.
  //
  // The process for all of this works as follows: While all processes in
  // the shmem group participate in the generation of the shmem memory window,
  // only the shmem root actually allocates any memory -- the rest just
  // allocate zero bytes of their own. We allocate space for exactly
  // size() elements (computed on the shmem_root that already has the data)
  // and add however many bytes are necessary so that we know that we can align
  // things to 64-byte boundaries. The worst case happens if the memory system
  // gives us a pointer to an address one byte past a desired alignment
  // boundary, and in that case aligning the memory will require us to waste
  // the first (align_by-1) bytes. So we have to ask for
  //   size() * sizeof(T) + (align_by - 1)
  // bytes.
  //
  // Before MPI 4.0, there was no way to specify that we want memory aligned to
  // a certain number of bytes. This is going to come back to bite us further
  // down below when we try to get a properly aligned pointer to our memory
  // region, see the commentary there. Starting with MPI 4.0, one can set a
  // flag in an MPI_Info structure that requests a desired alignment, so we do
  // this for forward compatibility; MPI implementations ignore flags they
  // don't know anything about, and so setting this flag is backward compatible
  // also to older MPI versions.
  //
  // There is one final piece we can already take care of here. At the
  // beginning of all of this, only the shmem_root knows how many elements
  // there are in the array. But at the end of it, all processes of course need
  // to know. We could put this information somewhere into the shmem area,
  // along with the other data, but that seems clumsy. It turns out that when
  // calling MPI_Win_allocate_shared, we are asked for the value of a parameter
  // called 'disp_unit' whose meaning is difficult to determine from the MPI
  // documentation, and that we do not actually need. So we "abuse" it a bit:
  // On the shmem root, we put the array size into it. Later on, the remaining
  // processes can query the shmem root's value of 'disp_unit', and so will be
  // able to learn about the array size that way.
  MPI_Win        shmem_window;
  void *         base_ptr;
  const MPI_Aint align_by = 64;
  const MPI_Aint alloc_size =
    Utilities::MPI::broadcast(shmem_group_communicator,
                              (size() * sizeof(T) + (align_by - 1)),
                              0);

  {
    const int disp_unit = (is_shmem_root ? size() : 1);

    int ierr;

    MPI_Info mpi_info;
    ierr = MPI_Info_create(&mpi_info);
    AssertThrowMPI(ierr);
    ierr = MPI_Info_set(mpi_info,
                        "mpi_minimum_memory_alignment",
                        std::to_string(align_by).c_str());
    AssertThrowMPI(ierr);
    ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
                                   disp_unit,
                                   mpi_info,
                                   shmem_group_communicator,
                                   &base_ptr,
                                   &shmem_window);
    AssertThrowMPI(ierr);

    ierr = MPI_Info_free(&mpi_info);
    AssertThrowMPI(ierr);
  }


  // **** Step 4 ****
  // The next step is to teach all non-shmem root processes what the pointer to
  // the array is that the shmem-root created. MPI has a nifty way for this
  // given that only a single process actually allocated memory in the window:
  // When calling MPI_Win_shared_query, the MPI documentation says that
  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
  // the pointer, disp_unit, and size of the memory segment belonging the
  // lowest rank that specified size > 0. If all processes in the group
  // attached to the window specified size = 0, then the call returns size = 0
  // and a baseptr as if MPI_ALLOC_MEM was called with size = 0."
  //
  // This will allow us to obtain the pointer to the shmem root's memory area,
  // which is the only one we care about. (None of the other processes have
  // even allocated any memory.) But this will also retrieve the shmem root's
  // disp_unit, which in step 3 above we have abused to pass along the number
  // of elements in the array.
  //
  // We don't need to do this on the shmem root process: This process has
  // already gotten its base_ptr correctly set above, and we can determine the
  // array size by just calling size().
  size_type array_size = (is_shmem_root ? size() : size_type(0));
  if (is_shmem_root == false)
    {
      int      disp_unit;
      MPI_Aint alloc_size; // not actually used
      const int ierr = MPI_Win_shared_query(
        shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
      AssertThrowMPI(ierr);

      // Make sure we actually got a pointer, and also unpack the array size
      // as discussed above.
      Assert(base_ptr != nullptr, ExcInternalError());

      array_size = disp_unit;
    }


  // **** Step 5 ****
  // Now that all processes know the address of the space that is visible to
  // everyone, we need to figure out whether it is properly aligned and if not,
  // find the next aligned address.
  //
  // std::align does that, but it also modifies its last two arguments. The
  // documentation of that function at
  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but
  // I *think* that the following should do given that we do not use base_ptr
  // and available_space any further after the call to std::align.
  std::size_t available_space       = alloc_size;
  void *      base_ptr_backup       = base_ptr;
  T *         aligned_shmem_pointer = static_cast<T *>(
    std::align(align_by, array_size * sizeof(T), base_ptr, available_space));
  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());

  // There is one step to guard against. It is *conceivable* that the base_ptr
  // we have previously obtained from MPI_Win_shared_query is mapped so
  // awkwardly into the different MPI processes' memory spaces that it is
  // aligned in one memory space, but not another. In that case, different
  // processes would align base_ptr differently, and adjust available_space
  // differently. We can check that by making sure that the max (or min) over
  // all processes is equal to every process's value. If that's not the case,
  // then the whole idea of aligning above is wrong and we need to rethink what
  // it means to align data in a shared memory space.
  //
  // One might be tempted to think that this is not how MPI implementations
  // actually arrange things. Alas, when developing this functionality in 2021,
  // this is really how at least OpenMPI ends up doing things. (This is with an
  // OpenMPI implementation of MPI 3.1, so it does not support the flag we set
  // in the MPI_Info structure above when allocating the memory window.)
  // Indeed, when running this code on three processes, one ends up with
  // base_ptr values of
  //   base_ptr=0x7f0842f02108
  //   base_ptr=0x7fc0a47881d0
  //   base_ptr=0x7f64872db108
  // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so
  // there is no common offset std::align could find that leads to a 64-byte
  // aligned memory address in all three memory spaces. That's a tremendous
  // nuisance and there is really nothing we can do about this other than just
  // fall back on the (unaligned) base_ptr in that case.
  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
      Utilities::MPI::max(available_space, shmem_group_communicator))
    aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);


  // **** Step 6 ****
  // If this is the shmem root process, we need to copy the data into the
  // shared memory space.
  if (is_shmem_root)
    {
      if (std::is_trivial<T>::value == true)
        std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
      else
        for (std::size_t i = 0; i < size(); ++i)
          new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
    }

  // Make sure that the shared memory host has copied the data before we try
  // to access it.
  const int ierr = MPI_Barrier(shmem_group_communicator);
  AssertThrowMPI(ierr);

  // **** Step 7 ****
  // Finally, we need to set the pointers of this object to what we just
  // learned. This also releases all memory that may have been in use
  // previously.
  //
  // The part that is a bit tricky is how to write the deleter of this
  // shared memory object. When we want to get rid of it, we need to
  // also release the MPI_Win object along with the shmem_group_communicator
  // object. That's because as long as we use the shared memory, we still need
  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
  // communicator. (The former is definitely true, the latter is not quite
  // clear from the MPI documentation, but seems reasonable.) So we need to
  // have a deleter for the pointer that ensures that upon release of the
  // memory, we not only call the destructor of these memory elements (but
  // only once, on the shmem root!) but also destroy the MPI_Win and the
  // communicator. All of that is encapsulated in the following call where the
  // deleter makes copies of the arguments it is handed.
  elements = decltype(elements)(aligned_shmem_pointer,
                                Deleter(this,
                                        is_shmem_root,
                                        aligned_shmem_pointer,
                                        shmem_group_communicator,
                                        shmem_window));

  // We then also have to set the other two pointers that define the state of
  // the current object. Note that the new buffer size is exactly as large as
  // necessary, i.e., it can store size() elements, regardless of the number
  // of allocated elements in the original objects.
  used_elements_end      = elements.get() + array_size;
  allocated_elements_end = used_elements_end;

  // **** Consistency check ****
  // At this point, each process should have a copy of the data.
  // Verify this in some sort of round-about way
#      ifdef DEBUG
  const std::vector<char> packed_data = Utilities::pack(*this);
  const int               hash =
    std::accumulate(packed_data.begin(), packed_data.end(), int(0));
  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
#      endif

#    else
  // If we only have MPI 2.x, then simply broadcast the current object to all
  // other processes and forego the idea of using shmem
  *this = Utilities::MPI::broadcast(communicator, *this, root_process);
#    endif
#  else
  // No MPI -> nothing to replicate
  (void)communicator;
  (void)root_process;
#  endif
}

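// A usage sketch for replicate_across_communicator(), assuming deal.II was
// configured with MPI; the communicator, size, and fill value are arbitrary:
//
//   AlignedVector<int> v;
//   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
//     v.resize(1000, 42);
//   v.replicate_across_communicator(MPI_COMM_WORLD, /*root_process=*/0);
//   // now every rank sees 1000 entries equal to 42, backed by (at most)
//   // one allocation per shared-memory domain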


template <class T>
inline void
AlignedVector<T>::swap(AlignedVector<T> &vec)
{
  // Swap the data in the 'elements' objects. Then also make sure that
  // their respective deleter objects point to the right place.
  std::swap(elements, vec.elements);
  elements.get_deleter().reset_owning_object(this);
  vec.elements.get_deleter().reset_owning_object(&vec);

  // Now also swap the remaining members.
  std::swap(used_elements_end, vec.used_elements_end);
  std::swap(allocated_elements_end, vec.allocated_elements_end);
}



template <class T>
inline bool
AlignedVector<T>::empty() const
{
  return used_elements_end == elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::size() const
{
  return used_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::capacity() const
{
  return allocated_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::operator[](const size_type index)
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::operator[](const size_type index) const
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <typename T>
inline typename AlignedVector<T>::pointer
AlignedVector<T>::data()
{
  return elements.get();
}



template <typename T>
inline typename AlignedVector<T>::const_pointer
AlignedVector<T>::data() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::begin()
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::end()
{
  return used_elements_end;
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::begin() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::end() const
{
  return used_elements_end;
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::save(Archive &ar, const unsigned int) const
{
  size_type vec_size = size();
  ar &      vec_size;
  if (vec_size > 0)
    ar &boost::serialization::make_array(elements.get(), vec_size);
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::load(Archive &ar, const unsigned int)
{
  size_type vec_size = 0;
  ar &      vec_size;

  if (vec_size > 0)
    {
      reserve(vec_size);
      ar &boost::serialization::make_array(elements.get(), vec_size);
      used_elements_end = elements.get() + vec_size;
    }
}

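// A serialization sketch: because of BOOST_SERIALIZATION_SPLIT_MEMBER()
// above, an AlignedVector can be streamed through any BOOST archive. The
// archive type and file name below are arbitrary, and this assumes the user
// includes the corresponding <boost/archive/...> and <fstream> headers:
//
//   std::ofstream                 out("vec.dat");
//   boost::archive::text_oarchive ar(out);
//   AlignedVector<double>         v(4, 3.14);
//   ar << v; // invokes save()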


template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::memory_consumption() const
{
  size_type memory = sizeof(*this);
  for (const T *t = elements.get(); t != used_elements_end; ++t)
    memory += ::MemoryConsumption::memory_consumption(*t);
  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
  return memory;
}


#endif // ifndef DOXYGEN


/**
 * Relational operator == for AlignedVector objects.
 *
 * @relatesalso AlignedVector
 */
template <class T>
bool
operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  if (lhs.size() != rhs.size())
    return false;
  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
                                                 rit = rhs.begin();
       lit != lhs.end();
       ++lit, ++rit)
    if (*lit != *rit)
      return false;
  return true;
}



/**
 * Relational operator != for AlignedVector objects.
 *
 * @relatesalso AlignedVector
 */
template <class T>
bool
operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  return !(operator==(lhs, rhs));
}


DEAL_II_NAMESPACE_CLOSE

#endif