aligned_vector.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_aligned_vector_h
18 #define dealii_aligned_vector_h
19 
20 #include <deal.II/base/config.h>
21 
 22 #include <deal.II/base/exceptions.h>
 23 #include <deal.II/base/memory_consumption.h>
 24 #include <deal.II/base/mpi.h>
25 #include <deal.II/base/parallel.h>
26 #include <deal.II/base/utilities.h>
27 
28 // boost::serialization::make_array used to be in array.hpp, but was
29 // moved to a different file in BOOST 1.64
30 #include <boost/version.hpp>
31 #if BOOST_VERSION >= 106400
32 # include <boost/serialization/array_wrapper.hpp>
33 #else
34 # include <boost/serialization/array.hpp>
35 #endif
36 #include <boost/serialization/split_member.hpp>
37 
38 #include <cstring>
39 #include <memory>
40 #include <type_traits>
41 
42 
43 
 44 DEAL_II_NAMESPACE_OPEN
 45 
46 
60 template <class T>
 61 class AlignedVector
 62 {
63 public:
68  using value_type = T;
69  using pointer = value_type *;
70  using const_pointer = const value_type *;
71  using iterator = value_type *;
72  using const_iterator = const value_type *;
73  using reference = value_type &;
74  using const_reference = const value_type &;
75  using size_type = std::size_t;
76 
80  AlignedVector();
81 
88  explicit AlignedVector(const size_type size, const T &init = T());
89 
93  ~AlignedVector() = default;
94 
100  AlignedVector(const AlignedVector<T> &vec);
101 
106  AlignedVector(AlignedVector<T> &&vec) noexcept;
107 
113  AlignedVector &
114  operator=(const AlignedVector<T> &vec);
115 
119  AlignedVector &
120  operator=(AlignedVector<T> &&vec) noexcept;
121 
144  void
145  resize_fast(const size_type new_size);
146 
159  void
160  resize(const size_type new_size);
161 
177  void
178  resize(const size_type new_size, const T &init);
179 
200  void
201  reserve(const size_type new_allocated_size);
202 
207  void
208  clear();
209 
215  void
216  push_back(const T in_data);
217 
221  reference
222  back();
223 
 227  const_reference
 228  back() const;
229 
234  template <typename ForwardIterator>
235  void
236  insert_back(ForwardIterator begin, ForwardIterator end);
237 
247  void
248  fill();
249 
258  void
259  fill(const T &element);
260 
348  void
349  replicate_across_communicator(const MPI_Comm & communicator,
350  const unsigned int root_process);
351 
355  void
356  swap(AlignedVector<T> &vec);
357 
361  bool
362  empty() const;
363 
367  size_type
368  size() const;
369 
374  size_type
375  capacity() const;
376 
380  reference
381  operator[](const size_type index);
382 
 386  const_reference
 387  operator[](const size_type index) const;
388 
392  pointer
393  data();
394 
 398  const_pointer
 399  data() const;
400 
404  iterator
405  begin();
406 
410  iterator
411  end();
412 
 416  const_iterator
 417  begin() const;
418 
 422  const_iterator
 423  end() const;
424 
430  size_type
431  memory_consumption() const;
432 
438  template <class Archive>
439  void
440  save(Archive &ar, const unsigned int version) const;
441 
447  template <class Archive>
448  void
449  load(Archive &ar, const unsigned int version);
450 
451 #ifdef DOXYGEN
452 
457  template <class Archive>
458  void
459  serialize(Archive &archive, const unsigned int version);
460 #else
461  // This macro defines the serialize() method that is compatible with
 462  // the templated save() and load() methods that have been implemented.
463  BOOST_SERIALIZATION_SPLIT_MEMBER()
464 #endif
465 
466 private:
558  class Deleter
559  {
560  public:
566  Deleter(AlignedVector<T> *owning_object);
567 
568 #ifdef DEAL_II_WITH_MPI
569 
576  Deleter(AlignedVector<T> *owning_object,
577  const bool is_shmem_root,
578  T * aligned_shmem_pointer,
579  MPI_Comm shmem_group_communicator,
580  MPI_Win shmem_window);
581 #endif
582 
588  void
589  operator()(T *ptr);
590 
598  void
599  reset_owning_object(const AlignedVector<T> *new_aligned_vector_ptr);
600 
601  private:
 605  class DeleterActionBase
 606  {
607  public:
611  virtual ~DeleterActionBase() = default;
612 
618  virtual void
 619  delete_array(const AlignedVector<T> *owning_aligned_vector, T *ptr) = 0;
 620  };
621 
622 #ifdef DEAL_II_WITH_MPI
623 
 628  class MPISharedMemDeleterAction : public DeleterActionBase
 629  {
630  public:
635  MPISharedMemDeleterAction(const bool is_shmem_root,
636  T * aligned_shmem_pointer,
637  MPI_Comm shmem_group_communicator,
638  MPI_Win shmem_window);
639 
645  virtual void
646  delete_array(const AlignedVector<T> *aligned_vector, T *ptr);
647 
648  private:
653  const bool is_shmem_root;
 654  T *      aligned_shmem_pointer;
 655  MPI_Comm shmem_group_communicator;
 656  MPI_Win  shmem_window;
657  };
658 #endif
659 
664  std::unique_ptr<DeleterActionBase> deleter_action_object;
665 
 670  const AlignedVector<T> *owning_aligned_vector;
 671  };
672 
676  std::unique_ptr<T[], Deleter> elements;
677 
 681  T *used_elements_end;
 682 
 686  T *allocated_elements_end;
 687 };
688 
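// Usage sketch (editorial addition, not part of the library source): a
// minimal example of the interface declared above, assuming deal.II is
// available and the code runs with the dealii namespace opened.
//
// @code
//   #include <deal.II/base/aligned_vector.h>
//
//   AlignedVector<double> v(4, 1.0); // four elements, all equal to 1.0
//   v.push_back(2.0);                // grows the vector: v.size() == 5
//   v.fill(0.0);                     // overwrite every element with 0.0
//   double *p = v.data();            // pointer aligned to 64 bytes
// @endcode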
689 
690 // ------------------------------- inline functions --------------------------
691 
697 namespace internal
698 {
717  template <typename T>
 718  class AlignedVectorCopyConstruct
 719    : private dealii::parallel::ParallelForInteger
 720  {
721  static const std::size_t minimum_parallel_grain_size =
722  160000 / sizeof(T) + 1;
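  // (Editorial note: for T = double, i.e. sizeof(T) == 8, this evaluates
  // to 20001 elements; shorter ranges are handled serially by the
  // constructor below, longer ones are split up via apply_parallel().)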
723 
724  public:
734  AlignedVectorCopyConstruct(const T *const source_begin,
735  const T *const source_end,
736  T *const destination)
737  : source_(source_begin)
738  , destination_(destination)
739  {
740  Assert(source_end >= source_begin, ExcInternalError());
741  Assert(source_end == source_begin || destination != nullptr,
742  ExcInternalError());
743  const std::size_t size = source_end - source_begin;
744  if (size < minimum_parallel_grain_size)
745  AlignedVectorCopyConstruct::apply_to_subrange(0, size);
746  else
747  apply_parallel(0, size, minimum_parallel_grain_size);
748  }
749 
754  virtual void
755  apply_to_subrange(const std::size_t begin,
756  const std::size_t end) const override
757  {
758  if (end == begin)
759  return;
760 
 761  // For classes with trivial assignment, we can use memcpy. Cast elements to
762  // (void*) to silence compiler warning for virtual classes (they will
763  // never arrive here because they are non-trivial).
764 
765  if (std::is_trivial<T>::value == true)
766  std::memcpy(static_cast<void *>(destination_ + begin),
767  static_cast<const void *>(source_ + begin),
768  (end - begin) * sizeof(T));
769  else
770  for (std::size_t i = begin; i < end; ++i)
771  new (&destination_[i]) T(source_[i]);
772  }
773 
774  private:
775  const T *const source_;
776  T *const destination_;
777  };
778 
779 
786  template <typename T>
 787  class AlignedVectorMoveConstruct
 788    : private dealii::parallel::ParallelForInteger
 789  {
790  static const std::size_t minimum_parallel_grain_size =
791  160000 / sizeof(T) + 1;
792 
793  public:
803  AlignedVectorMoveConstruct(T *const source_begin,
804  T *const source_end,
805  T *const destination)
806  : source_(source_begin)
807  , destination_(destination)
808  {
809  Assert(source_end >= source_begin, ExcInternalError());
810  Assert(source_end == source_begin || destination != nullptr,
811  ExcInternalError());
812  const std::size_t size = source_end - source_begin;
813  if (size < minimum_parallel_grain_size)
814  AlignedVectorMoveConstruct::apply_to_subrange(0, size);
815  else
816  apply_parallel(0, size, minimum_parallel_grain_size);
817  }
818 
823  virtual void
824  apply_to_subrange(const std::size_t begin,
825  const std::size_t end) const override
826  {
827  if (end == begin)
828  return;
829 
 830  // Classes with trivial assignment can use memcpy. Cast elements to
831  // (void*) to silence compiler warning for virtual classes (they will
832  // never arrive here because they are non-trivial).
833  if (std::is_trivial<T>::value == true)
834  std::memcpy(static_cast<void *>(destination_ + begin),
835  static_cast<void *>(source_ + begin),
836  (end - begin) * sizeof(T));
837  else
838  // For everything else just use the move constructor. The original
839  // object remains alive and will be destroyed elsewhere.
840  for (std::size_t i = begin; i < end; ++i)
841  new (&destination_[i]) T(std::move(source_[i]));
842  }
843 
844  private:
845  T *const source_;
846  T *const destination_;
847  };
848 
849 
867  template <typename T, bool initialize_memory>
 868  class AlignedVectorInitialize : private dealii::parallel::ParallelForInteger
 869  {
870  static const std::size_t minimum_parallel_grain_size =
871  160000 / sizeof(T) + 1;
872 
873  public:
878  AlignedVectorInitialize(const std::size_t size,
879  const T & element,
880  T *const destination)
881  : element_(element)
882  , destination_(destination)
883  , trivial_element(false)
884  {
885  if (size == 0)
886  return;
887  Assert(destination != nullptr, ExcInternalError());
888 
 889  // Do not use memcmp for long double because on some systems a long double
 890  // does not completely fill its memory footprint (it has padding bytes),
 891  // which may lead to false positives in e.g. valgrind.
892  if (std::is_trivial<T>::value == true &&
893  std::is_same<T, long double>::value == false)
894  {
895  const unsigned char zero[sizeof(T)] = {};
896  // cast element to (void*) to silence compiler warning for virtual
897  // classes (they will never arrive here because they are
898  // non-trivial).
899  if (std::memcmp(zero,
900  static_cast<const void *>(&element),
901  sizeof(T)) == 0)
902  trivial_element = true;
903  }
904  if (size < minimum_parallel_grain_size)
905  AlignedVectorInitialize::apply_to_subrange(0, size);
906  else
907  apply_parallel(0, size, minimum_parallel_grain_size);
908  }
909 
913  virtual void
914  apply_to_subrange(const std::size_t begin,
915  const std::size_t end) const override
916  {
 917  // For classes with trivial assignment, setting to zero can use memset. Cast
918  // element to (void*) to silence compiler warning for virtual
919  // classes (they will never arrive here because they are
920  // non-trivial).
921  if (std::is_trivial<T>::value == true && trivial_element)
922  std::memset(static_cast<void *>(destination_ + begin),
923  0,
924  (end - begin) * sizeof(T));
925  else
926  copy_construct_or_assign(
927  begin, end, std::integral_constant<bool, initialize_memory>());
928  }
929 
930  private:
931  const T & element_;
932  mutable T *destination_;
 933  bool       trivial_element;
 934 
935  // copy assignment operation
936  void
937  copy_construct_or_assign(const std::size_t begin,
938  const std::size_t end,
939  std::integral_constant<bool, false>) const
940  {
941  for (std::size_t i = begin; i < end; ++i)
942  destination_[i] = element_;
943  }
944 
945  // copy constructor (memory initialization)
946  void
947  copy_construct_or_assign(const std::size_t begin,
948  const std::size_t end,
949  std::integral_constant<bool, true>) const
950  {
951  for (std::size_t i = begin; i < end; ++i)
952  new (&destination_[i]) T(element_);
953  }
954  };
955 
956 
957 
970  template <typename T, bool initialize_memory>
 971  class AlignedVectorDefaultInitialize
 972    : private dealii::parallel::ParallelForInteger
 973  {
974  static const std::size_t minimum_parallel_grain_size =
975  160000 / sizeof(T) + 1;
976 
977  public:
982  AlignedVectorDefaultInitialize(const std::size_t size, T *const destination)
983  : destination_(destination)
984  {
985  if (size == 0)
986  return;
987  Assert(destination != nullptr, ExcInternalError());
988 
989  if (size < minimum_parallel_grain_size)
990  AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
991  else
992  apply_parallel(0, size, minimum_parallel_grain_size);
993  }
994 
998  virtual void
999  apply_to_subrange(const std::size_t begin,
1000  const std::size_t end) const override
1001  {
 1002  // For classes with trivial assignment, setting to zero can use memset. Cast
1003  // element to (void*) to silence compiler warning for virtual
1004  // classes (they will never arrive here because they are
1005  // non-trivial).
1006  if (std::is_trivial<T>::value == true)
1007  std::memset(static_cast<void *>(destination_ + begin),
1008  0,
1009  (end - begin) * sizeof(T));
1010  else
1011  default_construct_or_assign(
1012  begin, end, std::integral_constant<bool, initialize_memory>());
1013  }
1014 
1015  private:
1016  mutable T *destination_;
1017 
1018  // copy assignment operation
1019  void
 1020  default_construct_or_assign(const std::size_t begin,
 1021  const std::size_t end,
1022  std::integral_constant<bool, false>) const
1023  {
1024  for (std::size_t i = begin; i < end; ++i)
1025  destination_[i] = std::move(T());
1026  }
1027 
1028  // copy constructor (memory initialization)
1029  void
 1030  default_construct_or_assign(const std::size_t begin,
 1031  const std::size_t end,
1032  std::integral_constant<bool, true>) const
1033  {
1034  for (std::size_t i = begin; i < end; ++i)
1035  new (&destination_[i]) T;
1036  }
1037  };
1038 
1039 } // end of namespace internal
1040 
1041 
1042 #ifndef DOXYGEN
1043 
1044 
1045 
1046 template <typename T>
 1047 inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object)
 1048  : deleter_action_object(nullptr) // encode default action by using a nullptr
1049  , owning_aligned_vector(owning_object)
1050 {}
1051 
1052 
1053 # ifdef DEAL_II_WITH_MPI
1054 
1055 template <typename T>
 1056 inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object,
 1057  const bool is_shmem_root,
1058  T * aligned_shmem_pointer,
1059  MPI_Comm shmem_group_communicator,
1060  MPI_Win shmem_window)
 1061  : deleter_action_object(
 1062  std::make_unique<MPISharedMemDeleterAction>(is_shmem_root,
1063  aligned_shmem_pointer,
1064  shmem_group_communicator,
1065  shmem_window))
1066  , owning_aligned_vector(owning_object)
1067 {}
1068 # endif
1069 
1070 
1071 template <typename T>
1072 inline void
 1073 AlignedVector<T>::Deleter::operator()(T *ptr)
 1074 {
1075  // If no special action has been registered (i.e., if the action pointer is
1076  // nullptr), then just perform the default action right here.
1077  if (deleter_action_object == nullptr)
1078  {
1079  if (ptr != nullptr)
1080  {
1081  Assert(owning_aligned_vector->used_elements_end != nullptr,
1082  ExcInternalError());
1083 
1084  if (std::is_trivial<T>::value == false)
1085  for (T *p = owning_aligned_vector->used_elements_end - 1; p >= ptr;
1086  --p)
1087  p->~T();
1088 
1089  std::free(ptr);
1090  }
1091  }
1092  else
1093  // Otherwise, let the action object do what is necessary
1094  deleter_action_object->delete_array(owning_aligned_vector, ptr);
1095 }
1096 
1097 
1098 
1099 template <typename T>
1100 inline void
 1101 AlignedVector<T>::Deleter::reset_owning_object(
 1102  const AlignedVector<T> *new_aligned_vector_ptr)
1103 {
1104  owning_aligned_vector = new_aligned_vector_ptr;
1105 }
1106 
1107 
1108 # ifdef DEAL_II_WITH_MPI
1109 
1110 template <typename T>
 1111 inline AlignedVector<T>::Deleter::MPISharedMemDeleterAction::
 1112  MPISharedMemDeleterAction(const bool is_shmem_root,
1113  T * aligned_shmem_pointer,
1114  MPI_Comm shmem_group_communicator,
1115  MPI_Win shmem_window)
1116  : is_shmem_root(is_shmem_root)
1117  , aligned_shmem_pointer(aligned_shmem_pointer)
1118  , shmem_group_communicator(shmem_group_communicator)
1119  , shmem_window(shmem_window)
1120 {}
1121 
1122 
1123 
1124 template <typename T>
1125 inline void
 1126 AlignedVector<T>::Deleter::MPISharedMemDeleterAction::delete_array(
 1127  const AlignedVector<T> *aligned_vector,
1128  T * ptr)
1129 {
1130  (void)ptr;
1131  Assert(aligned_vector->elements.get() == ptr, ExcInternalError());
1132 
1133  if (is_shmem_root)
1134  if (std::is_trivial<T>::value == false)
1135  for (T *p = aligned_vector->used_elements_end - 1;
1136  p >= aligned_vector->elements.get();
1137  --p)
1138  p->~T();
1139 
1140  int ierr;
1141  ierr = MPI_Win_free(&shmem_window);
1142  AssertThrowMPI(ierr);
1143 
1144  ierr = MPI_Comm_free(&shmem_group_communicator);
1145  AssertThrowMPI(ierr);
1146 }
1147 
1148 # endif
1149 
1150 
1151 template <class T>
 1152 inline AlignedVector<T>::AlignedVector()
 1153  : elements(nullptr, Deleter(this))
1154  , used_elements_end(nullptr)
1155  , allocated_elements_end(nullptr)
1156 {}
1157 
1158 
1159 
1160 template <class T>
1161 inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
1162  : elements(nullptr, Deleter(this))
1163  , used_elements_end(nullptr)
1164  , allocated_elements_end(nullptr)
1165 {
1166  if (size > 0)
1167  resize(size, init);
1168 }
1169 
1170 
1171 
1172 template <class T>
 1173 inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
 1174  : elements(nullptr, Deleter(this))
1175  , used_elements_end(nullptr)
1176  , allocated_elements_end(nullptr)
1177 {
1178  // copy the data from vec
1179  reserve(vec.size());
 1180  used_elements_end = allocated_elements_end;
 1181  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
1182  vec.used_elements_end,
1183  elements.get());
1184 }
1185 
1186 
1187 
1188 template <class T>
 1189 inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
 1190  : AlignedVector<T>()
1191 {
1192  // forward to the move operator
1193  *this = std::move(vec);
1194 }
1195 
1196 
1197 
1198 template <class T>
1199 inline AlignedVector<T> &
 1200 AlignedVector<T>::operator=(const AlignedVector<T> &vec)
 1201 {
1202  resize(0);
1203  resize_fast(vec.used_elements_end - vec.elements.get());
1204  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
1205  vec.used_elements_end,
1206  elements.get());
1207  return *this;
1208 }
1209 
1210 
1211 
1212 template <class T>
1213 inline AlignedVector<T> &
 1214 AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
 1215 {
1216  clear();
1217 
1218  // Move the actual data in the 'elements' object. One problem is that this
1219  // also moves the deleter object, but the deleter object
1220  // references 'this' (i.e., the 'this' pointer of the *moved-from*
1221  // object). The way this is implemented is that we have to move the
1222  // deleter as well, and then reset the pointer inside the deleter
1223  // that references the outer object.
1224  elements = std::move(vec.elements);
1225  elements.get_deleter().reset_owning_object(this);
1226 
1227  // Then also steal the other pointers and clear them in the original object:
1228  used_elements_end = vec.used_elements_end;
1229  allocated_elements_end = vec.allocated_elements_end;
1230 
1231  vec.used_elements_end = nullptr;
1232  vec.allocated_elements_end = nullptr;
1233 
1234  return *this;
1235 }
1236 
1237 
1238 
1239 template <class T>
1240 inline void
 1241 AlignedVector<T>::resize_fast(const size_type new_size)
 1242 {
1243  const size_type old_size = size();
1244 
1245  if (new_size == 0)
1246  clear();
1247  else if (new_size == old_size)
1248  {} // nothing to do here
1249  else if (new_size < old_size)
1250  {
1251  // call destructor on fields that are released, if the type requires it.
1252  // doing it backward releases the elements in reverse order as compared to
1253  // how they were created
1254  if (std::is_trivial<T>::value == false)
1255  for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1256  p->~T();
1257  used_elements_end = elements.get() + new_size;
1258  }
1259  else // new_size > old_size
1260  {
1261  // Allocate more space, and claim that space as used
1262  reserve(new_size);
1263  used_elements_end = elements.get() + new_size;
1264 
1265  // need to still set the values in case the class is non-trivial because
1266  // virtual classes etc. need to run their (default) constructor
1267  if (std::is_trivial<T>::value == false)
 1268  dealii::internal::AlignedVectorDefaultInitialize<T, true>(
 1269  new_size - old_size, elements.get() + old_size);
1270  }
1271 }
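// (Editorial note: the difference to resize() below is only observable for
// trivial types T. For example, after
//   AlignedVector<double> v;
//   v.resize_fast(100);
// the 100 doubles are left uninitialized, whereas v.resize(100) sets them
// to zero via AlignedVectorDefaultInitialize<T, true>.)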
1272 
1273 
1274 
1275 template <class T>
1276 inline void
1277 AlignedVector<T>::resize(const size_type new_size)
1278 {
1279  const size_type old_size = size();
1280 
1281  if (new_size == 0)
1282  clear();
1283  else if (new_size == old_size)
1284  {} // nothing to do here
1285  else if (new_size < old_size)
1286  {
1287  // call destructor on fields that are released, if the type requires it.
1288  // doing it backward releases the elements in reverse order as compared to
1289  // how they were created
1290  if (std::is_trivial<T>::value == false)
1291  for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1292  p->~T();
1293  used_elements_end = elements.get() + new_size;
1294  }
1295  else // new_size > old_size
1296  {
1297  // Allocate more space, and claim that space as used
1298  reserve(new_size);
1299  used_elements_end = elements.get() + new_size;
1300 
1301  // finally set the values to the default initializer
 1302  dealii::internal::AlignedVectorDefaultInitialize<T, true>(
 1303  new_size - old_size, elements.get() + old_size);
1304  }
1305 }
1306 
1307 
1308 
1309 template <class T>
1310 inline void
1311 AlignedVector<T>::resize(const size_type new_size, const T &init)
1312 {
1313  const size_type old_size = size();
1314 
1315  if (new_size == 0)
1316  clear();
1317  else if (new_size == old_size)
1318  {} // nothing to do here
1319  else if (new_size < old_size)
1320  {
1321  // call destructor on fields that are released, if the type requires it.
1322  // doing it backward releases the elements in reverse order as compared to
1323  // how they were created
1324  if (std::is_trivial<T>::value == false)
1325  for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1326  p->~T();
1327  used_elements_end = elements.get() + new_size;
1328  }
1329  else // new_size > old_size
1330  {
1331  // Allocate more space, and claim that space as used
1332  reserve(new_size);
1333  used_elements_end = elements.get() + new_size;
1334 
1335  // finally set the desired init values
 1336  dealii::internal::AlignedVectorInitialize<T, true>(
 1337  new_size - old_size, init, elements.get() + old_size);
1338  }
1339 }
1340 
1341 
1342 
1343 template <class T>
1344 inline void
1345 AlignedVector<T>::reserve(const size_type new_allocated_size)
1346 {
1347  const size_type old_size = used_elements_end - elements.get();
1348  const size_type old_allocated_size = allocated_elements_end - elements.get();
1349  if (new_allocated_size > old_allocated_size)
1350  {
1351  // if we continuously increase the size of the vector, we might be
1352  // reallocating a lot of times. therefore, try to increase the size more
1353  // aggressively
1354  const size_type new_size =
1355  std::max(new_allocated_size, 2 * old_allocated_size);
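  // (Editorial note: with this doubling policy, a sequence of push_back()
  // calls allocates capacities 16, 32, 64, ..., so N insertions cause only
  // O(log N) reallocations and amortized constant work per element.)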
1356 
1357  // allocate and align along 64-byte boundaries (this is enough for all
1358  // levels of vectorization currently supported by deal.II)
1359  T *new_data_ptr;
 1360  Utilities::System::posix_memalign(
 1361  reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));
1362 
1363  // Now create a deleter that encodes what should happen when the object is
1364  // released: We need to destroy the objects that are currently alive (in
1365  // reverse order, and then release the memory. Note that we catch the
1366  // 'this' pointer because the number of elements currently alive might
1367  // change over time.
1368  Deleter deleter(this);
1369 
1370  // copy whatever elements we need to retain
1371  if (new_allocated_size > 0)
1373  elements.get(), elements.get() + old_size, new_data_ptr);
1374 
1375  // Now reset all of the member variables of the current object
1376  // based on the allocation above. Assigning to a std::unique_ptr
1377  // object also releases the previously pointed to memory.
1378  //
1379  // Note that at the time of releasing the old memory, 'used_elements_end'
1380  // still points to its previous value, and this is important for the
1381  // deleter object of the previously allocated array (see how it loops over
 1382  // the to-be-destroyed elements at the Deleter::DefaultDeleterAction
1383  // class).
1384  elements = decltype(elements)(new_data_ptr, std::move(deleter));
1385  used_elements_end = elements.get() + old_size;
1386  allocated_elements_end = elements.get() + new_size;
1387  }
1388  else if (new_allocated_size == 0)
1389  clear();
1390  else // size_alloc < allocated_size
1391  {} // nothing to do here
1392 }
1393 
1394 
1395 
1396 template <class T>
1397 inline void
 1398 AlignedVector<T>::clear()
 1399 {
1400  // Just release the memory (which also calls the destructor of the elements),
1401  // and then set the auxiliary pointers to invalid values.
1402  //
1403  // Note that at the time of releasing the old memory, 'used_elements_end'
1404  // still points to its previous value, and this is important for the
1405  // deleter object of the previously allocated array (see how it loops over
1406  // the to-be-destroyed elements a few lines above).
1407  elements.reset();
1408  used_elements_end = nullptr;
1409  allocated_elements_end = nullptr;
1410 }
1411 
1412 
1413 
1414 template <class T>
1415 inline void
1416 AlignedVector<T>::push_back(const T in_data)
1417 {
 1418  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
 1419  if (used_elements_end == allocated_elements_end)
 1420  reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
1421  if (std::is_trivial<T>::value == false)
1422  new (used_elements_end++) T(in_data);
1423  else
1424  *used_elements_end++ = in_data;
1425 }
1426 
1427 
1428 
1429 template <class T>
1430 inline typename AlignedVector<T>::reference
 1431 AlignedVector<T>::back()
 1432 {
1433  AssertIndexRange(0, size());
1434  T *field = used_elements_end - 1;
1435  return *field;
1436 }
1437 
1438 
1439 
1440 template <class T>
1441 inline typename AlignedVector<T>::const_reference
1442 AlignedVector<T>::back() const
1443 {
1444  AssertIndexRange(0, size());
1445  const T *field = used_elements_end - 1;
1446  return *field;
1447 }
1448 
1449 
1450 
1451 template <class T>
1452 template <typename ForwardIterator>
1453 inline void
1454 AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
1455 {
1456  const size_type old_size = size();
1457  reserve(old_size + (end - begin));
1458  for (; begin != end; ++begin, ++used_elements_end)
1459  {
1460  if (std::is_trivial<T>::value == false)
1461  new (used_elements_end) T;
 1462  *used_elements_end = *begin;
 1463  }
1464 }
1465 
1466 
1467 
1468 template <class T>
1469 inline void
 1470 AlignedVector<T>::fill()
 1471 {
 1472  dealii::internal::AlignedVectorDefaultInitialize<T, false>(size(),
 1473  elements.get());
1474 }
1475 
1476 
1477 
1478 template <class T>
1479 inline void
 1480 AlignedVector<T>::fill(const T &value)
 1481 {
 1482  dealii::internal::AlignedVectorInitialize<T, false>(size(),
 1483  value,
1484  elements.get());
1485 }
1486 
1487 
1488 
1489 template <class T>
1490 inline void
 1491 AlignedVector<T>::replicate_across_communicator(const MPI_Comm & communicator,
 1492  const unsigned int root_process)
1493 {
1494 # ifdef DEAL_II_WITH_MPI
1495 # if DEAL_II_MPI_VERSION_GTE(3, 0)
1496 
1497  // **** Step 0 ****
1498  // All but the root process no longer need their data, so release the memory
1499  // used to store the previous elements.
1500  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
1501  {
1502  elements.reset();
1503  used_elements_end = nullptr;
1504  allocated_elements_end = nullptr;
1505  }
1506 
1507  // **** Step 1 ****
1508  // Create communicators for each group of processes that can use
1509  // shared memory areas. Within each of these groups, we don't care about
1510  // which rank each of the old processes gets except that we would like to
1511  // make sure that the (global) root process will have rank=0 within
1512  // its own sub-communicator. We can do that through the third argument of
1513  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
1514  // order of processes within the split communicators, and we should set it to
1515  // zero for the root processes and one for all others -- which means that
1516  // for all of these other processes, MPI can choose whatever order it
1517  // wants because they have the same key (MPI then documents that these ties
1518  // will be broken according to these processes' rank in the old group).
1519  //
1520  // At least that's the theory. In practice, the MPI implementation where
1521  // this function was developed on does not seem to do that. (Bug report
1522  // is here: https://github.com/open-mpi/ompi/issues/8854)
1523  // We work around this by letting MPI_Comm_split_type choose whatever
1524  // rank it wants, and then reshuffle with MPI_Comm_split in a second
1525  // step -- not elegant, nor efficient, but seems to work:
1526  MPI_Comm shmem_group_communicator;
1527  {
1528  MPI_Comm shmem_group_communicator_temp;
1529  int ierr = MPI_Comm_split_type(communicator,
1530  MPI_COMM_TYPE_SHARED,
1531  /* key */ 0,
1532  MPI_INFO_NULL,
1533  &shmem_group_communicator_temp);
1534  AssertThrowMPI(ierr);
1535 
1536  const int key =
1537  (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
1538  ierr = MPI_Comm_split(shmem_group_communicator_temp,
1539  /* color */ 0,
1540  key,
1541  &shmem_group_communicator);
1542  AssertThrowMPI(ierr);
1543 
1544  // Verify the explanation from above
1545  if (Utilities::MPI::this_mpi_process(communicator) == root_process)
1546  Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
1547  ExcInternalError());
1548 
1549  // And get rid of the temporary communicator
1550  ierr = MPI_Comm_free(&shmem_group_communicator_temp);
1551  AssertThrowMPI(ierr);
1552  }
1553  const bool is_shmem_root =
1554  Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;
1555 
1556  // **** Step 2 ****
1557  // We then have to send the state of the current object from the
1558  // root process to one exemplar in each shmem group. To this end,
1559  // we create another subcommunicator that includes the ranks zero
1560  // of all shmem groups, and because of the trick above, we know
1561  // that this also includes the original root process.
1562  //
1563  // There are different ways of creating a "shmem_roots_communicator".
1564  // The conceptually easiest way is to create an MPI_Group that only
1565  // includes the shmem roots and then create a communicator from this
1566  // via MPI_Comm_create or MPI_Comm_create_group. The problem
1567  // with this is that we would have to exchange among all processes
1568  // which ones are shmem roots and which are not. This is awkward.
1569  //
1570  // A simpler way is to use MPI_Comm_split that uses "colors" to
1571  // indicate which sub-communicator each process wants to be in.
1572  // We use color=0 to indicate the group of shmem roots, and color=1
1573  // for all other processes -- the latter will simply not ever do
1574  // anything among themselves with the communicator so created.
1575  //
1576  // Using MPI_Comm_split has the additional benefit that, just as above,
1577  // we can choose where each rank will end up in shmem_roots_communicator.
1578  // We again set key=0 for the original root_process, and key=1 for all other
1579  // ranks; then, the global root becomes rank=0 on the
1580  // shmem_roots_communicator. We don't care how the other processes are
1581  // ordered.
1582  MPI_Comm shmem_roots_communicator;
1583  {
1584  const int key =
1585  (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
1586 
1587  const int ierr = MPI_Comm_split(communicator,
1588  /*color=*/
1589  (is_shmem_root ? 0 : 1),
1590  key,
1591  &shmem_roots_communicator);
1592  AssertThrowMPI(ierr);
1593 
1594  // Again verify the explanation from above
1595  if (Utilities::MPI::this_mpi_process(communicator) == root_process)
1596  Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
1597  ExcInternalError());
1598  }
1599 
1600  const unsigned int shmem_roots_root_rank = 0;
1601  const bool is_shmem_roots_root =
1602  (is_shmem_root && (Utilities::MPI::this_mpi_process(
1603  shmem_roots_communicator) == shmem_roots_root_rank));
1604 
1605  // Now let the original root_process broadcast the current object to all
 1606  // shmem roots. We know that rank zero of shmem_roots_communicator is the
 1607  // original root process that has all of the data.
1608  if (is_shmem_root)
1609  {
1610  if (std::is_trivial<T>::value)
1611  {
1612  // The data is "trivial", i.e., we can copy things directly without
1613  // having to go through the serialization/deserialization machinery of
1614  // Utilities::MPI::broadcast.
1615  //
1616  // In that case, first tell all of the other shmem roots how many
1617  // elements we will have to deal with, and let them resize their
1618  // (non-shared) arrays.
1619  const size_type new_size =
1620  Utilities::MPI::broadcast(shmem_roots_communicator,
1621  size(),
1622  shmem_roots_root_rank);
1623  if (is_shmem_roots_root == false)
1624  resize(new_size);
1625 
1626  // Then directly copy from the root process into these buffers
1627  int ierr = MPI_Bcast(elements.get(),
1628  sizeof(T) * new_size,
1629  MPI_CHAR,
1630  shmem_roots_root_rank,
1631  shmem_roots_communicator);
1632  AssertThrowMPI(ierr);
1633  }
1634  else
1635  {
1636  // The objects to be sent around are not "trivial", and so we have
1637  // to go through the serialization/deserialization machinery. On all
1638  // but the sending process, overwrite the current state with the
1639  // vector just broadcast.
1640  //
 1641  // On the root rank, this would lead to resetting the 'elements'
1642  // pointer, which would trigger the deleter which would lead to a
1643  // deadlock. So we just send the result of the broadcast() call to
1644  // nirvana on the root process and keep our current state.
1645  if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
1646  Utilities::MPI::broadcast(shmem_roots_communicator,
1647  *this,
1648  shmem_roots_root_rank);
1649  else
1650  *this = Utilities::MPI::broadcast(shmem_roots_communicator,
1651  *this,
1652  shmem_roots_root_rank);
1653  }
1654  }
1655 
1656  // We no longer need the shmem roots communicator, so get rid of it
1657  {
1658  const int ierr = MPI_Comm_free(&shmem_roots_communicator);
1659  AssertThrowMPI(ierr);
1660  }
1661 
1662 
1663  // **** Step 3 ****
1664  // At this point, all shmem groups have one shmem root process that has
1665  // a copy of the data. This is the point where each shmem group should
1666  // establish a shmem area to put the data into. As mentioned above,
 1667  // we know that the shmem roots are rank zero in their respective
 1668  // shmem_group_communicator.
1669  //
1670  // The process for all of this works as follows: While all processes in
1671  // the shmem group participate in the generation of the shmem memory window,
1672  // only the shmem root actually allocates any memory -- the rest just
1673  // allocate zero bytes of their own. We allocate space for exactly
1674  // size() elements (computed on the shmem_root that already has the data)
1675  // and add however many bytes are necessary so that we know that we can align
1676  // things to 64-byte boundaries. The worst case happens if the memory system
1677  // gives us a pointer to an address one byte past a desired alignment
1678  // boundary, and in that case aligning the memory will require us to waste the
1679  // first (align_by-1) bytes. So we have to ask for
1680  // size() * sizeof(T) + (align_by - 1)
1681  // bytes.
1682  //
1683  // Before MPI 4.0, there was no way to specify that we want memory aligned to
1684  // a certain number of bytes. This is going to come back to bite us further
1685  // down below when we try to get a properly aligned pointer to our memory
1686  // region, see the commentary there. Starting with MPI 4.0, one can set a
1687  // flag in an MPI_Info structure that requests a desired alignment, so we do
1688  // this for forward compatibility; MPI implementations ignore flags they don't
1689  // know anything about, and so setting this flag is backward compatible also
1690  // to older MPI versions.
1691  //
1692  // There is one final piece we can already take care of here. At the beginning
1693  // of all of this, only the shmem_root knows how many elements there are in
1694  // the array. But at the end of it, all processes of course need to know. We
1695  // could put this information somewhere into the shmem area, along with the
1696  // other data, but that seems clumsy. It turns out that when calling
1697  // MPI_Win_allocate_shared, we are asked for the value of a parameter called
1698  // 'disp_unit' whose meaning is difficult to determine from the MPI
1699  // documentation, and that we do not actually need. So we "abuse" it a bit: On
1700  // the shmem root, we put the array size into it. Later on, the remaining
1701  // processes can query the shmem root's value of 'disp_unit', and so will be
1702  // able to learn about the array size that way.
1703  MPI_Win shmem_window;
1704  void * base_ptr;
1705  const MPI_Aint align_by = 64;
1706  const MPI_Aint alloc_size =
1707  Utilities::MPI::broadcast(shmem_group_communicator,
1708  (size() * sizeof(T) + (align_by - 1)),
1709  0);
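  // (Editorial note: for example, replicating 1000 doubles with
  // align_by == 64 requests 8000 + 63 = 8063 bytes, enough to carve out a
  // 64-byte aligned run of 8000 bytes wherever the base address falls.)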
1710 
1711  {
1712  const int disp_unit = (is_shmem_root ? size() : 1);
1713 
1714  int ierr;
1715 
1716  MPI_Info mpi_info;
1717  ierr = MPI_Info_create(&mpi_info);
1718  AssertThrowMPI(ierr);
1719  ierr = MPI_Info_set(mpi_info,
1720  "mpi_minimum_memory_alignment",
1721  std::to_string(align_by).c_str());
1722  AssertThrowMPI(ierr);
1723  ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
1724  disp_unit,
1725  mpi_info,
1726  shmem_group_communicator,
1727  &base_ptr,
1728  &shmem_window);
1729  AssertThrowMPI(ierr);
1730 
1731  ierr = MPI_Info_free(&mpi_info);
1732  AssertThrowMPI(ierr);
1733  }
1734 
1735 
1736  // **** Step 4 ****
1737  // The next step is to teach all non-shmem root processes what the pointer to
1738  // the array is that the shmem-root created. MPI has a nifty way for this
1739  // given that only a single process actually allocated memory in the window:
1740  // When calling MPI_Win_shared_query, the MPI documentation says that
1741  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
1742  // the pointer, disp_unit, and size of the memory segment belonging the lowest
1743  // rank that specified size > 0. If all processes in the group attached to the
1744  // window specified size = 0, then the call returns size = 0 and a baseptr as
1745  // if MPI_ALLOC_MEM was called with size = 0."
1746  //
1747  // This will allow us to obtain the pointer to the shmem root's memory area,
1748  // which is the only one we care about. (None of the other processes have
1749  // even allocated any memory.) But this will also retrieve the shmem root's
1750  // disp_unit, which in step 3 above we have abused to pass along the number of
1751  // elements in the array.
1752  //
1753  // We don't need to do this on the shmem root process: This process has
1754  // already gotten its base_ptr correctly set above, and we can determine the
1755  // array size by just calling size().
1756  size_type array_size = (is_shmem_root ? size() : size_type(0));
1757  if (is_shmem_root == false)
1758  {
1759  int disp_unit;
1760  MPI_Aint alloc_size; // not actually used
1761  const int ierr = MPI_Win_shared_query(
1762  shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
1763  AssertThrowMPI(ierr);
1764 
1765  // Make sure we actually got a pointer, and also unpack the array size as
1766  // discussed above.
1767  Assert(base_ptr != nullptr, ExcInternalError());
1768 
1769  array_size = disp_unit;
1770  }
1771 
1772 
1773  // **** Step 5 ****
1774  // Now that all processes know the address of the space that is visible to
1775  // everyone, we need to figure out whether it is properly aligned and if not,
1776  // find the next aligned address.
1777  //
1778  // std::align does that, but it also modifies its last two arguments. The
1779  // documentation of that function at
1780  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but I
1781  // *think* that the following should do given that we do not use base_ptr and
1782  // available_space any further after the call to std::align.
1783  std::size_t available_space = alloc_size;
1784  void * base_ptr_backup = base_ptr;
1785  T * aligned_shmem_pointer = static_cast<T *>(
1786  std::align(align_by, array_size * sizeof(T), base_ptr, available_space));
1787  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());
1788 
1789  // There is one step to guard against. It is *conceivable* that the base_ptr
1790  // we have previously obtained from MPI_Win_shared_query is mapped so
1791  // awkwardly into the different MPI processes' memory spaces that it is
1792  // aligned in one memory space, but not another. In that case, different
1793  // processes would align base_ptr differently, and adjust available_space
1794  // differently. We can check that by making sure that the max (or min) over
1795  // all processes is equal to every process's value. If that's not the case,
1796  // then the whole idea of aligning above is wrong and we need to rethink what
1797  // it means to align data in a shared memory space.
1798  //
1799  // One might be tempted to think that this is not how MPI implementations
1800  // actually arrange things. Alas, when developing this functionality in 2021,
1801  // this is really how at least OpenMPI ends up doing things. (This is with an
1802  // OpenMPI implementation of MPI 3.1, so it does not support the flag we set
1803  // in the MPI_Info structure above when allocating the memory window.) Indeed,
1804  // when running this code on three processes, one ends up with base_ptr values
1805  // of
1806  // base_ptr=0x7f0842f02108
1807  // base_ptr=0x7fc0a47881d0
1808  // base_ptr=0x7f64872db108
1809  // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so there
1810  // is no common offset std::align could find that leads to a 64-byte
1811  // aligned memory address in all three memory spaces. That's a tremendous
1812  // nuisance and there is really nothing we can do about this other than just
1813  // fall back on the (unaligned) base_ptr in that case.
1814  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
1815  Utilities::MPI::max(available_space, shmem_group_communicator))
1816  aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);
1817 
1818 
1819  // **** Step 6 ****
1820  // If this is the shmem root process, we need to copy the data into the
1821  // shared memory space.
1822  if (is_shmem_root)
1823  {
1824  if (std::is_trivial<T>::value == true)
1825  std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
1826  else
1827  for (std::size_t i = 0; i < size(); ++i)
1828  new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
1829  }
1830 
1831  // Make sure that the shared memory host has copied the data before we try to
1832  // access it.
1833  const int ierr = MPI_Barrier(shmem_group_communicator);
1834  AssertThrowMPI(ierr);
1835 
1836  // **** Step 7 ****
1837  // Finally, we need to set the pointers of this object to what we just
1838  // learned. This also releases all memory that may have been in use
1839  // previously.
1840  //
1841  // The part that is a bit tricky is how to write the deleter of this
1842  // shared memory object. When we want to get rid of it, we need to
1843  // also release the MPI_Win object along with the shmem_group_communicator
1844  // object. That's because as long as we use the shared memory, we still need
1845  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
1846  // communicator. (The former is definitely true, the latter is not quite clear
1847  // from the MPI documentation, but seems reasonable.) So we need to have a
1848  // deleter for the pointer that ensures that upon release of the memory, we
1849  // not only call the destructor of these memory elements (but only once, on
1850  // the shmem root!) but also destroy the MPI_Win and the communicator. All of
1851  // that is encapsulated in the following call where the deleter makes copies
1852  // of the arguments in the lambda capture.
1853  elements = decltype(elements)(aligned_shmem_pointer,
1854  Deleter(this,
1855  is_shmem_root,
1856  aligned_shmem_pointer,
1857  shmem_group_communicator,
1858  shmem_window));
1859 
1860  // We then also have to set the other two pointers that define the state of
1861  // the current object. Note that the new buffer size is exactly as large as
1862  // necessary, i.e., can store size() elements, regardless of the number of
1863  // allocated elements in the original objects.
1864  used_elements_end = elements.get() + array_size;
 1865  allocated_elements_end = used_elements_end;
 1866 
1867  // **** Consistency check ****
1868  // At this point, each process should have a copy of the data.
1869  // Verify this in some sort of round-about way
1870 # ifdef DEBUG
1871  const std::vector<char> packed_data = Utilities::pack(*this);
1872  const int hash =
1873  std::accumulate(packed_data.begin(), packed_data.end(), int(0));
1874  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
1875 # endif
1876 
1877 
1878 
1879 # else
1880  // If we only have MPI 2.x, then simply broadcast the current object to all
1881  // other processes and forego the idea of using shmem
1882  *this = Utilities::MPI::broadcast(communicator, *this, root_process);
1883 # endif
1884 # else
1885  // No MPI -> nothing to replicate
1886  (void)communicator;
1887  (void)root_process;
1888 # endif
1889 }
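// Usage sketch (editorial addition, not part of the library source): a
// typical call pattern for the function above, assuming deal.II was built
// with MPI and that only the root process has filled the vector.
//
// @code
//   AlignedVector<double> lookup_table;
//   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
//     lookup_table.resize(1000, 3.14); // fill on the root process only
//
//   // Afterwards every process can read the data; processes on the same
//   // node share a single copy placed in MPI-3 shared memory.
//   lookup_table.replicate_across_communicator(MPI_COMM_WORLD,
//                                              /*root_process=*/0);
// @endcode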
1890 
1891 
1892 
1893 template <class T>
1894 inline void
 1895 AlignedVector<T>::swap(AlignedVector<T> &vec)
 1896 {
1897  // Swap the data in the 'elements' objects. Then also make sure that
1898  // their respective deleter objects point to the right place.
1899  std::swap(elements, vec.elements);
1900  elements.get_deleter().reset_owning_object(this);
1901  vec.elements.get_deleter().reset_owning_object(&vec);
1902 
1903  // Now also swap the remaining members.
1904  std::swap(used_elements_end, vec.used_elements_end);
1905  std::swap(allocated_elements_end, vec.allocated_elements_end);
1906 }
1907 
1908 
1909 
1910 template <class T>
1911 inline bool
 1912 AlignedVector<T>::empty() const
 1913 {
1914  return used_elements_end == elements.get();
1915 }
1916 
1917 
1918 
1919 template <class T>
1920 inline typename AlignedVector<T>::size_type
1921 AlignedVector<T>::size() const
1922 {
1923  return used_elements_end - elements.get();
1924 }
1925 
1926 
1927 
1928 template <class T>
1929 inline typename AlignedVector<T>::size_type
 1930 AlignedVector<T>::capacity() const
 1931 {
1932  return allocated_elements_end - elements.get();
1933 }
1934 
1935 
1936 
1937 template <class T>
1938 inline typename AlignedVector<T>::reference
 1939 AlignedVector<T>::operator[](const size_type index)
 1940 {
1941  AssertIndexRange(index, size());
1942  return elements[index];
1943 }
1944 
1945 
1946 
1947 template <class T>
1948 inline typename AlignedVector<T>::const_reference
1949 AlignedVector<T>::operator[](const size_type index) const
1950 {
1951  AssertIndexRange(index, size());
1952  return elements[index];
1953 }
1954 
1955 
1956 
1957 template <typename T>
1958 inline typename AlignedVector<T>::pointer
 1959 AlignedVector<T>::data()
 1960 {
1961  return elements.get();
1962 }
1963 
1964 
1965 
1966 template <typename T>
1967 inline typename AlignedVector<T>::const_pointer
1968 AlignedVector<T>::data() const
1969 {
1970  return elements.get();
1971 }
1972 
1973 
1974 
1975 template <class T>
1976 inline typename AlignedVector<T>::iterator
 1977 AlignedVector<T>::begin()
 1978 {
1979  return elements.get();
1980 }
1981 
1982 
1983 
1984 template <class T>
1985 inline typename AlignedVector<T>::iterator
 1986 AlignedVector<T>::end()
 1987 {
1988  return used_elements_end;
1989 }
1990 
1991 
1992 
1993 template <class T>
1994 inline typename AlignedVector<T>::const_iterator
 1995 AlignedVector<T>::begin() const
 1996 {
1997  return elements.get();
1998 }
1999 
2000 
2001 
2002 template <class T>
2003 inline typename AlignedVector<T>::const_iterator
2004 AlignedVector<T>::end() const
2005 {
2006  return used_elements_end;
2007 }
2008 
2009 
2010 
2011 template <class T>
2012 template <class Archive>
2013 inline void
2014 AlignedVector<T>::save(Archive &ar, const unsigned int) const
2015 {
2016  size_type vec_size = size();
2017  ar & vec_size;
2018  if (vec_size > 0)
2019  ar &boost::serialization::make_array(elements.get(), vec_size);
2020 }
2021 
2022 
2023 
2024 template <class T>
2025 template <class Archive>
2026 inline void
2027 AlignedVector<T>::load(Archive &ar, const unsigned int)
2028 {
2029  size_type vec_size = 0;
2030  ar & vec_size;
2031 
2032  if (vec_size > 0)
2033  {
2034  reserve(vec_size);
2035  ar &boost::serialization::make_array(elements.get(), vec_size);
2036  used_elements_end = elements.get() + vec_size;
2037  }
2038 }
2039 
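// Usage sketch (editorial addition, not part of the library source):
// serializing through the save()/load() split defined above, assuming
// Boost.Serialization text archives.
//
// @code
//   #include <boost/archive/text_iarchive.hpp>
//   #include <boost/archive/text_oarchive.hpp>
//
//   #include <sstream>
//
//   AlignedVector<double> v(4, 1.0);
//   std::ostringstream    out;
//   {
//     boost::archive::text_oarchive archive(out);
//     archive << v; // dispatches to save() via the split-member macro
//   }
//
//   AlignedVector<double> w;
//   std::istringstream in(out.str());
//   {
//     boost::archive::text_iarchive archive(in);
//     archive >> w; // dispatches to load()
//   }
// @endcode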
2040 
2041 
2042 template <class T>
2043 inline typename AlignedVector<T>::size_type
 2044 AlignedVector<T>::memory_consumption() const
 2045 {
2046  size_type memory = sizeof(*this);
2047  for (const T *t = elements.get(); t != used_elements_end; ++t)
2048  memory += ::MemoryConsumption::memory_consumption(*t);
2049  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
2050  return memory;
2051 }
2052 
2053 
2054 #endif // ifndef DOXYGEN
2055 
2056 
2062 template <class T>
2063 bool
 2064 operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
 2065 {
2066  if (lhs.size() != rhs.size())
2067  return false;
2068  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
2069  rit = rhs.begin();
2070  lit != lhs.end();
2071  ++lit, ++rit)
2072  if (*lit != *rit)
2073  return false;
2074  return true;
2075 }
2076 
2077 
2078 
2084 template <class T>
2085 bool
 2086 operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
 2087 {
2088  return !(operator==(lhs, rhs));
2089 }
2090 
2091 
 2092 DEAL_II_NAMESPACE_CLOSE
 2093 
2094 #endif