Reference documentation for deal.II version 9.3.3
aligned_vector.h
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2011 - 2021 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
17#ifndef dealii_aligned_vector_h
18#define dealii_aligned_vector_h
19
20#include <deal.II/base/config.h>
21
22#include <deal.II/base/exceptions.h>
23#include <deal.II/base/memory_consumption.h>
24#include <deal.II/base/mpi.h>
25#include <deal.II/base/parallel.h>
26#include <deal.II/base/utilities.h>
27
28// boost::serialization::make_array used to be in array.hpp, but was
29// moved to a different file in BOOST 1.64
30#include <boost/version.hpp>
31#if BOOST_VERSION >= 106400
32# include <boost/serialization/array_wrapper.hpp>
33#else
34# include <boost/serialization/array.hpp>
35#endif
36#include <boost/serialization/split_member.hpp>
37
38#include <cstring>
39#include <memory>
40#include <type_traits>
41
42
43
44DEAL_II_NAMESPACE_OPEN
45
46
60template <class T>
61class AlignedVector
62{
63public:
68 using value_type = T;
69 using pointer = value_type *;
70 using const_pointer = const value_type *;
71 using iterator = value_type *;
72 using const_iterator = const value_type *;
73 using reference = value_type &;
74 using const_reference = const value_type &;
75 using size_type = std::size_t;
76
81 AlignedVector();
88 explicit AlignedVector(const size_type size, const T &init = T());
89
93 ~AlignedVector() = default;
94
101 AlignedVector(const AlignedVector<T> &vec);
102
107 AlignedVector(AlignedVector<T> &&vec) noexcept;
108
114 AlignedVector &
115 operator=(const AlignedVector<T> &vec);
116
120 AlignedVector &
121 operator=(AlignedVector<T> &&vec) noexcept;
144 void
145 resize_fast(const size_type new_size);
146
159 void
160 resize(const size_type new_size);
161
177 void
178 resize(const size_type new_size, const T &init);
179
200 void
201 reserve(const size_type new_allocated_size);
202
207 void
208 clear();
215 void
216 push_back(const T in_data);
217
222 reference
223 back();
224
227 const_reference
228 back() const;
229
234 template <typename ForwardIterator>
235 void
236 insert_back(ForwardIterator begin, ForwardIterator end);
237
247 void
248 fill();
258 void
259 fill(const T &element);
260
348 void
349 replicate_across_communicator(const MPI_Comm & communicator,
350 const unsigned int root_process);
351
355 void
356 swap(AlignedVector<T> &vec);
361 bool
362 empty() const;
363
367 size_type
368 size() const;
369
374 size_type
375 capacity() const;
376
380 reference
381 operator[](const size_type index);
382
385 const_reference
386 operator[](const size_type index) const;
390 pointer
391 data();
392
396 const_pointer
397 data() const;
398
403 iterator
404 begin();
405
409 iterator
410 end();
411
414 const_iterator
415 begin() const;
416
420 const_iterator
421 end() const;
422
429 size_type
430 memory_consumption() const;
436 template <class Archive>
437 void
438 save(Archive &ar, const unsigned int version) const;
439
445 template <class Archive>
446 void
447 load(Archive &ar, const unsigned int version);
448
449#ifdef DOXYGEN
455 template <class Archive>
456 void
457 serialize(Archive &archive, const unsigned int version);
458#else
459 // This macro defines the serialize() method that is compatible with
460 // the templated save() and load() method that have been implemented.
461 BOOST_SERIALIZATION_SPLIT_MEMBER()
462#endif
463
464private:
468 std::unique_ptr<T[], std::function<void(T *)>> elements;
469
473 T *used_elements_end;
474
478 T *allocated_elements_end;
479};
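
// Usage sketch (editor's illustration, not part of this header): the class
// behaves much like std::vector for the operations declared above, but
// guarantees that the element array starts at a 64-byte boundary:
//
//   AlignedVector<double> v(8, 1.0); // eight doubles, all set to 1.0
//   v.push_back(2.0);                // amortized constant-time growth
//   v.resize_fast(16);               // new trivial elements left uninitialized
//   // v.data(), read as an integer address, is divisible by 64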
480
481
482// ------------------------------- inline functions --------------------------
483
489namespace internal
490{
506 template <typename T>
507 class AlignedVectorCopy : private dealii::parallel::ParallelForInteger
508 {
509 static const std::size_t minimum_parallel_grain_size =
510 160000 / sizeof(T) + 1;
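 // (Worked example, added for illustration: for T = double this evaluates
 // to 160000 / 8 + 1 = 20001 elements, i.e. a task is only split across
 // threads once it covers roughly 160 kB of data, so short loops stay
 // serial.)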
511
512 public:
522 AlignedVectorCopy(const T *const source_begin,
523 const T *const source_end,
524 T *const destination)
525 : source_(source_begin)
526 , destination_(destination)
527 {
528 Assert(source_end >= source_begin, ExcInternalError());
529 Assert(source_end == source_begin || destination != nullptr,
530 ExcInternalError());
531 const std::size_t size = source_end - source_begin;
532 if (size < minimum_parallel_grain_size)
533 AlignedVectorCopy::apply_to_subrange(0, size);
534 else
535 apply_parallel(0, size, minimum_parallel_grain_size);
536 }
537
542 virtual void
543 apply_to_subrange(const std::size_t begin,
544 const std::size_t end) const override
545 {
546 if (end == begin)
547 return;
548
549 // for classes trivial assignment can use memcpy. cast element to
550 // (void*) to silence compiler warning for virtual classes (they will
551 // never arrive here because they are non-trivial).
552
553 if (std::is_trivial<T>::value == true)
554 std::memcpy(static_cast<void *>(destination_ + begin),
555 static_cast<const void *>(source_ + begin),
556 (end - begin) * sizeof(T));
557 else
558 for (std::size_t i = begin; i < end; ++i)
559 new (&destination_[i]) T(source_[i]);
560 }
561
562 private:
563 const T *const source_;
564 T *const destination_;
565 };
566
567
573 template <typename T>
574 class AlignedVectorMove : private dealii::parallel::ParallelForInteger
575 {
576 static const std::size_t minimum_parallel_grain_size =
577 160000 / sizeof(T) + 1;
578
579 public:
589 AlignedVectorMove(T *const source_begin,
590 T *const source_end,
591 T *const destination)
592 : source_(source_begin)
593 , destination_(destination)
594 {
595 Assert(source_end >= source_begin, ExcInternalError());
596 Assert(source_end == source_begin || destination != nullptr,
597 ExcInternalError());
598 const std::size_t size = source_end - source_begin;
599 if (size < minimum_parallel_grain_size)
600 AlignedVectorMove::apply_to_subrange(0, size);
601 else
602 apply_parallel(0, size, minimum_parallel_grain_size);
603 }
604
609 virtual void
610 apply_to_subrange(const std::size_t begin,
611 const std::size_t end) const override
612 {
613 if (end == begin)
614 return;
615
616 // Classes with trivial assignment can use memcpy. cast element to
617 // (void*) to silence compiler warning for virtual classes (they will
618 // never arrive here because they are non-trivial).
619 if (std::is_trivial<T>::value == true)
620 std::memcpy(static_cast<void *>(destination_ + begin),
621 static_cast<void *>(source_ + begin),
622 (end - begin) * sizeof(T));
623 else
624 // For everything else just use the move constructor. The original
625 // object remains alive and will be destroyed elsewhere.
626 for (std::size_t i = begin; i < end; ++i)
627 new (&destination_[i]) T(std::move(source_[i]));
628 }
629
630 private:
631 T *const source_;
632 T *const destination_;
633 };
634
635
647 template <typename T, bool initialize_memory>
648 class AlignedVectorSet : private dealii::parallel::ParallelForInteger
649 {
650 static const std::size_t minimum_parallel_grain_size =
651 160000 / sizeof(T) + 1;
652
653 public:
658 AlignedVectorSet(const std::size_t size,
659 const T & element,
660 T *const destination)
661 : element_(element)
662 , destination_(destination)
663 , trivial_element(false)
664 {
665 if (size == 0)
666 return;
667 Assert(destination != nullptr, ExcInternalError());
668
669 // do not use memcmp for long double because on some systems it does not
670 // completely fill its memory and may lead to false positives in
671 // e.g. valgrind
672 if (std::is_trivial<T>::value == true &&
673 std::is_same<T, long double>::value == false)
674 {
675 const unsigned char zero[sizeof(T)] = {};
676 // cast element to (void*) to silence compiler warning for virtual
677 // classes (they will never arrive here because they are
678 // non-trivial).
679 if (std::memcmp(zero,
680 static_cast<const void *>(&element),
681 sizeof(T)) == 0)
682 trivial_element = true;
683 }
684 if (size < minimum_parallel_grain_size)
685 AlignedVectorSet::apply_to_subrange(0, size);
686 else
687 apply_parallel(0, size, minimum_parallel_grain_size);
688 }
689
693 virtual void
694 apply_to_subrange(const std::size_t begin,
695 const std::size_t end) const override
696 {
697 // for classes with trivial assignment of zero can use memset. cast
698 // element to (void*) to silence compiler warning for virtual
699 // classes (they will never arrive here because they are
700 // non-trivial).
701 if (std::is_trivial<T>::value == true && trivial_element)
702 std::memset(static_cast<void *>(destination_ + begin),
703 0,
704 (end - begin) * sizeof(T));
705 else
706 copy_construct_or_assign(
707 begin, end, std::integral_constant<bool, initialize_memory>());
708 }
709
710 private:
711 const T & element_;
712 mutable T *destination_;
713 bool trivial_element;
714
715 // copy assignment operation
716 void
717 copy_construct_or_assign(const std::size_t begin,
718 const std::size_t end,
719 std::integral_constant<bool, false>) const
720 {
721 for (std::size_t i = begin; i < end; ++i)
722 destination_[i] = element_;
723 }
724
725 // copy constructor (memory initialization)
726 void
727 copy_construct_or_assign(const std::size_t begin,
728 const std::size_t end,
729 std::integral_constant<bool, true>) const
730 {
731 for (std::size_t i = begin; i < end; ++i)
732 new (&destination_[i]) T(element_);
733 }
734 };
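
// The copy_construct_or_assign() overloads above select placement-new versus
// plain assignment at compile time via tag dispatch on std::integral_constant.
// A self-contained sketch of the same pattern (illustrative, hypothetical
// names, not part of this header):
//
//   void set_one(double *dst, const double &src,
//                std::integral_constant<bool, true>)  { new (dst) double(src); }
//   void set_one(double *dst, const double &src,
//                std::integral_constant<bool, false>) { *dst = src; }
//
//   template <bool construct>
//   void set_one(double *dst, const double &src)
//   {
//     set_one(dst, src, std::integral_constant<bool, construct>());
//   }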
735
736
737
749 template <typename T, bool initialize_memory>
750 class AlignedVectorDefaultInitialize
751 : private dealii::parallel::ParallelForInteger
752 {
753 static const std::size_t minimum_parallel_grain_size =
754 160000 / sizeof(T) + 1;
755
756 public:
761 AlignedVectorDefaultInitialize(const std::size_t size, T *const destination)
762 : destination_(destination)
763 {
764 if (size == 0)
765 return;
766 Assert(destination != nullptr, ExcInternalError());
767
768 if (size < minimum_parallel_grain_size)
769 AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
770 else
771 apply_parallel(0, size, minimum_parallel_grain_size);
772 }
773
777 virtual void
778 apply_to_subrange(const std::size_t begin,
779 const std::size_t end) const override
780 {
781 // for classes with trivial assignment of zero can use memset. cast
782 // element to (void*) to silence compiler warning for virtual
783 // classes (they will never arrive here because they are
784 // non-trivial).
785 if (std::is_trivial<T>::value == true)
786 std::memset(static_cast<void *>(destination_ + begin),
787 0,
788 (end - begin) * sizeof(T));
789 else
790 default_construct_or_assign(
791 begin, end, std::integral_constant<bool, initialize_memory>());
792 }
793
794 private:
795 mutable T *destination_;
796
797 // copy assignment operation
798 void
799 default_construct_or_assign(const std::size_t begin,
800 const std::size_t end,
801 std::integral_constant<bool, false>) const
802 {
803 for (std::size_t i = begin; i < end; ++i)
804 destination_[i] = std::move(T());
805 }
806
807 // copy constructor (memory initialization)
808 void
809 default_construct_or_assign(const std::size_t begin,
810 const std::size_t end,
811 std::integral_constant<bool, true>) const
812 {
813 for (std::size_t i = begin; i < end; ++i)
814 new (&destination_[i]) T;
815 }
816 };
817
818} // end of namespace internal
819
820
821#ifndef DOXYGEN
822
823
824template <class T>
825inline AlignedVector<T>::AlignedVector()
826 : elements(nullptr, [](T *) { Assert(false, ExcInternalError()); })
827 , used_elements_end(nullptr)
828 , allocated_elements_end(nullptr)
829{}
830
831
832
833template <class T>
834inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
835 : elements(nullptr, [](T *) { Assert(false, ExcInternalError()); })
836 , used_elements_end(nullptr)
837 , allocated_elements_end(nullptr)
838{
839 if (size > 0)
840 resize(size, init);
841}
842
843
844
845template <class T>
846inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
847 : elements(nullptr, [](T *) { Assert(false, ExcInternalError()); })
848 , used_elements_end(nullptr)
849 , allocated_elements_end(nullptr)
850{
851 // copy the data from vec
852 reserve(vec.size());
853 used_elements_end = allocated_elements_end;
854 internal::AlignedVectorCopy<T>(vec.elements.get(),
855 vec.used_elements_end,
856 elements.get());
857}
858
859
860
861template <class T>
862inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
863 : AlignedVector<T>()
864{
865 // forward to the move operator
866 *this = std::move(vec);
867}
868
869
870
871template <class T>
872inline AlignedVector<T> &
873AlignedVector<T>::operator=(const AlignedVector<T> &vec)
874{
875 resize(0);
876 resize_fast(vec.used_elements_end - vec.elements.get());
877 internal::AlignedVectorCopy<T>(vec.elements.get(),
878 vec.used_elements_end,
879 elements.get());
880 return *this;
881}
882
883
884
885template <class T>
886inline AlignedVector<T> &
887AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
888{
889 clear();
890
891 // Move the actual data in the 'elements' object. One problem is that this
892 // also moves the deleter object, but the deleter object is a lambda function
893 // that references 'this' (i.e., the 'this' pointer of the *moved-from*
894 // object). So what we actually do is steal the pointer via
895 // std::unique_ptr::release() and then install our own deleter object that
896 // mirrors the one used in reserve() below.
897 elements = decltype(elements)(vec.elements.release(), [this](T *ptr) {
898 if (ptr != nullptr)
899 {
900 Assert(this->used_elements_end != nullptr, ExcInternalError());
901
902 if (std::is_trivial<T>::value == false)
903 for (T *p = this->used_elements_end - 1; p >= ptr; --p)
904 p->~T();
905 }
906
907 std::free(ptr);
908 });
909
910 // Then also steal the other pointers and clear them in the original object:
911 used_elements_end = vec.used_elements_end;
912 allocated_elements_end = vec.allocated_elements_end;
913
914 vec.used_elements_end = nullptr;
915 vec.allocated_elements_end = nullptr;
916
917 return *this;
918}
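
// Why release() plus a fresh lambda, instead of moving the unique_ptr
// wholesale? A sketch of the pitfall being avoided (illustrative only):
//
//   // the deleter captures the 'this' of the object it was created in:
//   elements = decltype(elements)(ptr, [this](T *q) { /* uses this->... */ });
//   other.elements = std::move(elements); // deleter still refers to old 'this'
//
// After such a move, destroying 'other' would consult the moved-from object's
// 'used_elements_end'. Re-installing a deleter that captures the new owner's
// 'this' avoids that dangling reference.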
919
920
921
922template <class T>
923inline void
924AlignedVector<T>::resize_fast(const size_type new_size)
925{
926 const size_type old_size = size();
927
928 if (new_size == 0)
929 clear();
930 else if (new_size == old_size)
931 {} // nothing to do here
932 else if (new_size < old_size)
933 {
934 // call destructor on fields that are released, if the type requires it.
935 // doing it backward releases the elements in reverse order as compared to
936 // how they were created
937 if (std::is_trivial<T>::value == false)
938 for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
939 p->~T();
940 used_elements_end = elements.get() + new_size;
941 }
942 else // new_size > old_size
943 {
944 // Allocate more space, and claim that space as used
945 reserve(new_size);
946 used_elements_end = elements.get() + new_size;
947
948 // need to still set the values in case the class is non-trivial because
949 // virtual classes etc. need to run their (default) constructor
950 if (std::is_trivial<T>::value == false)
951 dealii::internal::AlignedVectorDefaultInitialize<T, true>(
952 new_size - old_size, elements.get() + old_size);
953 }
954}
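
// Usage note (illustrative): for a trivial T,
//
//   AlignedVector<double> v;
//   v.resize_fast(4); // elements hold indeterminate values
//
// whereas v.resize(4) would have default-initialized the new elements. The
// _fast variant is therefore the right choice when the caller overwrites the
// new range anyway, e.g. via fill() or a subsequent copy.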
955
956
957
958template <class T>
959inline void
960AlignedVector<T>::resize(const size_type new_size)
961{
962 const size_type old_size = size();
963
964 if (new_size == 0)
965 clear();
966 else if (new_size == old_size)
967 {} // nothing to do here
968 else if (new_size < old_size)
969 {
970 // call destructor on fields that are released, if the type requires it.
971 // doing it backward releases the elements in reverse order as compared to
972 // how they were created
973 if (std::is_trivial<T>::value == false)
974 for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
975 p->~T();
976 used_elements_end = elements.get() + new_size;
977 }
978 else // new_size > old_size
979 {
980 // Allocate more space, and claim that space as used
981 reserve(new_size);
982 used_elements_end = elements.get() + new_size;
983
984 // finally set the values to the default initializer
985 dealii::internal::AlignedVectorDefaultInitialize<T, true>(
986 new_size - old_size, elements.get() + old_size);
987 }
988}
989
990
991
992template <class T>
993inline void
994AlignedVector<T>::resize(const size_type new_size, const T &init)
995{
996 const size_type old_size = size();
997
998 if (new_size == 0)
999 clear();
1000 else if (new_size == old_size)
1001 {} // nothing to do here
1002 else if (new_size < old_size)
1003 {
1004 // call destructor on fields that are released, if the type requires it.
1005 // doing it backward releases the elements in reverse order as compared to
1006 // how they were created
1007 if (std::is_trivial<T>::value == false)
1008 for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1009 p->~T();
1010 used_elements_end = elements.get() + new_size;
1011 }
1012 else // new_size > old_size
1013 {
1014 // Allocate more space, and claim that space as used
1015 reserve(new_size);
1016 used_elements_end = elements.get() + new_size;
1017
1018 // finally set the desired init values
1019 dealii::internal::AlignedVectorSet<T, true>(new_size - old_size,
1020 init,
1021 elements.get() + old_size);
1022 }
1023}
1024
1025
1026
1027template <class T>
1028inline void
1029AlignedVector<T>::reserve(const size_type new_allocated_size)
1030{
1031 const size_type old_size = used_elements_end - elements.get();
1032 const size_type old_allocated_size = allocated_elements_end - elements.get();
1033 if (new_allocated_size > old_allocated_size)
1034 {
1035 // if we continuously increase the size of the vector, we might be
1036 // reallocating a lot of times. therefore, try to increase the size more
1037 // aggressively
1038 const size_type new_size =
1039 std::max(new_allocated_size, 2 * old_allocated_size);
1040
1041 // allocate and align along 64-byte boundaries (this is enough for all
1042 // levels of vectorization currently supported by deal.II)
1043 T *new_data_ptr;
1044 Utilities::posix_memalign(
1045 reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));
1046
1047 // Now create a deleter that encodes what should happen when the object is
1048 // released: We need to destroy the objects that are currently alive (in
1049 // reverse order, and then release the memory. Note that we catch the
1050 // 'this' pointer because the number of elements currently alive might
1051 // change over time.
1052 auto deleter = [this](T *ptr) {
1053 if (ptr != nullptr)
1054 {
1055 Assert(this->used_elements_end != nullptr, ExcInternalError());
1056
1057 if (std::is_trivial<T>::value == false)
1058 for (T *p = this->used_elements_end - 1; p >= ptr; --p)
1059 p->~T();
1060 }
1061
1062 std::free(ptr);
1063 };
1064
1065 // copy whatever elements we need to retain
1066 if (new_allocated_size > 0)
1067 dealii::internal::AlignedVectorMove<T>(elements.get(),
1068 elements.get() + old_size,
1069 new_data_ptr);
1070
1071 // Now reset all of the member variables of the current object
1072 // based on the allocation above. Assigning to a std::unique_ptr
1073 // object also releases the previously pointed to memory.
1074 //
1075 // Note that at the time of releasing the old memory, 'used_elements_end'
1076 // still points to its previous value, and this is important for the
1077 // deleter object of the previously allocated array (see how it loops over
1078 // the to-be-destroyed elements a few lines above).
1079 elements = decltype(elements)(new_data_ptr, deleter);
1080 used_elements_end = elements.get() + old_size;
1081 allocated_elements_end = elements.get() + new_size;
1082 }
1083 else if (new_allocated_size == 0)
1084 clear();
1085 else // size_alloc < allocated_size
1086 {} // nothing to do here
1087}
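
// The allocation above ultimately rests on POSIX posix_memalign(), wrapped by
// Utilities::posix_memalign(). A stand-alone sketch of the same idea
// (illustrative; assumes a POSIX system and <cstdlib>, error handling omitted):
//
//   void *raw = nullptr;
//   if (::posix_memalign(&raw, 64, n * sizeof(double)) == 0)
//     {
//       double *data = static_cast<double *>(raw); // 64-byte aligned
//       // ... construct and use the elements ...
//       std::free(raw);
//     }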
1088
1089
1090
1091template <class T>
1092inline void
1093AlignedVector<T>::clear()
1094{
1095 // Just release the memory (which also calls the destructor of the elements),
1096 // and then set the auxiliary pointers to invalid values.
1097 //
1098 // Note that at the time of releasing the old memory, 'used_elements_end'
1099 // still points to its previous value, and this is important for the
1100 // deleter object of the previously allocated array (see how it loops over
1101 // the to-be-destroyed elements a few lines above).
1102 elements.reset();
1103 used_elements_end = nullptr;
1104 allocated_elements_end = nullptr;
1105}
1106
1107
1108
1109template <class T>
1110inline void
1111AlignedVector<T>::push_back(const T in_data)
1112{
1113 Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
1114 if (used_elements_end == allocated_elements_end)
1115 reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
1116 if (std::is_trivial<T>::value == false)
1117 new (used_elements_end++) T(in_data);
1118 else
1119 *used_elements_end++ = in_data;
1120}
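
// (Illustration: starting from an empty vector, repeated push_back() calls
// reserve capacities 16, 32, 64, ..., so n insertions trigger only O(log n)
// reallocations and each element is moved O(1) times on average.)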
1121
1122
1123
1124template <class T>
1125inline typename AlignedVector<T>::reference
1126AlignedVector<T>::back()
1127{
1128 AssertIndexRange(0, size());
1129 T *field = used_elements_end - 1;
1130 return *field;
1131}
1132
1133
1134
1135template <class T>
1136inline typename AlignedVector<T>::const_reference
1137AlignedVector<T>::back() const
1138{
1139 AssertIndexRange(0, size());
1140 const T *field = used_elements_end - 1;
1141 return *field;
1142}
1143
1144
1145
1146template <class T>
1147template <typename ForwardIterator>
1148inline void
1149AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
1150{
1151 const unsigned int old_size = size();
1152 reserve(old_size + (end - begin));
1153 for (; begin != end; ++begin, ++used_elements_end)
1154 {
1155 if (std::is_trivial<T>::value == false)
1156 new (used_elements_end) T;
1157 *used_elements_end = *begin;
1158 }
1159}
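
// Usage sketch (illustrative): appending the contents of a std::vector, whose
// iterators support the 'end - begin' distance computation used above:
//
//   std::vector<double> src = {1., 2., 3.};
//   AlignedVector<double> v;
//   v.insert_back(src.begin(), src.end()); // v now holds 1., 2., 3.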
1160
1161
1162
1163template <class T>
1164inline void
1165AlignedVector<T>::fill()
1166{
1167 dealii::internal::AlignedVectorDefaultInitialize<T, false>(size(),
1168 elements.get());
1169}
1170
1171
1172
1173template <class T>
1174inline void
1175AlignedVector<T>::fill(const T &value)
1176{
1177 dealii::internal::AlignedVectorSet<T, false>(size(), value, elements.get());
1178}
1179
1180
1181
1182template <class T>
1183inline void
1184AlignedVector<T>::replicate_across_communicator(const MPI_Comm & communicator,
1185 const unsigned int root_process)
1186{
1187# ifdef DEAL_II_WITH_MPI
1188# if DEAL_II_MPI_VERSION_GTE(3, 0)
1189
1190 // **** Step 0 ****
1191 // All but the root process no longer need their data, so release the memory
1192 // used to store the previous elements.
1193 if (Utilities::MPI::this_mpi_process(communicator) != root_process)
1194 {
1195 elements.reset();
1196 used_elements_end = nullptr;
1197 allocated_elements_end = nullptr;
1198 }
1199
1200 // **** Step 1 ****
1201 // Create communicators for each group of processes that can use
1202 // shared memory areas. Within each of these groups, we don't care about
1203 // which rank each of the old processes gets except that we would like to
1204 // make sure that the (global) root process will have rank=0 within
1205 // its own sub-communicator. We can do that through the third argument of
1206 // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
1207 // order of processes within the split communicators, and we should set it to
1208 // zero for the root processes and one for all others -- which means that
1209 // for all of these other processes, MPI can choose whatever order it
1210 // wants because they have the same key (MPI then documents that these ties
1211 // will be broken according to these processes' rank in the old group).
1212 //
1213 // At least that's the theory. In practice, the MPI implementation where
1214 // this function was developed on does not seem to do that. (Bug report
1215 // is here: https://github.com/open-mpi/ompi/issues/8854)
1216 // We work around this by letting MPI_Comm_split_type choose whatever
1217 // rank it wants, and then reshuffle with MPI_Comm_split in a second
1218 // step -- not elegant, nor efficient, but seems to work:
1219 MPI_Comm shmem_group_communicator;
1220 {
1221 MPI_Comm shmem_group_communicator_temp;
1222 int ierr = MPI_Comm_split_type(communicator,
1223 MPI_COMM_TYPE_SHARED,
1224 /* key */ 0,
1225 MPI_INFO_NULL,
1226 &shmem_group_communicator_temp);
1227
1228 AssertThrowMPI(ierr);
1229 const int key =
1230 (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
1231 ierr = MPI_Comm_split(shmem_group_communicator_temp,
1232 /* color */ 0,
1233 key,
1234 &shmem_group_communicator);
1235 AssertThrowMPI(ierr);
1236
1237 // Verify the explanation from above
1238 if (Utilities::MPI::this_mpi_process(communicator) == root_process)
1239 Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
1240 ExcInternalError());
1241
1242 // And get rid of the temporary communicator
1243 ierr = MPI_Comm_free(&shmem_group_communicator_temp);
1244 }
1245 const bool is_shmem_root =
1246 Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;
1247
1248 // **** Step 2 ****
1249 // We then have to send the state of the current object from the
1250 // root process to one exemplar in each shmem group. To this end,
1251 // we create another subcommunicator that includes the ranks zero
1252 // of all shmem groups, and because of the trick above, we know
1253 // that this also includes the original root process.
1254 //
1255 // There are different ways of creating a "shmem_roots_communicator".
1256 // The conceptually easiest way is to create an MPI_Group that only
1257 // includes the shmem roots and then create a communicator from this
1258 // via MPI_Comm_create or MPI_Comm_create_group. The problem
1259 // with this is that we would have to exchange among all processes
1260 // which ones are shmem roots and which are not. This is awkward.
1261 //
1262 // A simpler way is to use MPI_Comm_split that uses "colors" to
1263 // indicate which sub-communicator each process wants to be in.
1264 // We use color=0 to indicate the group of shmem roots, and color=1
1265 // for all other processes -- the latter will simply not ever do
1266 // anything among themselves with the communicator so created.
1267 //
1268 // Using MPI_Comm_split has the additional benefit that, just as above,
1269 // we can choose where each rank will end up in shmem_roots_communicator.
1270 // We again set key=0 for the original root_process, and key=1 for all other
1271 // ranks; then, the global root becomes rank=0 on the
1272 // shmem_roots_communicator. We don't care how the other processes are
1273 // ordered.
1274 MPI_Comm shmem_roots_communicator;
1275 {
1276 const int key =
1277 (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
1278
1279 const int ierr = MPI_Comm_split(communicator,
1280 /*color=*/
1281 (is_shmem_root ? 0 : 1),
1282 key,
1283 &shmem_roots_communicator);
1284 AssertThrowMPI(ierr);
1285
1286 // Again verify the explanation from above
1287 if (Utilities::MPI::this_mpi_process(communicator) == root_process)
1288 Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
1289 ExcInternalError());
1290 }
1291
1292 const unsigned int shmem_roots_root_rank = 0;
1293 const bool is_shmem_roots_root =
1294 (is_shmem_root && (Utilities::MPI::this_mpi_process(
1295 shmem_roots_communicator) == shmem_roots_root_rank));
1296
1297 // Now let the original root_process broadcast the current object to all
1298 // shmem roots. By construction in step 1, rank 0 of shmem_roots_communicator
1299 // is the original root process, which has all of the data.
1300 if (is_shmem_root)
1301 {
1302 if (std::is_trivial<T>::value)
1303 {
1304 // The data is "trivial", i.e., we can copy things directly without
1305 // having to go through the serialization/deserialization machinery of
1306 // Utilities::MPI::broadcast.
1307 //
1308 // In that case, first tell all of the other shmem roots how many
1309 // elements we will have to deal with, and let them resize their
1310 // (non-shared) arrays.
1311 const size_type new_size =
1312 Utilities::MPI::broadcast(shmem_roots_communicator,
1313 size(),
1314 shmem_roots_root_rank);
1315 if (is_shmem_roots_root == false)
1316 resize(new_size);
1317
1318 // Then directly copy from the root process into these buffers
1319 int ierr = MPI_Bcast(elements.get(),
1320 sizeof(T) * new_size,
1321 MPI_CHAR,
1322 shmem_roots_root_rank,
1323 shmem_roots_communicator);
1324 AssertThrowMPI(ierr);
1325 }
1326 else
1327 {
1328 // The objects to be sent around are not "trivial", and so we have
1329 // to go through the serialization/deserialization machinery. On all
1330 // but the sending process, overwrite the current state with the
1331 // vector just broadcast.
1332 //
1333 // On the root rank, this would lead to resetting the 'entries'
1334 // pointer, which would trigger the deleter which would lead to a
1335 // deadlock. So we just send the result of the broadcast() call to
1336 // nirvana on the root process and keep our current state.
1337 if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
1338 Utilities::MPI::broadcast(shmem_roots_communicator,
1339 *this,
1340 shmem_roots_root_rank);
1341 else
1342 *this = Utilities::MPI::broadcast(shmem_roots_communicator,
1343 *this,
1344 shmem_roots_root_rank);
1345 }
1346 }
1347
1348 // We no longer need the shmem roots communicator, so get rid of it
1349 {
1350 const int ierr = MPI_Comm_free(&shmem_roots_communicator);
1351 AssertThrowMPI(ierr);
1352 }
1353
1354
1355 // **** Step 3 ****
1356 // At this point, all shmem groups have one shmem root process that has
1357 // a copy of the data. This is the point where each shmem group should
1358 // establish a shmem area to put the data into. As mentioned above,
1359 // we know that the shmem roots are rank zero in their respective
1360 // shmem_group_communicator.
1361 //
1362 // The process for all of this works as follows: While all processes in
1363 // the shmem group participate in the generation of the shmem memory window,
1364 // only the shmem root actually allocates any memory -- the rest just
1365 // allocate zero bytes of their own. We allocate space for exactly
1366 // size() elements (computed on the shmem_root that already has the data)
1367 // and add however many bytes are necessary so that we know that we can align
1368 // things to 64-byte boundaries. The worst case happens if the memory system
1369 // gives us a pointer to an address one byte past a desired alignment
1370 // boundary, and in that case aligning the memory will require us to waste the
1371 // first (align_by-1) bytes. So we have to ask for
1372 // size() * sizeof(T) + (align_by - 1)
1373 // bytes.
1374 //
1375 // Before MPI 4.0, there was no way to specify that we want memory aligned to
1376 // a certain number of bytes. This is going to come back to bite us further
1377 // down below when we try to get a properly aligned pointer to our memory
1378 // region, see the commentary there. Starting with MPI 4.0, one can set a
1379 // flag in an MPI_Info structure that requests a desired alignment, so we do
1380 // this for forward compatibility; MPI implementations ignore flags they don't
1381 // know anything about, and so setting this flag is backward compatible also
1382 // to older MPI versions.
1383 //
1384 // There is one final piece we can already take care of here. At the beginning
1385 // of all of this, only the shmem_root knows how many elements there are in
1386 // the array. But at the end of it, all processes of course need to know. We
1387 // could put this information somewhere into the shmem area, along with the
1388 // other data, but that seems clumsy. It turns out that when calling
1389 // MPI_Win_allocate_shared, we are asked for the value of a parameter called
1390 // 'disp_unit' whose meaning is difficult to determine from the MPI
1391 // documentation, and that we do not actually need. So we "abuse" it a bit: On
1392 // the shmem root, we put the array size into it. Later on, the remaining
1393 // processes can query the shmem root's value of 'disp_unit', and so will be
1394 // able to learn about the array size that way.
1395 MPI_Win shmem_window;
1396 void * base_ptr;
1397 const MPI_Aint align_by = 64;
1398 const MPI_Aint alloc_size =
1399 Utilities::MPI::broadcast(shmem_group_communicator,
1400 (size() * sizeof(T) + (align_by - 1)),
1401 0);
1402
1403 {
1404 const int disp_unit = (is_shmem_root ? size() : 1);
1405
1406 int ierr;
1407
1408 MPI_Info mpi_info;
1409 ierr = MPI_Info_create(&mpi_info);
1410 AssertThrowMPI(ierr);
1411 ierr = MPI_Info_set(mpi_info,
1412 "mpi_minimum_memory_alignment",
1413 std::to_string(align_by).c_str());
1414 AssertThrowMPI(ierr);
1415 ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
1416 disp_unit,
1417 mpi_info,
1418 shmem_group_communicator,
1419 &base_ptr,
1420 &shmem_window);
1421 AssertThrowMPI(ierr);
1422
1423 ierr = MPI_Info_free(&mpi_info);
1424 AssertThrowMPI(ierr);
1425 }
1426
1427
1428 // **** Step 4 ****
1429 // The next step is to teach all non-shmem root processes what the pointer to
1430 // the array is that the shmem-root created. MPI has a nifty way for this
1431 // given that only a single process actually allocated memory in the window:
1432 // When calling MPI_Win_shared_query, the MPI documentation says that
1433 // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
1434 // the pointer, disp_unit, and size of the memory segment belonging the lowest
1435 // rank that specified size > 0. If all processes in the group attached to the
1436 // window specified size = 0, then the call returns size = 0 and a baseptr as
1437 // if MPI_ALLOC_MEM was called with size = 0."
1438 //
1439 // This will allow us to obtain the pointer to the shmem root's memory area,
1440 // which is the only one we care about. (None of the other processes have
1441 // even allocated any memory.) But this will also retrieve the shmem root's
1442 // disp_unit, which in step 3 above we have abused to pass along the number of
1443 // elements in the array.
1444 //
1445 // We don't need to do this on the shmem root process: This process has
1446 // already gotten its base_ptr correctly set above, and we can determine the
1447 // array size by just calling size().
1448 unsigned int array_size =
1449 (is_shmem_root ? size() : numbers::invalid_unsigned_int);
1450 if (is_shmem_root == false)
1451 {
1452 int disp_unit;
1453 MPI_Aint alloc_size; // not actually used
1454 const int ierr = MPI_Win_shared_query(
1455 shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
1456 AssertThrowMPI(ierr);
1457
1458 // Make sure we actually got a pointer, and also unpack the array size as
1459 // discussed above.
1460 Assert(base_ptr != nullptr, ExcInternalError());
1461
1462 array_size = disp_unit;
1463 }
1464
1465
1466 // **** Step 5 ****
1467 // Now that all processes know the address of the space that is visible to
1468 // everyone, we need to figure out whether it is properly aligned and if not,
1469 // find the next aligned address.
1470 //
1471 // std::align does that, but it also modifies its last two arguments. The
1472 // documentation of that function at
1473 // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but I
1474 // *think* that the following should do given that we do not use base_ptr and
1475 // available_space any further after the call to std::align.
1476 std::size_t available_space = alloc_size;
1477 void * base_ptr_backup = base_ptr;
1478 T * aligned_shmem_pointer = static_cast<T *>(
1479 std::align(align_by, array_size * sizeof(T), base_ptr, available_space));
1480 Assert(aligned_shmem_pointer != nullptr, ExcInternalError());
1481
1482 // There is one step to guard against. It is *conceivable* that the base_ptr
1483 // we have previously obtained from MPI_Win_shared_query is mapped so
1484 // awkwardly into the different MPI processes' memory spaces that it is
1485 // aligned in one memory space, but not another. In that case, different
1486 // processes would align base_ptr differently, and adjust available_space
1487 // differently. We can check that by making sure that the max (or min) over
1488 // all processes is equal to every process's value. If that's not the case,
1489 // then the whole idea of aligning above is wrong and we need to rethink what
1490 // it means to align data in a shared memory space.
1491 //
1492 // One might be tempted to think that this is not how MPI implementations
1493 // actually arrange things. Alas, when developing this functionality in 2021,
1494 // this is really how at least OpenMPI ends up doing things. (This is with an
1495 // OpenMPI implementation of MPI 3.1, so it does not support the flag we set
1496 // in the MPI_Info structure above when allocating the memory window.) Indeed,
1497 // when running this code on three processes, one ends up with base_ptr values
1498 // of
1499 // base_ptr=0x7f0842f02108
1500 // base_ptr=0x7fc0a47881d0
1501 // base_ptr=0x7f64872db108
1502 // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so there
1503 // is no common offset std::align could find that leads to a 64-byte
1504 // aligned memory address in all three memory spaces. That's a tremendous
1505 // nuisance and there is really nothing we can do about this other than just
1506 // fall back on the (unaligned) base_ptr in that case.
1507 if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
1508 Utilities::MPI::max(available_space, shmem_group_communicator))
1509 aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);
1510
1511
1512 // **** Step 6 ****
1513 // If this is the shmem root process, we need to copy the data into the
1514 // shared memory space.
1515 if (is_shmem_root)
1516 {
1517 if (std::is_trivial<T>::value == true)
1518 std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
1519 else
1520 for (std::size_t i = 0; i < size(); ++i)
1521 new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
1522 }
1523
1524 // Make sure that the shared memory host has copied the data before we try to
1525 // access it.
1526 MPI_Barrier(shmem_group_communicator);
1527
1528 // **** Step 7 ****
1529 // Finally, we need to set the pointers of this object to what we just
1530 // learned. This also releases all memory that may have been in use
1531 // previously.
1532 //
1533 // The part that is a bit tricky is how to write the deleter of this
1534 // shared memory object. When we want to get rid of it, we need to
1535 // also release the MPI_Win object along with the shmem_group_communicator
1536 // object. That's because as long as we use the shared memory, we still need
1537 // to hold on to the MPI_Win object, and the MPI_Win object is based on the
1538 // communicator. (The former is definitely true, the latter is not quite clear
1539 // from the MPI documentation, but seems reasonable.) So we need to have a
1540 // deleter for the pointer that ensures that upon release of the memory, we
1541 // not only call the destructor of these memory elements (but only once, on
1542 // the shmem root!) but also destroy the MPI_Win and the communicator. All of
1543 // that is encapsulated in the following call where the deleter makes copies
1544 // of the arguments in the lambda capture.
1545 elements =
1546 decltype(elements)(aligned_shmem_pointer,
1547 [is_shmem_root,
1548 array_size,
1549 aligned_shmem_pointer,
1550 shmem_group_communicator,
1551 shmem_window](T *) mutable {
1552 if (is_shmem_root)
1553 for (unsigned int i = 0; i < array_size; ++i)
1554 aligned_shmem_pointer[i].~T();
1555
1556 int ierr;
1557 ierr = MPI_Win_free(&shmem_window);
1558 AssertThrowMPI(ierr);
1559
1560 ierr = MPI_Comm_free(&shmem_group_communicator);
1561 AssertThrowMPI(ierr);
1562 });
1563
1564 // We then also have to set the other two pointers that define the state of
1565 // the current object. Note that the new buffer size is exactly as large as
1566 // necessary, i.e., can store size() elements, regardless of the number of
1567 // allocated elements in the original objects.
1568 used_elements_end = elements.get() + array_size;
1569 allocated_elements_end = used_elements_end;
1570
1571 // **** Consistency check ****
1572 // At this point, each process should have a copy of the data.
1573 // Verify this in some sort of round-about way
1574# ifdef DEBUG
1575 const std::vector<char> packed_data = Utilities::pack(*this);
1576 const int hash =
1577 std::accumulate(packed_data.begin(), packed_data.end(), int(0));
1578 Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
1579# endif
1580
1581
1582
1583# else
1584 // If we only have MPI 2.x, then simply broadcast the current object to all
1585 // other processes and forego the idea of using shmem
1586 *this = Utilities::MPI::broadcast(communicator, *this, root_process);
1587# endif
1588# else
1589 // No MPI -> nothing to replicate
1590 (void)communicator;
1591 (void)root_process;
1592# endif
1593}
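
// Usage sketch (illustrative): fill a large lookup table on one process, then
// let every process read it, with one physical copy per shared-memory node:
//
//   AlignedVector<double> table;
//   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
//     table.resize(1000, 3.14); // only the root allocates and fills the data
//   table.replicate_across_communicator(MPI_COMM_WORLD, /*root_process=*/0);
//   // now table.size() == 1000 and table[i] == 3.14 on every rank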
1594
1595
1596
1597template <class T>
1598inline void
1599AlignedVector<T>::swap(AlignedVector<T> &vec)
1600{
1601 // Swap the data in the 'elements' objects. One problem is that this
1602 // also moves the deleter object, but the deleter object is a lambda function
1603 // that references 'this' (i.e., the 'this' pointer of the *moved-from*
1604 // object). So what we actually do is steal the pointer via
1605 // std::unique_ptr::release() and then install our own deleter object that
1606 // mirrors the one used in reserve() above.
1607 //
1608 // We have to do the same for the other object
1609 T *this_element_pointer = elements.release();
1610
1611 elements = decltype(elements)(vec.elements.release(), [this](T *ptr) {
1612 if (ptr != nullptr)
1613 {
1614 Assert(this->used_elements_end != nullptr, ExcInternalError());
1615
1616 if (std::is_trivial<T>::value == false)
1617 for (T *p = this->used_elements_end - 1; p >= ptr; --p)
1618 p->~T();
1619 }
1620
1621 std::free(ptr);
1622 });
1623
1624 vec.elements = decltype(vec.elements)(this_element_pointer, [&vec](T *ptr) {
1625 if (ptr != nullptr)
1626 {
1627 Assert(vec.used_elements_end != nullptr, ExcInternalError());
1628
1629 if (std::is_trivial<T>::value == false)
1630 for (T *p = vec.used_elements_end - 1; p >= ptr; --p)
1631 p->~T();
1632 }
1633
1634 std::free(ptr);
1635 });
1636
1637 std::swap(used_elements_end, vec.used_elements_end);
1638 std::swap(allocated_elements_end, vec.allocated_elements_end);
1639}
1640
1641
1642
1643template <class T>
1644inline bool
1645AlignedVector<T>::empty() const
1646{
1647 return used_elements_end == elements.get();
1648}
1649
1650
1651
1652template <class T>
1653inline typename AlignedVector<T>::size_type
1654AlignedVector<T>::size() const
1655{
1656 return used_elements_end - elements.get();
1657}
1658
1659
1660
1661template <class T>
1662inline typename AlignedVector<T>::size_type
1663AlignedVector<T>::capacity() const
1664{
1665 return allocated_elements_end - elements.get();
1666}
1667
1668
1669
1670template <class T>
1671inline typename AlignedVector<T>::reference AlignedVector<T>::
1672 operator[](const size_type index)
1673{
1674 AssertIndexRange(index, size());
1675 return elements[index];
1676}
1677
1678
1679
1680template <class T>
1681inline typename AlignedVector<T>::const_reference AlignedVector<T>::
1682 operator[](const size_type index) const
1683{
1684 AssertIndexRange(index, size());
1685 return elements[index];
1686}
1687
1688
1689
1690template <typename T>
1691inline typename AlignedVector<T>::pointer
1692AlignedVector<T>::data()
1693{
1694 return elements.get();
1695}
1696
1697
1698
1699template <typename T>
1700inline typename AlignedVector<T>::const_pointer
1701AlignedVector<T>::data() const
1702{
1703 return elements.get();
1704}
1705
1706
1707
1708template <class T>
1709inline typename AlignedVector<T>::iterator
1710AlignedVector<T>::begin()
1711{
1712 return elements.get();
1713}
1714
1715
1716
1717template <class T>
1718inline typename AlignedVector<T>::iterator
1719AlignedVector<T>::end()
1720{
1721 return used_elements_end;
1722}
1723
1724
1725
1726template <class T>
1727inline typename AlignedVector<T>::const_iterator
1728AlignedVector<T>::begin() const
1729{
1730 return elements.get();
1731}
1732
1733
1734
1735template <class T>
1736inline typename AlignedVector<T>::const_iterator
1737AlignedVector<T>::end() const
1738{
1739 return used_elements_end;
1740}
1741
1742
1743
1744template <class T>
1745template <class Archive>
1746inline void
1747AlignedVector<T>::save(Archive &ar, const unsigned int) const
1748{
1749 size_type vec_size = size();
1750 ar & vec_size;
1751 if (vec_size > 0)
1752 ar &boost::serialization::make_array(elements.get(), vec_size);
1753}
1754
1755
1756
1757template <class T>
1758template <class Archive>
1759inline void
1760AlignedVector<T>::load(Archive &ar, const unsigned int)
1761{
1762 size_type vec_size = 0;
1763 ar & vec_size;
1764
1765 if (vec_size > 0)
1766 {
1767 reserve(vec_size);
1768 ar &boost::serialization::make_array(elements.get(), vec_size);
1769 used_elements_end = elements.get() + vec_size;
1770 }
1771}
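
// Serialization sketch (illustrative), using the BOOST text archives that the
// save()/load() split above is designed for; assumes
// <boost/archive/text_oarchive.hpp>, <boost/archive/text_iarchive.hpp>, and
// <sstream> are available:
//
//   AlignedVector<double> v(4, 2.0);
//   std::ostringstream out;
//   {
//     boost::archive::text_oarchive oa(out);
//     oa << v; // invokes save()
//   }
//   AlignedVector<double> w;
//   std::istringstream in(out.str());
//   {
//     boost::archive::text_iarchive ia(in);
//     ia >> w; // invokes load(); afterwards w == v
//   }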
1772
1773
1774
1775template <class T>
1776inline typename AlignedVector<T>::size_type
1777AlignedVector<T>::memory_consumption() const
1778{
1779 size_type memory = sizeof(*this);
1780 for (const T *t = elements.get(); t != used_elements_end; ++t)
1781 memory += MemoryConsumption::memory_consumption(*t);
1782 memory += sizeof(T) * (allocated_elements_end - used_elements_end);
1783 return memory;
1784}
1785
1786
1787#endif // ifndef DOXYGEN
1788
1789
1795template <class T>
1796bool
1797operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
1798{
1799 if (lhs.size() != rhs.size())
1800 return false;
1801 for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
1802 rit = rhs.begin();
1803 lit != lhs.end();
1804 ++lit, ++rit)
1805 if (*lit != *rit)
1806 return false;
1807 return true;
1808}
1809
1810
1811
1817template <class T>
1818bool
1819operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
1820{
1821 return !(operator==(lhs, rhs));
1822}
1823
1824
1825DEAL_II_NAMESPACE_CLOSE
1826
1827#endif