vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
 22 #include <deal.II/base/exceptions.h>
 23 #include <deal.II/base/template_constraints.h>
 24 
25 #include <array>
26 #include <cmath>
27 
28 // Note:
29 // The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30 // according to the following scheme (on x86-based architectures)
31 // #ifdef __AVX512F__
32 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33 // #elif defined (__AVX__)
34 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35 // #elif defined (__SSE2__)
36 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37 // #else
38 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39 // #endif
40 // In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
 41 // test, 'check_01_cpu_features.cmake', ensures that these features are not only
42 // present in the compilation unit but also working properly.
43 
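// A minimal illustration (not part of this header) of how downstream code can
// react to the detected width; the alias name below is arbitrary:
//
//   #include <deal.II/base/config.h>
//   #include <deal.II/base/vectorization.h>
//
//   #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256
//   // at least AVX is active: VectorizedArray<double> holds 4 lanes
//   #else
//   // SSE2 (2 lanes) or the scalar fallback (1 lane)
//   #endif
//
//   // independent of the branch above, the default template argument picks
//   // the widest array supported by the current compilation unit:
//   using ArrayType = dealii::VectorizedArray<double>;
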
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45 
46 // These error messages try to detect the case that deal.II was compiled with
 47 // a wider instruction set extension than the current compilation unit, for
48 // example because deal.II was compiled with AVX, but a user project does not
 49 // add -march=native or similar flags, making it fall back to SSE2. This leads to
50 // very strange errors as the size of data structures differs between the
51 // compiled deal.II code sitting in libdeal_II.so and the user code if not
52 // detected.
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54 # error \
55  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56 # endif
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58 # error \
59  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60 # endif
61 
62 # ifdef _MSC_VER
63 # include <intrin.h>
64 # elif defined(__ALTIVEC__)
65 # include <altivec.h>
66 
67 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68 // them before they make trouble
69 # undef vector
70 # undef pixel
71 # undef bool
72 # else
73 # include <x86intrin.h>
74 # endif
75 
76 #endif
77 
78 
 79 DEAL_II_NAMESPACE_OPEN
 80 
81 
82 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
83 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
84 
85 template <typename Number, std::size_t width>
86 struct EnableIfScalar<VectorizedArray<Number, width>>
87 {
 88   using type = VectorizedArray<Number, width>;
 89 };
90 
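// The point of this trait is that a VectorizedArray can itself serve as the
// scalar type of other deal.II objects. A short sketch (not part of this
// header):
//
//   #include <deal.II/base/tensor.h>
//
//   // a rank-1 tensor in 3d whose components are vectorized doubles
//   dealii::Tensor<1, 3, dealii::VectorizedArray<double>> grad;
//   grad[0] = dealii::make_vectorized_array(1.0);
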
91 
92 
96 template <typename T>
 97 class VectorizedArrayIterator
 98 {
99 public:
106  VectorizedArrayIterator(T &data, const std::size_t lane)
107  : data(&data)
108  , lane(lane)
109  {}
110 
114  bool
 115   operator==(const VectorizedArrayIterator<T> &other) const
 116   {
117  Assert(this->data == other.data,
118  ExcMessage(
119  "You are trying to compare iterators into different arrays."));
120  return this->lane == other.lane;
121  }
122 
126  bool
 127   operator!=(const VectorizedArrayIterator<T> &other) const
 128   {
129  Assert(this->data == other.data,
130  ExcMessage(
131  "You are trying to compare iterators into different arrays."));
132  return this->lane != other.lane;
133  }
134 
 138   VectorizedArrayIterator<T> &
 139   operator=(const VectorizedArrayIterator<T> &other) = default;
140 
145  const typename T::value_type &
146  operator*() const
147  {
148  AssertIndexRange(lane, T::size());
149  return (*data)[lane];
150  }
151 
152 
157  template <typename U = T>
158  std::enable_if_t<!std::is_same<U, const U>::value, typename T::value_type> &
 159   operator*()
 160   {
161  AssertIndexRange(lane, T::size());
162  return (*data)[lane];
163  }
164 
 170   VectorizedArrayIterator<T> &
 171   operator++()
 172   {
173  AssertIndexRange(lane + 1, T::size() + 1);
174  lane++;
175  return *this;
176  }
177 
 182   VectorizedArrayIterator<T> &
 183   operator+=(const std::size_t offset)
184  {
185  AssertIndexRange(lane + offset, T::size() + 1);
186  lane += offset;
187  return *this;
188  }
189 
 195   VectorizedArrayIterator<T> &
 196   operator--()
 197   {
198  Assert(
199  lane > 0,
200  ExcMessage(
201  "You can't decrement an iterator that is already at the beginning of the range."));
202  --lane;
203  return *this;
204  }
205 
 209   VectorizedArrayIterator<T>
 210   operator+(const std::size_t &offset) const
211  {
212  AssertIndexRange(lane + offset, T::size() + 1);
213  return VectorizedArrayIterator<T>(*data, lane + offset);
214  }
215 
219  std::ptrdiff_t
 220   operator-(const VectorizedArrayIterator<T> &other) const
 221   {
222  return static_cast<std::ptrdiff_t>(lane) -
223  static_cast<ptrdiff_t>(other.lane);
224  }
225 
226 private:
230  T *data;
231 
235  std::size_t lane;
236 };
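// Usage sketch (not part of this header): this iterator is what backs
// range-based for loops over the lanes of a VectorizedArray, e.g.
//
//   dealii::VectorizedArray<double> v = 1.5;
//   for (double &lane : v)   // VectorizedArrayIterator under the hood
//     lane += 1.0;           // every lane now holds 2.5
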
237 
238 
239 
249 template <typename T, std::size_t width>
 250 class VectorizedArrayBase
 251 {
252 public:
256  VectorizedArrayBase() = default;
257 
261  template <typename U>
262  VectorizedArrayBase(const std::initializer_list<U> &list)
263  {
264  auto i0 = this->begin();
265  auto i1 = list.begin();
266 
267  for (; i1 != list.end(); ++i0, ++i1)
268  {
269  Assert(
270  i0 != this->end(),
271  ExcMessage(
272  "Initializer list exceeds size of this VectorizedArray object."));
273 
274  *i0 = *i1;
275  }
276 
277  for (; i0 != this->end(); ++i0)
278  {
279  *i0 = 0.0;
280  }
281  }
282 
286  static constexpr std::size_t
 287   size()
 288   {
289  return width;
290  }
291 
 295   VectorizedArrayIterator<T>
 296   begin()
 297   {
298  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
299  }
300 
 305   VectorizedArrayIterator<const T>
 306   begin() const
307  {
308  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
309  }
310 
 314   VectorizedArrayIterator<T>
 315   end()
316  {
317  return VectorizedArrayIterator<T>(static_cast<T &>(*this), width);
318  }
319 
 324   VectorizedArrayIterator<const T>
 325   end() const
326  {
327  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
328  width);
329  }
330 };
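// Usage sketch (not part of this header): size(), begin()/end() and the
// initializer-list constructor defined above are available in every derived
// VectorizedArray type. Assuming a vectorization width of at least two lanes:
//
//   dealii::VectorizedArray<double> v = {1.0, 2.0};  // remaining lanes are 0
//   for (unsigned int i = 0; i < v.size(); ++i)
//     std::cout << v[i] << ' ';
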
331 
332 
333 
418 template <typename Number, std::size_t width>
 419 class VectorizedArray
 420   : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
421 {
422 public:
426  using value_type = Number;
427 
428  static_assert(width == 1,
429  "You specified an illegal width that is not supported.");
430 
435  VectorizedArray() = default;
436 
440  VectorizedArray(const Number scalar)
441  {
442  this->operator=(scalar);
443  }
444 
448  template <typename U>
449  VectorizedArray(const std::initializer_list<U> &list)
450  : VectorizedArrayBase<VectorizedArray<Number, width>, 1>(list)
451  {}
452 
 457   VectorizedArray &
 458   operator=(const Number scalar) &
459  {
460  data = scalar;
461  return *this;
462  }
463 
 469   VectorizedArray &
 470   operator=(const Number scalar) && = delete;
471 
477  Number &
478  operator[](const unsigned int comp)
479  {
480  (void)comp;
481  AssertIndexRange(comp, 1);
482  return data;
483  }
484 
490  const Number &
491  operator[](const unsigned int comp) const
492  {
493  (void)comp;
494  AssertIndexRange(comp, 1);
495  return data;
496  }
497 
 502   VectorizedArray &
 503   operator+=(const VectorizedArray &vec)
 504   {
505  data += vec.data;
506  return *this;
507  }
508 
 513   VectorizedArray &
 514   operator-=(const VectorizedArray &vec)
 515   {
516  data -= vec.data;
517  return *this;
518  }
519 
 524   VectorizedArray &
 525   operator*=(const VectorizedArray &vec)
 526   {
527  data *= vec.data;
528  return *this;
529  }
530 
 535   VectorizedArray &
 536   operator/=(const VectorizedArray &vec)
 537   {
538  data /= vec.data;
539  return *this;
540  }
541 
548  template <typename OtherNumber>
 549   void
 550   load(const OtherNumber *ptr)
551  {
552  data = *ptr;
553  }
554 
561  template <typename OtherNumber>
 562   void
 563   store(OtherNumber *ptr) const
564  {
565  *ptr = data;
566  }
567 
615  void
616  streaming_store(Number *ptr) const
617  {
618  *ptr = data;
619  }
620 
634  void
635  gather(const Number *base_ptr, const unsigned int *offsets)
636  {
637  data = base_ptr[offsets[0]];
638  }
639 
653  void
654  scatter(const unsigned int *offsets, Number *base_ptr) const
655  {
656  base_ptr[offsets[0]] = data;
657  }
658 
664  Number
665  sum()
666  {
667  return data;
668  }
669 
675  Number data;
676 
677 private:
 683   VectorizedArray
 684   get_sqrt() const
685  {
686  VectorizedArray res;
687  res.data = std::sqrt(data);
688  return res;
689  }
690 
 696   VectorizedArray
 697   get_abs() const
698  {
699  VectorizedArray res;
700  res.data = std::fabs(data);
701  return res;
702  }
703 
 709   VectorizedArray
 710   get_max(const VectorizedArray &other) const
711  {
712  VectorizedArray res;
713  res.data = std::max(data, other.data);
714  return res;
715  }
716 
 722   VectorizedArray
 723   get_min(const VectorizedArray &other) const
724  {
725  VectorizedArray res;
726  res.data = std::min(data, other.data);
727  return res;
728  }
729 
730  // Make a few functions friends.
 731   template <typename Number2, std::size_t width2>
 732   friend VectorizedArray<Number2, width2>
 733   std::sqrt(const VectorizedArray<Number2, width2> &);
 734   template <typename Number2, std::size_t width2>
 735   friend VectorizedArray<Number2, width2>
 736   std::abs(const VectorizedArray<Number2, width2> &);
 737   template <typename Number2, std::size_t width2>
 738   friend VectorizedArray<Number2, width2>
 739   std::max(const VectorizedArray<Number2, width2> &,
 740            const VectorizedArray<Number2, width2> &);
 741   template <typename Number2, std::size_t width2>
 742   friend VectorizedArray<Number2, width2>
 743   std::min(const VectorizedArray<Number2, width2> &,
 744            const VectorizedArray<Number2, width2> &);
 745 };
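// A minimal usage sketch of the interface defined above (not part of this
// header). The same member calls work for every width; here the scalar
// fallback with a single lane is shown:
//
//   double src = 1.0, dst = 0.0;
//   dealii::VectorizedArray<double, 1> a, b;
//   a.load(&src);                  // read size() == 1 element from memory
//   b = 3.0;                       // broadcast assignment
//   a += b;                        // lane-wise addition
//   a.store(&dst);                 // write back, dst == 4.0
//   const double total = a.sum();  // sum over lanes, trivially a[0] here
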
746 
747 
748 
 760 template <typename Number,
 761           std::size_t width =
 762             internal::VectorizedArrayWidthSpecifier<Number>::max_width>
 763 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
 764 make_vectorized_array(const Number &u)
 765 {
 766   VectorizedArray<Number, width> result = u;
 767   return result;
 768 }
769 
770 
771 
778 template <typename VectorizedArrayType>
779 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
780 make_vectorized_array(const typename VectorizedArrayType::value_type &u)
781 {
782  static_assert(
783  std::is_same<VectorizedArrayType,
784  VectorizedArray<typename VectorizedArrayType::value_type,
785  VectorizedArrayType::size()>>::value,
786  "VectorizedArrayType is not a VectorizedArray.");
787 
788  VectorizedArrayType result = u;
789  return result;
790 }
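// Usage sketch (not part of this header):
//
//   // let the default template argument pick the widest supported width:
//   auto v1 = dealii::make_vectorized_array(2.5);
//
//   // or name the desired VectorizedArray type explicitly (second overload);
//   // VectorizedArray<float, 4> requires that SSE2 is available:
//   auto v2 =
//     dealii::make_vectorized_array<dealii::VectorizedArray<float, 4>>(1.f);
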
791 
792 
793 
805 template <typename Number, std::size_t width>
806 inline DEAL_II_ALWAYS_INLINE void
 807 gather(VectorizedArray<Number, width> &    out,
 808        const std::array<Number *, width> &ptrs,
809  const unsigned int offset)
810 {
811  for (unsigned int v = 0; v < width; ++v)
812  out.data[v] = ptrs[v][offset];
813 }
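// Usage sketch (not part of this header): pick the entry at position 'offset'
// out of one independent array per lane. The pointers array_a and array_b are
// hypothetical:
//
//   std::array<double *, 2> ptrs = {{array_a, array_b}};
//   dealii::VectorizedArray<double, 2> packed;
//   gather(packed, ptrs, /*offset=*/3);   // packed[v] == ptrs[v][3]
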
814 
815 
816 
842 template <typename Number, std::size_t width>
843 inline DEAL_II_ALWAYS_INLINE void
844 vectorized_load_and_transpose(const unsigned int n_entries,
845  const Number * in,
846  const unsigned int * offsets,
 847                               VectorizedArray<Number, width> *out)
 848 {
849  for (unsigned int i = 0; i < n_entries; ++i)
850  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
851  out[i][v] = in[offsets[v] + i];
852 }
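// Semantics sketch (not part of this header): for each lane v the call reads
// n_entries consecutive values starting at in + offsets[v] and transposes them
// so that out[i][v] == in[offsets[v] + i]. Assuming width 2, a hypothetical
// array 'field' and offsets {0, 100}:
//
//   dealii::VectorizedArray<double, 2> tmp[3];
//   const unsigned int offsets[2] = {0, 100};
//   vectorized_load_and_transpose(3, field, offsets, tmp);
//   // tmp[2][1] now holds field[102]
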
853 
854 
866 template <typename Number, std::size_t width>
867 inline DEAL_II_ALWAYS_INLINE void
868 vectorized_load_and_transpose(const unsigned int n_entries,
869  const std::array<Number *, width> &in,
 870                               VectorizedArray<Number, width> *out)
 871 {
872  for (unsigned int i = 0; i < n_entries; ++i)
873  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
874  out[i][v] = in[v][i];
875 }
876 
877 
878 
917 template <typename Number, std::size_t width>
918 inline DEAL_II_ALWAYS_INLINE void
919 vectorized_transpose_and_store(const bool add_into,
920  const unsigned int n_entries,
 921                                const VectorizedArray<Number, width> *in,
 922                                const unsigned int * offsets,
923  Number * out)
924 {
925  if (add_into)
926  for (unsigned int i = 0; i < n_entries; ++i)
927  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
928  out[offsets[v] + i] += in[i][v];
929  else
930  for (unsigned int i = 0; i < n_entries; ++i)
931  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
932  out[offsets[v] + i] = in[i][v];
933 }
934 
935 
947 template <typename Number, std::size_t width>
948 inline DEAL_II_ALWAYS_INLINE void
949 vectorized_transpose_and_store(const bool add_into,
950  const unsigned int n_entries,
 951                                const VectorizedArray<Number, width> *in,
 952                                std::array<Number *, width> & out)
953 {
954  if (add_into)
955  for (unsigned int i = 0; i < n_entries; ++i)
956  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
957  out[v][i] += in[i][v];
958  else
959  for (unsigned int i = 0; i < n_entries; ++i)
960  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
961  out[v][i] = in[i][v];
962 }
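// Together with vectorized_load_and_transpose above, these functions give a
// round trip between the interleaved (array-of-struct) layout in memory and
// the lane-wise (struct-of-array) layout of VectorizedArray. A sketch, not
// part of this header, reusing the hypothetical 'field', 'offsets' and 'tmp'
// from the previous example:
//
//   vectorized_load_and_transpose(3, field, offsets, tmp);  // gather + transpose
//   for (unsigned int i = 0; i < 3; ++i)
//     tmp[i] *= 2.;                                         // lane-wise work
//   vectorized_transpose_and_store(false, 3, tmp, offsets, field);
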
963 
964 
967 #ifndef DOXYGEN
968 
969 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
970 
974 template <>
975 class VectorizedArray<double, 2>
976  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
977 {
978 public:
982  using value_type = double;
983 
988  VectorizedArray() = default;
989 
993  VectorizedArray(const double scalar)
994  {
995  this->operator=(scalar);
996  }
997 
1001  template <typename U>
1002  VectorizedArray(const std::initializer_list<U> &list)
 1003     : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
 1004   {}
1005 
1010  VectorizedArray &
1011  operator=(const double x) &
1012  {
1013  data = _mm_set1_pd(x);
1014  return *this;
1015  }
1016 
1022  VectorizedArray &
1023  operator=(const double scalar) && = delete;
1024 
1029  double &
1030  operator[](const unsigned int comp)
1031  {
1032  AssertIndexRange(comp, 2);
1033  return *(reinterpret_cast<double *>(&data) + comp);
1034  }
1035 
1040  const double &
1041  operator[](const unsigned int comp) const
1042  {
1043  AssertIndexRange(comp, 2);
1044  return *(reinterpret_cast<const double *>(&data) + comp);
1045  }
1046 
1051  VectorizedArray &
1052  operator+=(const VectorizedArray &vec)
1053  {
1054 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1055  data += vec.data;
1056 # else
1057  data = _mm_add_pd(data, vec.data);
1058 # endif
1059  return *this;
1060  }
1061 
1066  VectorizedArray &
1067  operator-=(const VectorizedArray &vec)
1068  {
1069 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1070  data -= vec.data;
1071 # else
1072  data = _mm_sub_pd(data, vec.data);
1073 # endif
1074  return *this;
1075  }
1076 
1081  VectorizedArray &
1082  operator*=(const VectorizedArray &vec)
1083  {
1084 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1085  data *= vec.data;
1086 # else
1087  data = _mm_mul_pd(data, vec.data);
1088 # endif
1089  return *this;
1090  }
1091 
1096  VectorizedArray &
1097  operator/=(const VectorizedArray &vec)
1098  {
1099 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1100  data /= vec.data;
1101 # else
1102  data = _mm_div_pd(data, vec.data);
1103 # endif
1104  return *this;
1105  }
1106 
1113  void
1114  load(const double *ptr)
1115  {
1116  data = _mm_loadu_pd(ptr);
1117  }
1118 
1120  void
1121  load(const float *ptr)
1122  {
 1123   DEAL_II_OPENMP_SIMD_PRAGMA
 1124   for (unsigned int i = 0; i < 2; ++i)
1125  data[i] = ptr[i];
1126  }
1127 
1135  void
1136  store(double *ptr) const
1137  {
1138  _mm_storeu_pd(ptr, data);
1139  }
1140 
1142  void
1143  store(float *ptr) const
1144  {
 1145   DEAL_II_OPENMP_SIMD_PRAGMA
 1146   for (unsigned int i = 0; i < 2; ++i)
1147  ptr[i] = data[i];
1148  }
1149 
1155  void
1156  streaming_store(double *ptr) const
1157  {
1158  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1159  ExcMessage("Memory not aligned"));
1160  _mm_stream_pd(ptr, data);
1161  }
1162 
1176  void
1177  gather(const double *base_ptr, const unsigned int *offsets)
1178  {
1179  for (unsigned int i = 0; i < 2; ++i)
1180  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1181  }
1182 
1196  void
1197  scatter(const unsigned int *offsets, double *base_ptr) const
1198  {
1199  for (unsigned int i = 0; i < 2; ++i)
1200  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1201  }
1202 
1207  double
1208  sum()
1209  {
1210  __m128d t1 = _mm_unpackhi_pd(data, data);
1211  __m128d t2 = _mm_add_pd(data, t1);
1212  return _mm_cvtsd_f64(t2);
1213  }
1214 
1220  __m128d data;
1221 
1222 private:
 1228   VectorizedArray
 1229   get_sqrt() const
1230  {
1231  VectorizedArray res;
1232  res.data = _mm_sqrt_pd(data);
1233  return res;
1234  }
1235 
 1241   VectorizedArray
 1242   get_abs() const
1243  {
1244  // to compute the absolute value, perform
1245  // bitwise andnot with -0. This will leave all
1246  // value and exponent bits unchanged but force
1247  // the sign value to +.
1248  __m128d mask = _mm_set1_pd(-0.);
1249  VectorizedArray res;
1250  res.data = _mm_andnot_pd(mask, data);
1251  return res;
1252  }
1253 
 1259   VectorizedArray
 1260   get_max(const VectorizedArray &other) const
1261  {
1262  VectorizedArray res;
1263  res.data = _mm_max_pd(data, other.data);
1264  return res;
1265  }
1266 
 1272   VectorizedArray
 1273   get_min(const VectorizedArray &other) const
1274  {
1275  VectorizedArray res;
1276  res.data = _mm_min_pd(data, other.data);
1277  return res;
1278  }
1279 
1280  // Make a few functions friends.
 1281   template <typename Number2, std::size_t width2>
 1282   friend VectorizedArray<Number2, width2>
 1283   std::sqrt(const VectorizedArray<Number2, width2> &);
 1284   template <typename Number2, std::size_t width2>
 1285   friend VectorizedArray<Number2, width2>
 1286   std::abs(const VectorizedArray<Number2, width2> &);
 1287   template <typename Number2, std::size_t width2>
 1288   friend VectorizedArray<Number2, width2>
 1289   std::max(const VectorizedArray<Number2, width2> &,
 1290            const VectorizedArray<Number2, width2> &);
 1291   template <typename Number2, std::size_t width2>
 1292   friend VectorizedArray<Number2, width2>
 1293   std::min(const VectorizedArray<Number2, width2> &,
 1294            const VectorizedArray<Number2, width2> &);
 1295 };
1296 
1297 
1298 
1302 template <>
1303 inline DEAL_II_ALWAYS_INLINE void
1304 vectorized_load_and_transpose(const unsigned int n_entries,
1305  const double * in,
1306  const unsigned int * offsets,
 1307                               VectorizedArray<double, 2> *out)
 1308 {
1309  const unsigned int n_chunks = n_entries / 2;
1310  for (unsigned int i = 0; i < n_chunks; ++i)
1311  {
1312  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
1313  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
1314  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1315  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1316  }
1317 
1318  // remainder loop of work that does not divide by 2
1319  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1320  for (unsigned int v = 0; v < 2; ++v)
1321  out[i][v] = in[offsets[v] + i];
1322 }
1323 
1324 
1325 
1329 template <>
1330 inline DEAL_II_ALWAYS_INLINE void
1331 vectorized_load_and_transpose(const unsigned int n_entries,
1332  const std::array<double *, 2> &in,
 1333                               VectorizedArray<double, 2> *out)
 1334 {
1335  // see the comments in the vectorized_load_and_transpose above
1336 
1337  const unsigned int n_chunks = n_entries / 2;
1338  for (unsigned int i = 0; i < n_chunks; ++i)
1339  {
1340  __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
1341  __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
1342  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1343  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1344  }
1345 
1346  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1347  for (unsigned int v = 0; v < 2; ++v)
1348  out[i][v] = in[v][i];
1349 }
1350 
1351 
1352 
1356 template <>
1357 inline DEAL_II_ALWAYS_INLINE void
1358 vectorized_transpose_and_store(const bool add_into,
1359  const unsigned int n_entries,
1360  const VectorizedArray<double, 2> *in,
1361  const unsigned int * offsets,
1362  double * out)
1363 {
1364  const unsigned int n_chunks = n_entries / 2;
1365  if (add_into)
1366  {
1367  for (unsigned int i = 0; i < n_chunks; ++i)
1368  {
1369  __m128d u0 = in[2 * i + 0].data;
1370  __m128d u1 = in[2 * i + 1].data;
1371  __m128d res0 = _mm_unpacklo_pd(u0, u1);
1372  __m128d res1 = _mm_unpackhi_pd(u0, u1);
1373  _mm_storeu_pd(out + 2 * i + offsets[0],
1374  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
1375  res0));
1376  _mm_storeu_pd(out + 2 * i + offsets[1],
1377  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
1378  res1));
1379  }
1380  // remainder loop of work that does not divide by 2
1381  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1382  for (unsigned int v = 0; v < 2; ++v)
1383  out[offsets[v] + i] += in[i][v];
1384  }
1385  else
1386  {
1387  for (unsigned int i = 0; i < n_chunks; ++i)
1388  {
1389  __m128d u0 = in[2 * i + 0].data;
1390  __m128d u1 = in[2 * i + 1].data;
1391  __m128d res0 = _mm_unpacklo_pd(u0, u1);
1392  __m128d res1 = _mm_unpackhi_pd(u0, u1);
1393  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
1394  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
1395  }
1396  // remainder loop of work that does not divide by 2
1397  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1398  for (unsigned int v = 0; v < 2; ++v)
1399  out[offsets[v] + i] = in[i][v];
1400  }
1401 }
1402 
1403 
1404 
1408 template <>
1409 inline DEAL_II_ALWAYS_INLINE void
1410 vectorized_transpose_and_store(const bool add_into,
1411  const unsigned int n_entries,
1412  const VectorizedArray<double, 2> *in,
1413  std::array<double *, 2> & out)
1414 {
1415  // see the comments in the vectorized_transpose_and_store above
1416 
1417  const unsigned int n_chunks = n_entries / 2;
1418  if (add_into)
1419  {
1420  for (unsigned int i = 0; i < n_chunks; ++i)
1421  {
1422  __m128d u0 = in[2 * i + 0].data;
1423  __m128d u1 = in[2 * i + 1].data;
1424  __m128d res0 = _mm_unpacklo_pd(u0, u1);
1425  __m128d res1 = _mm_unpackhi_pd(u0, u1);
1426  _mm_storeu_pd(out[0] + 2 * i,
1427  _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
1428  _mm_storeu_pd(out[1] + 2 * i,
1429  _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
1430  }
1431 
1432  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1433  for (unsigned int v = 0; v < 2; ++v)
1434  out[v][i] += in[i][v];
1435  }
1436  else
1437  {
1438  for (unsigned int i = 0; i < n_chunks; ++i)
1439  {
1440  __m128d u0 = in[2 * i + 0].data;
1441  __m128d u1 = in[2 * i + 1].data;
1442  __m128d res0 = _mm_unpacklo_pd(u0, u1);
1443  __m128d res1 = _mm_unpackhi_pd(u0, u1);
1444  _mm_storeu_pd(out[0] + 2 * i, res0);
1445  _mm_storeu_pd(out[1] + 2 * i, res1);
1446  }
1447 
1448  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1449  for (unsigned int v = 0; v < 2; ++v)
1450  out[v][i] = in[i][v];
1451  }
1452 }
1453 
1454 
1455 
1459 template <>
1460 class VectorizedArray<float, 4>
1461  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
1462 {
1463 public:
1467  using value_type = float;
1468 
1477  VectorizedArray() = default;
1478 
1482  VectorizedArray(const float scalar)
1483  {
1484  this->operator=(scalar);
1485  }
1486 
1490  template <typename U>
1491  VectorizedArray(const std::initializer_list<U> &list)
1492  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
1493  {}
1494 
1496  VectorizedArray &
1497  operator=(const float x) &
1498  {
1499  data = _mm_set1_ps(x);
1500  return *this;
1501  }
1502 
1508  VectorizedArray &
1509  operator=(const float scalar) && = delete;
1510 
1515  float &
1516  operator[](const unsigned int comp)
1517  {
1518  AssertIndexRange(comp, 4);
1519  return *(reinterpret_cast<float *>(&data) + comp);
1520  }
1521 
1526  const float &
1527  operator[](const unsigned int comp) const
1528  {
1529  AssertIndexRange(comp, 4);
1530  return *(reinterpret_cast<const float *>(&data) + comp);
1531  }
1532 
1537  VectorizedArray &
1538  operator+=(const VectorizedArray &vec)
1539  {
1540 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1541  data += vec.data;
1542 # else
1543  data = _mm_add_ps(data, vec.data);
1544 # endif
1545  return *this;
1546  }
1547 
1552  VectorizedArray &
1553  operator-=(const VectorizedArray &vec)
1554  {
1555 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1556  data -= vec.data;
1557 # else
1558  data = _mm_sub_ps(data, vec.data);
1559 # endif
1560  return *this;
1561  }
1562 
1567  VectorizedArray &
1568  operator*=(const VectorizedArray &vec)
1569  {
1570 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1571  data *= vec.data;
1572 # else
1573  data = _mm_mul_ps(data, vec.data);
1574 # endif
1575  return *this;
1576  }
1577 
1582  VectorizedArray &
1583  operator/=(const VectorizedArray &vec)
1584  {
1585 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1586  data /= vec.data;
1587 # else
1588  data = _mm_div_ps(data, vec.data);
1589 # endif
1590  return *this;
1591  }
1592 
1599  void
1600  load(const float *ptr)
1601  {
1602  data = _mm_loadu_ps(ptr);
1603  }
1604 
1612  void
1613  store(float *ptr) const
1614  {
1615  _mm_storeu_ps(ptr, data);
1616  }
1617 
1623  void
1624  streaming_store(float *ptr) const
1625  {
1626  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1627  ExcMessage("Memory not aligned"));
1628  _mm_stream_ps(ptr, data);
1629  }
1630 
1644  void
1645  gather(const float *base_ptr, const unsigned int *offsets)
1646  {
1647  for (unsigned int i = 0; i < 4; ++i)
1648  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
1649  }
1650 
1664  void
1665  scatter(const unsigned int *offsets, float *base_ptr) const
1666  {
1667  for (unsigned int i = 0; i < 4; ++i)
1668  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
1669  }
1670 
1675  float
1676  sum()
1677  {
1678  __m128 t1 = _mm_movehl_ps(data, data);
1679  __m128 t2 = _mm_add_ps(data, t1);
1680  __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
1681  __m128 t4 = _mm_add_ss(t2, t3);
1682  return _mm_cvtss_f32(t4);
1683  }
1684 
1690  __m128 data;
1691 
1692 private:
 1698   VectorizedArray
 1699   get_sqrt() const
1700  {
1701  VectorizedArray res;
1702  res.data = _mm_sqrt_ps(data);
1703  return res;
1704  }
1705 
 1711   VectorizedArray
 1712   get_abs() const
1713  {
1714  // to compute the absolute value, perform bitwise andnot with -0. This
1715  // will leave all value and exponent bits unchanged but force the sign
1716  // value to +.
1717  __m128 mask = _mm_set1_ps(-0.f);
1718  VectorizedArray res;
1719  res.data = _mm_andnot_ps(mask, data);
1720  return res;
1721  }
1722 
 1728   VectorizedArray
 1729   get_max(const VectorizedArray &other) const
1730  {
1731  VectorizedArray res;
1732  res.data = _mm_max_ps(data, other.data);
1733  return res;
1734  }
1735 
 1741   VectorizedArray
 1742   get_min(const VectorizedArray &other) const
1743  {
1744  VectorizedArray res;
1745  res.data = _mm_min_ps(data, other.data);
1746  return res;
1747  }
1748 
1749  // Make a few functions friends.
 1750   template <typename Number2, std::size_t width2>
 1751   friend VectorizedArray<Number2, width2>
 1752   std::sqrt(const VectorizedArray<Number2, width2> &);
 1753   template <typename Number2, std::size_t width2>
 1754   friend VectorizedArray<Number2, width2>
 1755   std::abs(const VectorizedArray<Number2, width2> &);
 1756   template <typename Number2, std::size_t width2>
 1757   friend VectorizedArray<Number2, width2>
 1758   std::max(const VectorizedArray<Number2, width2> &,
 1759            const VectorizedArray<Number2, width2> &);
 1760   template <typename Number2, std::size_t width2>
 1761   friend VectorizedArray<Number2, width2>
 1762   std::min(const VectorizedArray<Number2, width2> &,
 1763            const VectorizedArray<Number2, width2> &);
 1764 };
1765 
1766 
1767 
1771 template <>
1772 inline DEAL_II_ALWAYS_INLINE void
1773 vectorized_load_and_transpose(const unsigned int n_entries,
1774  const float * in,
1775  const unsigned int * offsets,
 1776                               VectorizedArray<float, 4> *out)
 1777 {
1778  const unsigned int n_chunks = n_entries / 4;
1779  for (unsigned int i = 0; i < n_chunks; ++i)
1780  {
1781  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
1782  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
1783  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
1784  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
1785  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
1786  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
1787  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
1788  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
1789  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
1790  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
1791  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
1792  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
1793  }
1794 
1795  // remainder loop of work that does not divide by 4
1796  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1797  for (unsigned int v = 0; v < 4; ++v)
1798  out[i][v] = in[offsets[v] + i];
1799 }
1800 
1801 
1802 
1806 template <>
1807 inline DEAL_II_ALWAYS_INLINE void
1808 vectorized_load_and_transpose(const unsigned int n_entries,
1809  const std::array<float *, 4> &in,
 1810                               VectorizedArray<float, 4> *out)
 1811 {
1812  // see the comments in the vectorized_load_and_transpose above
1813 
1814  const unsigned int n_chunks = n_entries / 4;
1815  for (unsigned int i = 0; i < n_chunks; ++i)
1816  {
1817  __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
1818  __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
1819  __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
1820  __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
1821  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
1822  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
1823  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
1824  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
1825  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
1826  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
1827  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
1828  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
1829  }
1830 
1831  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1832  for (unsigned int v = 0; v < 4; ++v)
1833  out[i][v] = in[v][i];
1834 }
1835 
1836 
1837 
1841 template <>
1842 inline DEAL_II_ALWAYS_INLINE void
1843 vectorized_transpose_and_store(const bool add_into,
1844  const unsigned int n_entries,
1845  const VectorizedArray<float, 4> *in,
1846  const unsigned int * offsets,
1847  float * out)
1848 {
1849  const unsigned int n_chunks = n_entries / 4;
1850  for (unsigned int i = 0; i < n_chunks; ++i)
1851  {
1852  __m128 u0 = in[4 * i + 0].data;
1853  __m128 u1 = in[4 * i + 1].data;
1854  __m128 u2 = in[4 * i + 2].data;
1855  __m128 u3 = in[4 * i + 3].data;
1856  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
1857  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
1858  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
1859  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
1860  u0 = _mm_shuffle_ps(t0, t2, 0x88);
1861  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
1862  u2 = _mm_shuffle_ps(t1, t3, 0x88);
1863  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
1864 
1865  // Cannot use the same store instructions in both paths of the 'if'
1866  // because the compiler cannot know that there is no aliasing between
1867  // pointers
1868  if (add_into)
1869  {
1870  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
1871  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
1872  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
1873  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
1874  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
1875  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
1876  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
1877  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
1878  }
1879  else
1880  {
1881  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
1882  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
1883  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
1884  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
1885  }
1886  }
1887 
1888  // remainder loop of work that does not divide by 4
1889  if (add_into)
1890  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1891  for (unsigned int v = 0; v < 4; ++v)
1892  out[offsets[v] + i] += in[i][v];
1893  else
1894  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1895  for (unsigned int v = 0; v < 4; ++v)
1896  out[offsets[v] + i] = in[i][v];
1897 }
1898 
1899 
1900 
1904 template <>
1905 inline DEAL_II_ALWAYS_INLINE void
1906 vectorized_transpose_and_store(const bool add_into,
1907  const unsigned int n_entries,
1908  const VectorizedArray<float, 4> *in,
1909  std::array<float *, 4> & out)
1910 {
1911  // see the comments in the vectorized_transpose_and_store above
1912 
1913  const unsigned int n_chunks = n_entries / 4;
1914  for (unsigned int i = 0; i < n_chunks; ++i)
1915  {
1916  __m128 u0 = in[4 * i + 0].data;
1917  __m128 u1 = in[4 * i + 1].data;
1918  __m128 u2 = in[4 * i + 2].data;
1919  __m128 u3 = in[4 * i + 3].data;
1920  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
1921  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
1922  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
1923  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
1924  u0 = _mm_shuffle_ps(t0, t2, 0x88);
1925  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
1926  u2 = _mm_shuffle_ps(t1, t3, 0x88);
1927  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
1928 
1929  if (add_into)
1930  {
1931  u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
1932  _mm_storeu_ps(out[0] + 4 * i, u0);
1933  u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
1934  _mm_storeu_ps(out[1] + 4 * i, u1);
1935  u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
1936  _mm_storeu_ps(out[2] + 4 * i, u2);
1937  u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
1938  _mm_storeu_ps(out[3] + 4 * i, u3);
1939  }
1940  else
1941  {
1942  _mm_storeu_ps(out[0] + 4 * i, u0);
1943  _mm_storeu_ps(out[1] + 4 * i, u1);
1944  _mm_storeu_ps(out[2] + 4 * i, u2);
1945  _mm_storeu_ps(out[3] + 4 * i, u3);
1946  }
1947  }
1948 
1949  if (add_into)
1950  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1951  for (unsigned int v = 0; v < 4; ++v)
1952  out[v][i] += in[i][v];
1953  else
1954  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1955  for (unsigned int v = 0; v < 4; ++v)
1956  out[v][i] = in[i][v];
1957 }
1958 
1959 
1960 
 1961 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
1962 
1963 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
1964 
1968 template <>
1969 class VectorizedArray<double, 4>
1970  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
1971 {
1972 public:
1976  using value_type = double;
1977 
1982  VectorizedArray() = default;
1983 
1987  VectorizedArray(const double scalar)
1988  {
1989  this->operator=(scalar);
1990  }
1991 
1995  template <typename U>
1996  VectorizedArray(const std::initializer_list<U> &list)
 1997     : VectorizedArrayBase<VectorizedArray<double, 4>, 4>(list)
 1998   {}
1999 
2004  VectorizedArray &
2005  operator=(const double x) &
2006  {
2007  data = _mm256_set1_pd(x);
2008  return *this;
2009  }
2010 
2016  VectorizedArray &
2017  operator=(const double scalar) && = delete;
2018 
2023  double &
2024  operator[](const unsigned int comp)
2025  {
2026  AssertIndexRange(comp, 4);
2027  return *(reinterpret_cast<double *>(&data) + comp);
2028  }
2029 
2034  const double &
2035  operator[](const unsigned int comp) const
2036  {
2037  AssertIndexRange(comp, 4);
2038  return *(reinterpret_cast<const double *>(&data) + comp);
2039  }
2040 
2045  VectorizedArray &
2046  operator+=(const VectorizedArray &vec)
2047  {
2048  // if the compiler supports vector arithmetic, we can simply use +=
2049  // operator on the given data type. this allows the compiler to combine
2050  // additions with multiplication (fused multiply-add) if those
2051  // instructions are available. Otherwise, we need to use the built-in
2052  // intrinsic command for __m256d
2053 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2054  data += vec.data;
2055 # else
2056  data = _mm256_add_pd(data, vec.data);
2057 # endif
2058  return *this;
2059  }
2060 
2065  VectorizedArray &
2066  operator-=(const VectorizedArray &vec)
2067  {
2068 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2069  data -= vec.data;
2070 # else
2071  data = _mm256_sub_pd(data, vec.data);
2072 # endif
2073  return *this;
2074  }
 2074   }
 2079   VectorizedArray &
2080  operator*=(const VectorizedArray &vec)
2081  {
2082 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2083  data *= vec.data;
2084 # else
2085  data = _mm256_mul_pd(data, vec.data);
2086 # endif
2087  return *this;
2088  }
2089 
2094  VectorizedArray &
2095  operator/=(const VectorizedArray &vec)
2096  {
2097 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2098  data /= vec.data;
2099 # else
2100  data = _mm256_div_pd(data, vec.data);
2101 # endif
2102  return *this;
2103  }
2104 
2111  void
2112  load(const double *ptr)
2113  {
2114  data = _mm256_loadu_pd(ptr);
2115  }
2116 
2118  void
2119  load(const float *ptr)
2120  {
2121  data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
2122  }
2123 
2131  void
2132  store(double *ptr) const
2133  {
2134  _mm256_storeu_pd(ptr, data);
2135  }
2136 
2138  void
2139  store(float *ptr) const
2140  {
2141  _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
2142  }
2143 
2149  void
2150  streaming_store(double *ptr) const
2151  {
2152  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2153  ExcMessage("Memory not aligned"));
2154  _mm256_stream_pd(ptr, data);
2155  }
2156 
2170  void
2171  gather(const double *base_ptr, const unsigned int *offsets)
2172  {
2173 # ifdef __AVX2__
2174  // unfortunately, there does not appear to be a 128 bit integer load, so
2175  // do it by some reinterpret casts here. this is allowed because the Intel
2176  // API allows aliasing between different vector types.
2177  const __m128 index_val =
2178  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2179  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2180 
2181  // work around a warning with gcc-12 about an uninitialized initial state
2182  // for gather by starting with a zero guess, even though all lanes will be
2183  // overwritten
2184  __m256d zero = _mm256_setzero_pd();
2185  __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);
2186 
2187  data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2188 # else
2189  for (unsigned int i = 0; i < 4; ++i)
2190  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2191 # endif
2192  }
2193 
2207  void
2208  scatter(const unsigned int *offsets, double *base_ptr) const
2209  {
2210  // no scatter operation in AVX/AVX2
2211  for (unsigned int i = 0; i < 4; ++i)
2212  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2213  }
2214 
2219  double
2220  sum()
2221  {
 2222   VectorizedArray<double, 2> t1;
 2223   t1.data = _mm_add_pd(this->get_lower(), this->get_upper());
2224  return t1.sum();
2225  }
2226 
2232  __m256d data;
2233 
2234 private:
2239  __m128d
2240  get_lower() const
2241  {
2242  return _mm256_castpd256_pd128(data);
2243  }
2244 
2249  __m128d
2250  get_upper() const
2251  {
2252  return _mm256_extractf128_pd(data, 1);
2253  }
2254 
 2260   VectorizedArray
 2261   get_sqrt() const
2262  {
2263  VectorizedArray res;
2264  res.data = _mm256_sqrt_pd(data);
2265  return res;
2266  }
2267 
 2273   VectorizedArray
 2274   get_abs() const
2275  {
2276  // to compute the absolute value, perform bitwise andnot with -0. This
2277  // will leave all value and exponent bits unchanged but force the sign
2278  // value to +.
2279  __m256d mask = _mm256_set1_pd(-0.);
2280  VectorizedArray res;
2281  res.data = _mm256_andnot_pd(mask, data);
2282  return res;
2283  }
2284 
 2290   VectorizedArray
 2291   get_max(const VectorizedArray &other) const
2292  {
2293  VectorizedArray res;
2294  res.data = _mm256_max_pd(data, other.data);
2295  return res;
2296  }
2297 
 2303   VectorizedArray
 2304   get_min(const VectorizedArray &other) const
2305  {
2306  VectorizedArray res;
2307  res.data = _mm256_min_pd(data, other.data);
2308  return res;
2309  }
2310 
2311  // Make a few functions friends.
 2312   template <typename Number2, std::size_t width2>
 2313   friend VectorizedArray<Number2, width2>
 2314   std::sqrt(const VectorizedArray<Number2, width2> &);
 2315   template <typename Number2, std::size_t width2>
 2316   friend VectorizedArray<Number2, width2>
 2317   std::abs(const VectorizedArray<Number2, width2> &);
 2318   template <typename Number2, std::size_t width2>
 2319   friend VectorizedArray<Number2, width2>
 2320   std::max(const VectorizedArray<Number2, width2> &,
 2321            const VectorizedArray<Number2, width2> &);
 2322   template <typename Number2, std::size_t width2>
 2323   friend VectorizedArray<Number2, width2>
 2324   std::min(const VectorizedArray<Number2, width2> &,
 2325            const VectorizedArray<Number2, width2> &);
 2326 };
2327 
2328 
2329 
2333 template <>
2334 inline DEAL_II_ALWAYS_INLINE void
2335 vectorized_load_and_transpose(const unsigned int n_entries,
2336  const double * in,
2337  const unsigned int * offsets,
 2338                               VectorizedArray<double, 4> *out)
 2339 {
2340  const unsigned int n_chunks = n_entries / 4;
2341  const double * in0 = in + offsets[0];
2342  const double * in1 = in + offsets[1];
2343  const double * in2 = in + offsets[2];
2344  const double * in3 = in + offsets[3];
2345 
2346  for (unsigned int i = 0; i < n_chunks; ++i)
2347  {
2348  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2349  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2350  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2351  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2352  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2353  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2354  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2355  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2356  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2357  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2358  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2359  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2360  }
2361 
2362  // remainder loop of work that does not divide by 4
2363  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2364  out[i].gather(in + i, offsets);
2365 }
2366 
2367 
2368 
2372 template <>
2373 inline DEAL_II_ALWAYS_INLINE void
2374 vectorized_load_and_transpose(const unsigned int n_entries,
2375  const std::array<double *, 4> &in,
 2376                               VectorizedArray<double, 4> *out)
 2377 {
2378  // see the comments in the vectorized_load_and_transpose above
2379 
2380  const unsigned int n_chunks = n_entries / 4;
2381  const double * in0 = in[0];
2382  const double * in1 = in[1];
2383  const double * in2 = in[2];
2384  const double * in3 = in[3];
2385 
2386  for (unsigned int i = 0; i < n_chunks; ++i)
2387  {
2388  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2389  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2390  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2391  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2392  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2393  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2394  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2395  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2396  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2397  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2398  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2399  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2400  }
2401 
2402  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2403  gather(out[i], in, i);
2404 }
2405 
2406 
2407 
2411 template <>
2412 inline DEAL_II_ALWAYS_INLINE void
2413 vectorized_transpose_and_store(const bool add_into,
2414  const unsigned int n_entries,
2415  const VectorizedArray<double, 4> *in,
2416  const unsigned int * offsets,
2417  double * out)
2418 {
2419  const unsigned int n_chunks = n_entries / 4;
2420  double * out0 = out + offsets[0];
2421  double * out1 = out + offsets[1];
2422  double * out2 = out + offsets[2];
2423  double * out3 = out + offsets[3];
2424  for (unsigned int i = 0; i < n_chunks; ++i)
2425  {
2426  __m256d u0 = in[4 * i + 0].data;
2427  __m256d u1 = in[4 * i + 1].data;
2428  __m256d u2 = in[4 * i + 2].data;
2429  __m256d u3 = in[4 * i + 3].data;
2430  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2431  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2432  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2433  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2434  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2435  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2436  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2437  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2438 
2439  // Cannot use the same store instructions in both paths of the 'if'
2440  // because the compiler cannot know that there is no aliasing between
2441  // pointers
2442  if (add_into)
2443  {
2444  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2445  _mm256_storeu_pd(out0 + 4 * i, res0);
2446  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2447  _mm256_storeu_pd(out1 + 4 * i, res1);
2448  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2449  _mm256_storeu_pd(out2 + 4 * i, res2);
2450  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2451  _mm256_storeu_pd(out3 + 4 * i, res3);
2452  }
2453  else
2454  {
2455  _mm256_storeu_pd(out0 + 4 * i, res0);
2456  _mm256_storeu_pd(out1 + 4 * i, res1);
2457  _mm256_storeu_pd(out2 + 4 * i, res2);
2458  _mm256_storeu_pd(out3 + 4 * i, res3);
2459  }
2460  }
2461 
2462  // remainder loop of work that does not divide by 4
2463  if (add_into)
2464  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2465  for (unsigned int v = 0; v < 4; ++v)
2466  out[offsets[v] + i] += in[i][v];
2467  else
2468  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2469  for (unsigned int v = 0; v < 4; ++v)
2470  out[offsets[v] + i] = in[i][v];
2471 }
2472 
2473 
2474 
2478 template <>
2479 inline DEAL_II_ALWAYS_INLINE void
2480 vectorized_transpose_and_store(const bool add_into,
2481  const unsigned int n_entries,
2482  const VectorizedArray<double, 4> *in,
2483  std::array<double *, 4> & out)
2484 {
2485  // see the comments in the vectorized_transpose_and_store above
2486 
2487  const unsigned int n_chunks = n_entries / 4;
2488  double * out0 = out[0];
2489  double * out1 = out[1];
2490  double * out2 = out[2];
2491  double * out3 = out[3];
2492  for (unsigned int i = 0; i < n_chunks; ++i)
2493  {
2494  __m256d u0 = in[4 * i + 0].data;
2495  __m256d u1 = in[4 * i + 1].data;
2496  __m256d u2 = in[4 * i + 2].data;
2497  __m256d u3 = in[4 * i + 3].data;
2498  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2499  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2500  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2501  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2502  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2503  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2504  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2505  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2506 
2507  // Cannot use the same store instructions in both paths of the 'if'
2508  // because the compiler cannot know that there is no aliasing between
2509  // pointers
2510  if (add_into)
2511  {
2512  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2513  _mm256_storeu_pd(out0 + 4 * i, res0);
2514  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2515  _mm256_storeu_pd(out1 + 4 * i, res1);
2516  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2517  _mm256_storeu_pd(out2 + 4 * i, res2);
2518  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2519  _mm256_storeu_pd(out3 + 4 * i, res3);
2520  }
2521  else
2522  {
2523  _mm256_storeu_pd(out0 + 4 * i, res0);
2524  _mm256_storeu_pd(out1 + 4 * i, res1);
2525  _mm256_storeu_pd(out2 + 4 * i, res2);
2526  _mm256_storeu_pd(out3 + 4 * i, res3);
2527  }
2528  }
2529 
2530  // remainder loop of work that does not divide by 4
2531  if (add_into)
2532  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2533  for (unsigned int v = 0; v < 4; ++v)
2534  out[v][i] += in[i][v];
2535  else
2536  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2537  for (unsigned int v = 0; v < 4; ++v)
2538  out[v][i] = in[i][v];
2539 }
2540 
2541 
2542 
2546 template <>
2547 class VectorizedArray<float, 8>
2548  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
2549 {
2550 public:
2554  using value_type = float;
2555 
2560  VectorizedArray() = default;
2561 
2565  VectorizedArray(const float scalar)
2566  {
2567  this->operator=(scalar);
2568  }
2569 
2573  template <typename U>
2574  VectorizedArray(const std::initializer_list<U> &list)
2575  : VectorizedArrayBase<VectorizedArray<float, 8>, 8>(list)
2576  {}
2577 
2582  VectorizedArray &
2583  operator=(const float x) &
2584  {
2585  data = _mm256_set1_ps(x);
2586  return *this;
2587  }
2588 
2594  VectorizedArray &
2595  operator=(const float scalar) && = delete;
2596 
2601  float &
2602  operator[](const unsigned int comp)
2603  {
2604  AssertIndexRange(comp, 8);
2605  return *(reinterpret_cast<float *>(&data) + comp);
2606  }
2607 
2612  const float &
2613  operator[](const unsigned int comp) const
2614  {
2615  AssertIndexRange(comp, 8);
2616  return *(reinterpret_cast<const float *>(&data) + comp);
2617  }
2618 
2623  VectorizedArray &
2624  operator+=(const VectorizedArray &vec)
2625  {
2626  // if the compiler supports vector arithmetic, we can simply use +=
2627  // operator on the given data type. this allows the compiler to combine
2628  // additions with multiplication (fused multiply-add) if those
2629  // instructions are available. Otherwise, we need to use the built-in
 2630  // intrinsic command for __m256
2631 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2632  data += vec.data;
2633 # else
2634  data = _mm256_add_ps(data, vec.data);
2635 # endif
2636  return *this;
2637  }
2638 
2643  VectorizedArray &
2644  operator-=(const VectorizedArray &vec)
2645  {
2646 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2647  data -= vec.data;
2648 # else
2649  data = _mm256_sub_ps(data, vec.data);
2650 # endif
2651  return *this;
2652  }
 2652   }
 2657   VectorizedArray &
2658  operator*=(const VectorizedArray &vec)
2659  {
2660 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2661  data *= vec.data;
2662 # else
2663  data = _mm256_mul_ps(data, vec.data);
2664 # endif
2665  return *this;
2666  }
2667 
2672  VectorizedArray &
2673  operator/=(const VectorizedArray &vec)
2674  {
2675 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2676  data /= vec.data;
2677 # else
2678  data = _mm256_div_ps(data, vec.data);
2679 # endif
2680  return *this;
2681  }
2682 
2689  void
2690  load(const float *ptr)
2691  {
2692  data = _mm256_loadu_ps(ptr);
2693  }
2694 
2702  void
2703  store(float *ptr) const
2704  {
2705  _mm256_storeu_ps(ptr, data);
2706  }
2707 
2713  void
2714  streaming_store(float *ptr) const
2715  {
2716  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2717  ExcMessage("Memory not aligned"));
2718  _mm256_stream_ps(ptr, data);
2719  }
2720 
2734  void
2735  gather(const float *base_ptr, const unsigned int *offsets)
2736  {
2737 # ifdef __AVX2__
2738  // unfortunately, there does not appear to be a 256 bit integer load, so
2739  // do it by some reinterpret casts here. this is allowed because the Intel
2740  // API allows aliasing between different vector types.
2741  const __m256 index_val =
2742  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2743  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2744 
2745  // work around a warning with gcc-12 about an uninitialized initial state
2746  // for gather by starting with a zero guess, even though all lanes will be
2747  // overwritten
2748  __m256 zero = _mm256_setzero_ps();
2749  __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);
2750 
2751  data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
2752 # else
2753  for (unsigned int i = 0; i < 8; ++i)
2754  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2755 # endif
2756  }
2757 
2771  void
2772  scatter(const unsigned int *offsets, float *base_ptr) const
2773  {
2774  // no scatter operation in AVX/AVX2
2775  for (unsigned int i = 0; i < 8; ++i)
2776  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2777  }
2778 
2783  float
2784  sum()
2785  {
 2786   VectorizedArray<float, 4> t1;
 2787   t1.data = _mm_add_ps(this->get_lower(), this->get_upper());
2788  return t1.sum();
2789  }
2790 
2796  __m256 data;
2797 
2798 private:
2803  __m128
2804  get_lower() const
2805  {
2806  return _mm256_castps256_ps128(data);
2807  }
2808 
2813  __m128
2814  get_upper() const
2815  {
2816  return _mm256_extractf128_ps(data, 1);
2817  }
2818 
 2824   VectorizedArray
 2825   get_sqrt() const
2826  {
2827  VectorizedArray res;
2828  res.data = _mm256_sqrt_ps(data);
2829  return res;
2830  }
2831 
 2837   VectorizedArray
 2838   get_abs() const
2839  {
2840  // to compute the absolute value, perform bitwise andnot with -0. This
2841  // will leave all value and exponent bits unchanged but force the sign
2842  // value to +.
2843  __m256 mask = _mm256_set1_ps(-0.f);
2844  VectorizedArray res;
2845  res.data = _mm256_andnot_ps(mask, data);
2846  return res;
2847  }
2848 
 2854   VectorizedArray
 2855   get_max(const VectorizedArray &other) const
2856  {
2857  VectorizedArray res;
2858  res.data = _mm256_max_ps(data, other.data);
2859  return res;
2860  }
2861 
 2867   VectorizedArray
 2868   get_min(const VectorizedArray &other) const
2869  {
2870  VectorizedArray res;
2871  res.data = _mm256_min_ps(data, other.data);
2872  return res;
2873  }
2874 
2875  // Make a few functions friends.
 2876   template <typename Number2, std::size_t width2>
 2877   friend VectorizedArray<Number2, width2>
 2878   std::sqrt(const VectorizedArray<Number2, width2> &);
 2879   template <typename Number2, std::size_t width2>
 2880   friend VectorizedArray<Number2, width2>
 2881   std::abs(const VectorizedArray<Number2, width2> &);
 2882   template <typename Number2, std::size_t width2>
 2883   friend VectorizedArray<Number2, width2>
 2884   std::max(const VectorizedArray<Number2, width2> &,
 2885            const VectorizedArray<Number2, width2> &);
 2886   template <typename Number2, std::size_t width2>
 2887   friend VectorizedArray<Number2, width2>
 2888   std::min(const VectorizedArray<Number2, width2> &,
 2889            const VectorizedArray<Number2, width2> &);
 2890 };
2891 
2892 
2893 
2897 template <>
2898 inline DEAL_II_ALWAYS_INLINE void
2899 vectorized_load_and_transpose(const unsigned int n_entries,
2900  const float * in,
2901  const unsigned int * offsets,
 2902                               VectorizedArray<float, 8> *out)
 2903 {
2904  const unsigned int n_chunks = n_entries / 4;
2905  for (unsigned int i = 0; i < n_chunks; ++i)
2906  {
2907  // To avoid warnings about uninitialized variables, need to initialize
2908  // one variable with zero before using it.
2909  __m256 t0, t1, t2, t3 = {};
2910  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
2911  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
2912  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
2913  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
2914  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
2915  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
2916  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
2917  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
2918 
2919  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2920  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2921  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2922  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2923  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2924  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2925  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2926  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2927  }
2928 
2929  // remainder loop of work that does not divide by 4
2930  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2931  out[i].gather(in + i, offsets);
2932 }
2933 
2934 
2935 
2939 template <>
2940 inline DEAL_II_ALWAYS_INLINE void
2941 vectorized_load_and_transpose(const unsigned int n_entries,
2942  const std::array<float *, 8> &in,
 2943                               VectorizedArray<float, 8> *out)
 2944 {
2945  // see the comments in the vectorized_load_and_transpose above
2946 
2947  const unsigned int n_chunks = n_entries / 4;
2948  for (unsigned int i = 0; i < n_chunks; ++i)
2949  {
2950  __m256 t0, t1, t2, t3 = {};
2951  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
2952  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
2953  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
2954  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
2955  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
2956  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
2957  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
2958  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
2959 
2960  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2961  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2962  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2963  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2964  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2965  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2966  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2967  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2968  }
2969 
2970  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2971  gather(out[i], in, i);
2972 }
2973 
2974 
2975 
2979 template <>
2980 inline DEAL_II_ALWAYS_INLINE void
2981 vectorized_transpose_and_store(const bool add_into,
2982  const unsigned int n_entries,
2983  const VectorizedArray<float, 8> *in,
2984  const unsigned int * offsets,
2985  float * out)
2986 {
2987  const unsigned int n_chunks = n_entries / 4;
2988  for (unsigned int i = 0; i < n_chunks; ++i)
2989  {
2990  __m256 u0 = in[4 * i + 0].data;
2991  __m256 u1 = in[4 * i + 1].data;
2992  __m256 u2 = in[4 * i + 2].data;
2993  __m256 u3 = in[4 * i + 3].data;
2994  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
2995  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
2996  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
2997  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
2998  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
2999  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3000  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3001  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3002  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3003  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3004  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3005  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3006  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3007  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3008  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3009  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3010 
3011  // Cannot use the same store instructions in both paths of the 'if'
3012  // because the compiler cannot know that there is no aliasing between
3013  // pointers
3014  if (add_into)
3015  {
3016  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3017  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3018  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3019  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3020  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3021  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3022  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3023  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3024  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3025  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3026  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3027  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3028  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3029  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3030  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3031  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3032  }
3033  else
3034  {
3035  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3036  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3037  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3038  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3039  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3040  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3041  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3042  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3043  }
3044  }
3045 
3046  // remainder loop of work that does not divide by 4
3047  if (add_into)
3048  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3049  for (unsigned int v = 0; v < 8; ++v)
3050  out[offsets[v] + i] += in[i][v];
3051  else
3052  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3053  for (unsigned int v = 0; v < 8; ++v)
3054  out[offsets[v] + i] = in[i][v];
3055 }
3056 
3057 
3058 
3062 template <>
3063 inline DEAL_II_ALWAYS_INLINE void
3064 vectorized_transpose_and_store(const bool add_into,
3065  const unsigned int n_entries,
3066  const VectorizedArray<float, 8> *in,
3067  std::array<float *, 8> & out)
3068 {
3069  // see the comments in the vectorized_transpose_and_store above
3070 
3071  const unsigned int n_chunks = n_entries / 4;
3072  for (unsigned int i = 0; i < n_chunks; ++i)
3073  {
3074  __m256 u0 = in[4 * i + 0].data;
3075  __m256 u1 = in[4 * i + 1].data;
3076  __m256 u2 = in[4 * i + 2].data;
3077  __m256 u3 = in[4 * i + 3].data;
3078  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3079  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3080  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3081  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3082  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3083  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3084  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3085  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3086  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3087  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3088  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3089  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3090  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3091  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3092  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3093  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3094 
3095  if (add_into)
3096  {
3097  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3098  _mm_storeu_ps(out[0] + 4 * i, res0);
3099  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3100  _mm_storeu_ps(out[1] + 4 * i, res1);
3101  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3102  _mm_storeu_ps(out[2] + 4 * i, res2);
3103  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3104  _mm_storeu_ps(out[3] + 4 * i, res3);
3105  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3106  _mm_storeu_ps(out[4] + 4 * i, res4);
3107  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3108  _mm_storeu_ps(out[5] + 4 * i, res5);
3109  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3110  _mm_storeu_ps(out[6] + 4 * i, res6);
3111  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3112  _mm_storeu_ps(out[7] + 4 * i, res7);
3113  }
3114  else
3115  {
3116  _mm_storeu_ps(out[0] + 4 * i, res0);
3117  _mm_storeu_ps(out[1] + 4 * i, res1);
3118  _mm_storeu_ps(out[2] + 4 * i, res2);
3119  _mm_storeu_ps(out[3] + 4 * i, res3);
3120  _mm_storeu_ps(out[4] + 4 * i, res4);
3121  _mm_storeu_ps(out[5] + 4 * i, res5);
3122  _mm_storeu_ps(out[6] + 4 * i, res6);
3123  _mm_storeu_ps(out[7] + 4 * i, res7);
3124  }
3125  }
3126 
3127  if (add_into)
3128  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3129  for (unsigned int v = 0; v < 8; ++v)
3130  out[v][i] += in[i][v];
3131  else
3132  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3133  for (unsigned int v = 0; v < 8; ++v)
3134  out[v][i] = in[i][v];
3135 }
3136 
3137 # endif
3138 
3139 // for safety, also check that __AVX512F__ is defined in case the user manually
3140 // set some conflicting compile flags which prevent compilation
3141 
3142 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
3143 
3147 template <>
3148 class VectorizedArray<double, 8>
3149  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
3150 {
3151 public:
3155  using value_type = double;
3156 
3161  VectorizedArray() = default;
3162 
3166  VectorizedArray(const double scalar)
3167  {
3168  this->operator=(scalar);
3169  }
3170 
3174  template <typename U>
3175  VectorizedArray(const std::initializer_list<U> &list)
3176  : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
3177  {}
3178 
3183  VectorizedArray &
3184  operator=(const double x) &
3185  {
3186  data = _mm512_set1_pd(x);
3187  return *this;
3188  }
3189 
3190 
3196  VectorizedArray &
3197  operator=(const double scalar) && = delete;
3198 
3203  double &
3204  operator[](const unsigned int comp)
3205  {
3206  AssertIndexRange(comp, 8);
3207  return *(reinterpret_cast<double *>(&data) + comp);
3208  }
3209 
3214  const double &
3215  operator[](const unsigned int comp) const
3216  {
3217  AssertIndexRange(comp, 8);
3218  return *(reinterpret_cast<const double *>(&data) + comp);
3219  }
3220 
3225  VectorizedArray &
3226  operator+=(const VectorizedArray &vec)
3227  {
3228  // if the compiler supports vector arithmetic, we can simply use +=
3229  // operator on the given data type. this allows the compiler to combine
3230  // additions with multiplication (fused multiply-add) if those
3231  // instructions are available. Otherwise, we need to use the built-in
3232  // intrinsic command for __m512d
3233 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3234  data += vec.data;
3235 # else
3236  data = _mm512_add_pd(data, vec.data);
3237 # endif
3238  return *this;
3239  }
3240 
3245  VectorizedArray &
3246  operator-=(const VectorizedArray &vec)
3247  {
3248 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3249  data -= vec.data;
3250 # else
3251  data = _mm512_sub_pd(data, vec.data);
3252 # endif
3253  return *this;
3254  }
3259  VectorizedArray &
3260  operator*=(const VectorizedArray &vec)
3261  {
3262 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3263  data *= vec.data;
3264 # else
3265  data = _mm512_mul_pd(data, vec.data);
3266 # endif
3267  return *this;
3268  }
3269 
3274  VectorizedArray &
3275  operator/=(const VectorizedArray &vec)
3276  {
3277 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3278  data /= vec.data;
3279 # else
3280  data = _mm512_div_pd(data, vec.data);
3281 # endif
3282  return *this;
3283  }
3284 
3291  void
3292  load(const double *ptr)
3293  {
3294  data = _mm512_loadu_pd(ptr);
3295  }
3296 
3298  void
3299  load(const float *ptr)
3300  {
3301  data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
3302  }
3303 
3311  void
3312  store(double *ptr) const
3313  {
3314  _mm512_storeu_pd(ptr, data);
3315  }
3316 
3318  void
3319  store(float *ptr) const
3320  {
3321  _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
3322  }
3323 
3329  void
3330  streaming_store(double *ptr) const
3331  {
3332  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
3333  ExcMessage("Memory not aligned"));
3334  _mm512_stream_pd(ptr, data);
3335  }
3336 
3350  void
3351  gather(const double *base_ptr, const unsigned int *offsets)
3352  {
3353  // unfortunately, there does not appear to be a 256 bit integer load, so
3354  // do it by some reinterpret casts here. this is allowed because the Intel
3355  // API allows aliasing between different vector types.
3356  const __m256 index_val =
3357  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3358  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3359 
3360  // work around a warning with gcc-12 about an uninitialized initial state
3361  // for gather by starting with a zero guess, even though all lanes will be
3362  // overwritten
3363  __m512d zero = {};
3364  __mmask8 mask = 0xFF;
3365 
3366  data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
3367  }
3368 
3382  void
3383  scatter(const unsigned int *offsets, double *base_ptr) const
3384  {
3385  for (unsigned int i = 0; i < 8; ++i)
3386  for (unsigned int j = i + 1; j < 8; ++j)
3387  Assert(offsets[i] != offsets[j],
3388  ExcMessage("Result of scatter undefined if two offset elements"
3389  " point to the same position"));
3390 
3391  // unfortunately, there does not appear to be a 256 bit integer load, so
3392  // do it by some reinterpret casts here. this is allowed because the Intel
3393  // API allows aliasing between different vector types.
3394  const __m256 index_val =
3395  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3396  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3397  _mm512_i32scatter_pd(base_ptr, index, data, 8);
3398  }
3399 
3404  double
3405  sum()
3406  {
3407  VectorizedArray<double, 4> t1;
3408  t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());
3409  return t1.sum();
3410  }
3411 
3417  __m512d data;
3418 
3419 private:
3424  __m256d
3425  get_lower() const
3426  {
3427  return _mm512_castpd512_pd256(data);
3428  }
3429 
3434  __m256d
3435  get_upper() const
3436  {
3437  return _mm512_extractf64x4_pd(data, 1);
3438  }
3439 
3446  get_sqrt() const
3447  {
3448  VectorizedArray res;
3449  res.data = _mm512_sqrt_pd(data);
3450  return res;
3451  }
3452 
3459  get_abs() const
3460  {
3461  // to compute the absolute value, perform bitwise andnot with -0. This
3462  // will leave all value and exponent bits unchanged but force the sign
3463  // value to +. Since there is no andnot for AVX512, we interpret the data
3464  // as 64 bit integers and do the andnot on those types (note that andnot
3465  // is a bitwise operation so the data type does not matter)
3466  __m512d mask = _mm512_set1_pd(-0.);
3467  VectorizedArray res;
3468  res.data = reinterpret_cast<__m512d>(
3469  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
3470  reinterpret_cast<__m512i>(data)));
3471  return res;
3472  }
3473 
3480  get_max(const VectorizedArray &other) const
3481  {
3482  VectorizedArray res;
3483  res.data = _mm512_max_pd(data, other.data);
3484  return res;
3485  }
3486 
3493  get_min(const VectorizedArray &other) const
3494  {
3495  VectorizedArray res;
3496  res.data = _mm512_min_pd(data, other.data);
3497  return res;
3498  }
3499 
3500  // Make a few functions friends.
3501  template <typename Number2, std::size_t width2>
3504  template <typename Number2, std::size_t width2>
3507  template <typename Number2, std::size_t width2>
3511  template <typename Number2, std::size_t width2>
3515 };
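(Editor's illustrative sketch, not part of vectorization.h: a minimal use of the AVX-512 specialization above, assuming a translation unit compiled with AVX-512F support and the usual dealii namespace; the helper name sum_times_two is made up.)

 // illustrative only -- relies on the public load(), operator*= and sum()
 // members shown above
 #include <deal.II/base/vectorization.h>

 inline double
 sum_times_two(const double *ptr)
 {
   dealii::VectorizedArray<double, 8> v;
   v.load(ptr);    // unaligned load of 8 consecutive doubles
   v *= 2.;        // lane-wise multiply; the scalar 2. is broadcast to all lanes
   return v.sum(); // horizontal sum over the 8 lanes
 }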
3516 
3517 
3518 
3522 template <>
3523 inline DEAL_II_ALWAYS_INLINE void
3524 vectorized_load_and_transpose(const unsigned int n_entries,
3525  const double * in,
3526  const unsigned int * offsets,
3527  VectorizedArray<double, 8> *out)
3528 {
3529  // do not do full transpose because the code is long and will most
3530  // likely not pay off because many processors have two load units
3531  // (for the top 8 instructions) but only 1 permute unit (for the 8
3532  // shuffle/unpack instructions). rather start the transposition on the
3533  // vectorized array of half the size with 256 bits
3534  const unsigned int n_chunks = n_entries / 4;
3535  for (unsigned int i = 0; i < n_chunks; ++i)
3536  {
3537  __m512d t0, t1, t2, t3 = {};
3538 
3539  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
3540  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
3541  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
3542  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
3543  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
3544  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
3545  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
3546  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
3547 
3548  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
3549  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
3550  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
3551  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
3552  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
3553  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
3554  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
3555  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
3556  }
3557  // remainder loop of work that does not divide by 4
3558  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3559  out[i].gather(in + i, offsets);
3560 }
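(Editor's note: in scalar terms, the AVX-512 block above produces exactly what the remainder loop does for every index, namely out[i][v] = in[offsets[v] + i]. A hypothetical reference implementation, for illustration only:)

 inline void
 load_and_transpose_reference(const unsigned int                  n_entries,
                              const double *                      in,
                              const unsigned int *                offsets,
                              dealii::VectorizedArray<double, 8> *out)
 {
   // same result as the intrinsics-based version, without vectorization
   for (unsigned int i = 0; i < n_entries; ++i)
     for (unsigned int v = 0; v < 8; ++v)
       out[i][v] = in[offsets[v] + i];
 }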
3561 
3562 
3563 
3567 template <>
3568 inline DEAL_II_ALWAYS_INLINE void
3569 vectorized_load_and_transpose(const unsigned int n_entries,
3570  const std::array<double *, 8> &in,
3571  VectorizedArray<double, 8> *out)
3572 {
3573  const unsigned int n_chunks = n_entries / 4;
3574  for (unsigned int i = 0; i < n_chunks; ++i)
3575  {
3576  __m512d t0, t1, t2, t3 = {};
3577 
3578  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
3579  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
3580  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
3581  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
3582  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
3583  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
3584  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
3585  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
3586 
3587  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
3588  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
3589  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
3590  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
3591  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
3592  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
3593  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
3594  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
3595  }
3596 
3597  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3598  gather(out[i], in, i);
3599 }
3600 
3601 
3602 
3606 template <>
3607 inline DEAL_II_ALWAYS_INLINE void
3608 vectorized_transpose_and_store(const bool add_into,
3609  const unsigned int n_entries,
3610  const VectorizedArray<double, 8> *in,
3611  const unsigned int * offsets,
3612  double * out)
3613 {
3614  // as for the load, we split the store operations into 256 bit units to
3615  // better balance between code size, shuffle instructions, and stores
3616  const unsigned int n_chunks = n_entries / 4;
3617  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
3618  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
3619  for (unsigned int i = 0; i < n_chunks; ++i)
3620  {
3621  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
3622  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
3623  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
3624  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
3625  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
3626  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
3627  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
3628  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
3629  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
3630  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
3631  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
3632  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
3633  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
3634  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
3635  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
3636  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
3637 
3638  // Cannot use the same store instructions in both paths of the 'if'
3639  // because the compiler cannot know that there is no aliasing
3640  // between pointers
3641  if (add_into)
3642  {
3643  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
3644  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
3645  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
3646  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
3647  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
3648  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
3649  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
3650  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
3651  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
3652  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
3653  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
3654  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
3655  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
3656  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
3657  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
3658  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
3659  }
3660  else
3661  {
3662  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
3663  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
3664  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
3665  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
3666  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
3667  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
3668  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
3669  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
3670  }
3671  }
3672 
3673  // remainder loop of work that does not divide by 4
3674  if (add_into)
3675  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3676  for (unsigned int v = 0; v < 8; ++v)
3677  out[offsets[v] + i] += in[i][v];
3678  else
3679  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3680  for (unsigned int v = 0; v < 8; ++v)
3681  out[offsets[v] + i] = in[i][v];
3682 }
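(Editor's note: the scalar semantics of the store above mirror the remainder loops, i.e. out[offsets[v] + i] is either overwritten by or incremented with in[i][v]. A hypothetical reference loop, for illustration only:)

 inline void
 transpose_and_store_reference(const bool                                add_into,
                               const unsigned int                        n_entries,
                               const dealii::VectorizedArray<double, 8> *in,
                               const unsigned int *                      offsets,
                               double *                                  out)
 {
   for (unsigned int i = 0; i < n_entries; ++i)
     for (unsigned int v = 0; v < 8; ++v)
       if (add_into)
         out[offsets[v] + i] += in[i][v]; // accumulate into existing data
       else
         out[offsets[v] + i] = in[i][v]; // overwrite existing data
 }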
3683 
3684 
3685 
3689 template <>
3690 inline DEAL_II_ALWAYS_INLINE void
3691 vectorized_transpose_and_store(const bool add_into,
3692  const unsigned int n_entries,
3693  const VectorizedArray<double, 8> *in,
3694  std::array<double *, 8> & out)
3695 {
3696  // see the comments in the vectorized_transpose_and_store above
3697 
3698  const unsigned int n_chunks = n_entries / 4;
3699  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
3700  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
3701  for (unsigned int i = 0; i < n_chunks; ++i)
3702  {
3703  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
3704  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
3705  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
3706  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
3707  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
3708  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
3709  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
3710  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
3711  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
3712  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
3713  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
3714  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
3715  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
3716  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
3717  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
3718  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
3719 
3720  if (add_into)
3721  {
3722  res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
3723  _mm256_storeu_pd(out[0] + 4 * i, res0);
3724  res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
3725  _mm256_storeu_pd(out[1] + 4 * i, res1);
3726  res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
3727  _mm256_storeu_pd(out[2] + 4 * i, res2);
3728  res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
3729  _mm256_storeu_pd(out[3] + 4 * i, res3);
3730  res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
3731  _mm256_storeu_pd(out[4] + 4 * i, res4);
3732  res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
3733  _mm256_storeu_pd(out[5] + 4 * i, res5);
3734  res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
3735  _mm256_storeu_pd(out[6] + 4 * i, res6);
3736  res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
3737  _mm256_storeu_pd(out[7] + 4 * i, res7);
3738  }
3739  else
3740  {
3741  _mm256_storeu_pd(out[0] + 4 * i, res0);
3742  _mm256_storeu_pd(out[1] + 4 * i, res1);
3743  _mm256_storeu_pd(out[2] + 4 * i, res2);
3744  _mm256_storeu_pd(out[3] + 4 * i, res3);
3745  _mm256_storeu_pd(out[4] + 4 * i, res4);
3746  _mm256_storeu_pd(out[5] + 4 * i, res5);
3747  _mm256_storeu_pd(out[6] + 4 * i, res6);
3748  _mm256_storeu_pd(out[7] + 4 * i, res7);
3749  }
3750  }
3751 
3752  if (add_into)
3753  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3754  for (unsigned int v = 0; v < 8; ++v)
3755  out[v][i] += in[i][v];
3756  else
3757  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3758  for (unsigned int v = 0; v < 8; ++v)
3759  out[v][i] = in[i][v];
3760 }
3761 
3762 
3763 
3767 template <>
3768 class VectorizedArray<float, 16>
3769  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
3770 {
3771 public:
3775  using value_type = float;
3776 
3781  VectorizedArray() = default;
3782 
3786  VectorizedArray(const float scalar)
3787  {
3788  this->operator=(scalar);
3789  }
3790 
3794  template <typename U>
3795  VectorizedArray(const std::initializer_list<U> &list)
3796  : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
3797  {}
3798 
3803  VectorizedArray &
3804  operator=(const float x) &
3805  {
3806  data = _mm512_set1_ps(x);
3807  return *this;
3808  }
3809 
3815  VectorizedArray &
3816  operator=(const float scalar) && = delete;
3817 
3822  float &
3823  operator[](const unsigned int comp)
3824  {
3825  AssertIndexRange(comp, 16);
3826  return *(reinterpret_cast<float *>(&data) + comp);
3827  }
3828 
3833  const float &
3834  operator[](const unsigned int comp) const
3835  {
3836  AssertIndexRange(comp, 16);
3837  return *(reinterpret_cast<const float *>(&data) + comp);
3838  }
3839 
3844  VectorizedArray &
3845  operator+=(const VectorizedArray &vec)
3846  {
3847  // if the compiler supports vector arithmetic, we can simply use +=
3848  // operator on the given data type. this allows the compiler to combine
3849  // additions with multiplication (fused multiply-add) if those
3850  // instructions are available. Otherwise, we need to use the built-in
3851  // intrinsic command for __m512
3852 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3853  data += vec.data;
3854 # else
3855  data = _mm512_add_ps(data, vec.data);
3856 # endif
3857  return *this;
3858  }
3859 
3864  VectorizedArray &
3865  operator-=(const VectorizedArray &vec)
3866  {
3867 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3868  data -= vec.data;
3869 # else
3870  data = _mm512_sub_ps(data, vec.data);
3871 # endif
3872  return *this;
3873  }
3878  VectorizedArray &
3879  operator*=(const VectorizedArray &vec)
3880  {
3881 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3882  data *= vec.data;
3883 # else
3884  data = _mm512_mul_ps(data, vec.data);
3885 # endif
3886  return *this;
3887  }
3888 
3893  VectorizedArray &
3894  operator/=(const VectorizedArray &vec)
3895  {
3896 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3897  data /= vec.data;
3898 # else
3899  data = _mm512_div_ps(data, vec.data);
3900 # endif
3901  return *this;
3902  }
3903 
3910  void
3911  load(const float *ptr)
3912  {
3913  data = _mm512_loadu_ps(ptr);
3914  }
3915 
3923  void
3924  store(float *ptr) const
3925  {
3926  _mm512_storeu_ps(ptr, data);
3927  }
3928 
3934  void
3935  streaming_store(float *ptr) const
3936  {
3937  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
3938  ExcMessage("Memory not aligned"));
3939  _mm512_stream_ps(ptr, data);
3940  }
3941 
3955  void
3956  gather(const float *base_ptr, const unsigned int *offsets)
3957  {
3958  // unfortunately, there does not appear to be a 512 bit integer load, so
3959  // do it by some reinterpret casts here. this is allowed because the Intel
3960  // API allows aliasing between different vector types.
3961  const __m512 index_val =
3962  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
3963  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
3964 
3965  // work around a warning with gcc-12 about an uninitialized initial state
3966  // for gather by starting with a zero guess, even though all lanes will be
3967  // overwritten
3968  __m512 zero = {};
3969  __mmask16 mask = 0xFFFF;
3970 
3971  data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
3972  }
3973 
3987  void
3988  scatter(const unsigned int *offsets, float *base_ptr) const
3989  {
3990  for (unsigned int i = 0; i < 16; ++i)
3991  for (unsigned int j = i + 1; j < 16; ++j)
3992  Assert(offsets[i] != offsets[j],
3993  ExcMessage("Result of scatter undefined if two offset elements"
3994  " point to the same position"));
3995 
3996  // unfortunately, there does not appear to be a 512 bit integer load, so
3997  // do it by some reinterpret casts here. this is allowed because the Intel
3998  // API allows aliasing between different vector types.
3999  const __m512 index_val =
4000  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
4001  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
4002  _mm512_i32scatter_ps(base_ptr, index, data, 4);
4003  }
4004 
4009  float
4010  sum()
4011  {
4012  VectorizedArray<float, 8> t1;
4013  t1.data = _mm256_add_ps(this->get_lower(), this->get_upper());
4014  return t1.sum();
4015  }
4016 
4022  __m512 data;
4023 
4024 private:
4029  __m256
4030  get_lower() const
4031  {
4032  return _mm512_castps512_ps256(data);
4033  }
4034 
4039  __m256
4040  get_upper() const
4041  {
4042  return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1));
4043  }
4044 
4051  get_sqrt() const
4052  {
4053  VectorizedArray res;
4054  res.data = _mm512_sqrt_ps(data);
4055  return res;
4056  }
4057 
4064  get_abs() const
4065  {
4066  // to compute the absolute value, perform bitwise andnot with -0. This
4067  // will leave all value and exponent bits unchanged but force the sign
4068  // value to +. Since there is no andnot for AVX512, we interpret the data
4069  // as 32 bit integers and do the andnot on those types (note that andnot
4070  // is a bitwise operation so the data type does not matter)
4071  __m512 mask = _mm512_set1_ps(-0.f);
4072  VectorizedArray res;
4073  res.data = reinterpret_cast<__m512>(
4074  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
4075  reinterpret_cast<__m512i>(data)));
4076  return res;
4077  }
4078 
4085  get_max(const VectorizedArray &other) const
4086  {
4087  VectorizedArray res;
4088  res.data = _mm512_max_ps(data, other.data);
4089  return res;
4090  }
4091 
4098  get_min(const VectorizedArray &other) const
4099  {
4100  VectorizedArray res;
4101  res.data = _mm512_min_ps(data, other.data);
4102  return res;
4103  }
4104 
4105  // Make a few functions friends.
4106  template <typename Number2, std::size_t width2>
4109  template <typename Number2, std::size_t width2>
4112  template <typename Number2, std::size_t width2>
4116  template <typename Number2, std::size_t width2>
4120 };
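(Editor's illustration of the 64-byte alignment that streaming_store() asserts above; a sketch only. It assumes C++17 std::aligned_alloc -- a deal.II AlignedVector would serve the same purpose -- and the made-up helper name fill_with_value.)

 #include <cstdlib>

 inline void
 fill_with_value(const std::size_t n, const float s) // n: a multiple of 16
 {
   // a buffer from plain new[]/malloc is in general not 64-byte aligned
   float *buffer =
     static_cast<float *>(std::aligned_alloc(64, n * sizeof(float)));
   const dealii::VectorizedArray<float, 16> splat = s;
   for (std::size_t i = 0; i < n; i += 16)
     splat.streaming_store(buffer + i); // non-temporal store, needs alignment
   std::free(buffer);
 }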
4121 
4122 
4123 
4127 template <>
4128 inline DEAL_II_ALWAYS_INLINE void
4129 vectorized_load_and_transpose(const unsigned int n_entries,
4130  const float * in,
4131  const unsigned int * offsets,
4132  VectorizedArray<float, 16> *out)
4133 {
4134  // Similar to the double case, we perform the work on smaller entities. In
4135  // this case, we start from 128 bit arrays and insert them into a full 512
4136  // bit index. This reduces the code size and register pressure because we do
4137  // shuffles on 4 numbers rather than 16.
4138  const unsigned int n_chunks = n_entries / 4;
4139 
4140  // To avoid warnings about uninitialized variables, need to initialize one
4141  // variable to a pre-existing value in out, which will never get used in
4142  // the end. Keep the initialization outside the loop because of a bug in
4143  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
4144  // case t3 is initialized to zero (inside/outside of loop), see
4145  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
4146  __m512 t0, t1, t2, t3;
4147  if (n_chunks > 0)
4148  t3 = out[0].data;
4149  for (unsigned int i = 0; i < n_chunks; ++i)
4150  {
4151  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
4152  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
4153  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
4154  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
4155  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
4156  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
4157  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
4158  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
4159  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
4160  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
4161  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
4162  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
4163  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
4164  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
4165  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
4166  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
4167 
4168  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4169  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4170  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4171  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4172 
4173  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4174  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4175  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4176  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4177  }
4178 
4179  // remainder loop of work that does not divide by 4
4180  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4181  out[i].gather(in + i, offsets);
4182 }
4183 
4184 
4185 
4189 template <>
4190 inline DEAL_II_ALWAYS_INLINE void
4191 vectorized_load_and_transpose(const unsigned int n_entries,
4192  const std::array<float *, 16> &in,
4193  VectorizedArray<float, 16> *out)
4194 {
4195  // see the comments in the vectorized_load_and_transpose above
4196 
4197  const unsigned int n_chunks = n_entries / 4;
4198 
4199  __m512 t0, t1, t2, t3;
4200  if (n_chunks > 0)
4201  t3 = out[0].data;
4202  for (unsigned int i = 0; i < n_chunks; ++i)
4203  {
4204  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
4205  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
4206  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
4207  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
4208  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
4209  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
4210  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
4211  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
4212  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
4213  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
4214  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
4215  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
4216  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
4217  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
4218  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
4219  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
4220 
4221  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4222  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4223  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4224  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4225 
4226  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4227  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4228  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4229  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4230  }
4231 
4232  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4233  gather(out[i], in, i);
4234 }
4235 
4236 
4237 
4241 template <>
4242 inline DEAL_II_ALWAYS_INLINE void
4243 vectorized_transpose_and_store(const bool add_into,
4244  const unsigned int n_entries,
4245  const VectorizedArray<float, 16> *in,
4246  const unsigned int * offsets,
4247  float * out)
4248 {
4249  const unsigned int n_chunks = n_entries / 4;
4250  for (unsigned int i = 0; i < n_chunks; ++i)
4251  {
4252  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
4253  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
4254  __m512 t2 =
4255  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
4256  __m512 t3 =
4257  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
4258  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
4259  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
4260  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
4261  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
4262 
4263  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
4264  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
4265  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
4266  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
4267  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
4268  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
4269  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
4270  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
4271  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
4272  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
4273  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
4274  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
4275  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
4276  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
4277  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
4278  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
4279 
4280  // Cannot use the same store instructions in both paths of the 'if'
4281  // because the compiler cannot know that there is no aliasing between
4282  // pointers
4283  if (add_into)
4284  {
4285  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
4286  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4287  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
4288  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4289  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
4290  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4291  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
4292  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4293  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
4294  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4295  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
4296  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4297  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
4298  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4299  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
4300  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4301  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
4302  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4303  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
4304  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4305  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
4306  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4307  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
4308  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4309  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
4310  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4311  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
4312  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4313  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
4314  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4315  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
4316  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4317  }
4318  else
4319  {
4320  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4321  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4322  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4323  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4324  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4325  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4326  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4327  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4328  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4329  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4330  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4331  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4332  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4333  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4334  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4335  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4336  }
4337  }
4338 
4339  // remainder loop of work that does not divide by 4
4340  if (add_into)
4341  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4342  for (unsigned int v = 0; v < 16; ++v)
4343  out[offsets[v] + i] += in[i][v];
4344  else
4345  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4346  for (unsigned int v = 0; v < 16; ++v)
4347  out[offsets[v] + i] = in[i][v];
4348 }
4349 
4350 
4351 
4355 template <>
4356 inline DEAL_II_ALWAYS_INLINE void
4357 vectorized_transpose_and_store(const bool add_into,
4358  const unsigned int n_entries,
4359  const VectorizedArray<float, 16> *in,
4360  std::array<float *, 16> & out)
4361 {
4362  // see the comments in the vectorized_transpose_and_store above
4363 
4364  const unsigned int n_chunks = n_entries / 4;
4365  for (unsigned int i = 0; i < n_chunks; ++i)
4366  {
4367  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
4368  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
4369  __m512 t2 =
4370  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
4371  __m512 t3 =
4372  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
4373  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
4374  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
4375  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
4376  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
4377 
4378  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
4379  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
4380  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
4381  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
4382  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
4383  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
4384  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
4385  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
4386  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
4387  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
4388  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
4389  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
4390  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
4391  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
4392  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
4393  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
4394 
4395  if (add_into)
4396  {
4397  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
4398  _mm_storeu_ps(out[0] + 4 * i, res0);
4399  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
4400  _mm_storeu_ps(out[1] + 4 * i, res1);
4401  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
4402  _mm_storeu_ps(out[2] + 4 * i, res2);
4403  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
4404  _mm_storeu_ps(out[3] + 4 * i, res3);
4405  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
4406  _mm_storeu_ps(out[4] + 4 * i, res4);
4407  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
4408  _mm_storeu_ps(out[5] + 4 * i, res5);
4409  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
4410  _mm_storeu_ps(out[6] + 4 * i, res6);
4411  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
4412  _mm_storeu_ps(out[7] + 4 * i, res7);
4413  res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
4414  _mm_storeu_ps(out[8] + 4 * i, res8);
4415  res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
4416  _mm_storeu_ps(out[9] + 4 * i, res9);
4417  res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
4418  _mm_storeu_ps(out[10] + 4 * i, res10);
4419  res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
4420  _mm_storeu_ps(out[11] + 4 * i, res11);
4421  res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
4422  _mm_storeu_ps(out[12] + 4 * i, res12);
4423  res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
4424  _mm_storeu_ps(out[13] + 4 * i, res13);
4425  res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
4426  _mm_storeu_ps(out[14] + 4 * i, res14);
4427  res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
4428  _mm_storeu_ps(out[15] + 4 * i, res15);
4429  }
4430  else
4431  {
4432  _mm_storeu_ps(out[0] + 4 * i, res0);
4433  _mm_storeu_ps(out[1] + 4 * i, res1);
4434  _mm_storeu_ps(out[2] + 4 * i, res2);
4435  _mm_storeu_ps(out[3] + 4 * i, res3);
4436  _mm_storeu_ps(out[4] + 4 * i, res4);
4437  _mm_storeu_ps(out[5] + 4 * i, res5);
4438  _mm_storeu_ps(out[6] + 4 * i, res6);
4439  _mm_storeu_ps(out[7] + 4 * i, res7);
4440  _mm_storeu_ps(out[8] + 4 * i, res8);
4441  _mm_storeu_ps(out[9] + 4 * i, res9);
4442  _mm_storeu_ps(out[10] + 4 * i, res10);
4443  _mm_storeu_ps(out[11] + 4 * i, res11);
4444  _mm_storeu_ps(out[12] + 4 * i, res12);
4445  _mm_storeu_ps(out[13] + 4 * i, res13);
4446  _mm_storeu_ps(out[14] + 4 * i, res14);
4447  _mm_storeu_ps(out[15] + 4 * i, res15);
4448  }
4449  }
4450 
4451  if (add_into)
4452  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4453  for (unsigned int v = 0; v < 16; ++v)
4454  out[v][i] += in[i][v];
4455  else
4456  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4457  for (unsigned int v = 0; v < 16; ++v)
4458  out[v][i] = in[i][v];
4459 }
4460 
4461 # endif
4462 
4463 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4464  defined(__VSX__)
4465 
4466 template <>
4467 class VectorizedArray<double, 2>
4468  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
4469 {
4470 public:
4474  using value_type = double;
4475 
4480  VectorizedArray() = default;
4481 
4485  VectorizedArray(const double scalar)
4486  {
4487  this->operator=(scalar);
4488  }
4489 
4493  template <typename U>
4494  VectorizedArray(const std::initializer_list<U> &list)
4495  : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
4496  {}
4497 
4502  VectorizedArray &
4503  operator=(const double x) &
4504  {
4505  data = vec_splats(x);
4506 
4507  // Some compilers believe that vec_splats sets 'x', but that's not true.
4508  // They then warn about setting a variable and not using it. Suppress the
4509  // warning by "using" the variable:
4510  (void)x;
4511  return *this;
4512  }
4513 
4519  VectorizedArray &
4520  operator=(const double scalar) && = delete;
4521 
4526  double &
4527  operator[](const unsigned int comp)
4528  {
4529  AssertIndexRange(comp, 2);
4530  return *(reinterpret_cast<double *>(&data) + comp);
4531  }
4532 
4537  const double &
4538  operator[](const unsigned int comp) const
4539  {
4540  AssertIndexRange(comp, 2);
4541  return *(reinterpret_cast<const double *>(&data) + comp);
4542  }
4543 
4548  VectorizedArray &
4549  operator+=(const VectorizedArray &vec)
4550  {
4551  data = vec_add(data, vec.data);
4552  return *this;
4553  }
4554 
4559  VectorizedArray &
4560  operator-=(const VectorizedArray &vec)
4561  {
4562  data = vec_sub(data, vec.data);
4563  return *this;
4564  }
4565 
4570  VectorizedArray &
4571  operator*=(const VectorizedArray &vec)
4572  {
4573  data = vec_mul(data, vec.data);
4574  return *this;
4575  }
4576 
4581  VectorizedArray &
4582  operator/=(const VectorizedArray &vec)
4583  {
4584  data = vec_div(data, vec.data);
4585  return *this;
4586  }
4587 
4593  void
4594  load(const double *ptr)
4595  {
4596  data = vec_vsx_ld(0, ptr);
4597  }
4598 
4604  void
4605  store(double *ptr) const
4606  {
4607  vec_vsx_st(data, 0, ptr);
4608  }
4609 
4614  void
4615  streaming_store(double *ptr) const
4616  {
4617  store(ptr);
4618  }
4619 
4624  void
4625  gather(const double *base_ptr, const unsigned int *offsets)
4626  {
4627  for (unsigned int i = 0; i < 2; ++i)
4628  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4629  }
4630 
4635  void
4636  scatter(const unsigned int *offsets, double *base_ptr) const
4637  {
4638  for (unsigned int i = 0; i < 2; ++i)
4639  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4640  }
4641 
4647  __vector double data;
4648 
4649 private:
4656  get_sqrt() const
4657  {
4658  VectorizedArray res;
4659  res.data = vec_sqrt(data);
4660  return res;
4661  }
4662 
4669  get_abs() const
4670  {
4671  VectorizedArray res;
4672  res.data = vec_abs(data);
4673  return res;
4674  }
4675 
4682  get_max(const VectorizedArray &other) const
4683  {
4684  VectorizedArray res;
4685  res.data = vec_max(data, other.data);
4686  return res;
4687  }
4688 
4695  get_min(const VectorizedArray &other) const
4696  {
4697  VectorizedArray res;
4698  res.data = vec_min(data, other.data);
4699  return res;
4700  }
4701 
4702  // Make a few functions friends.
4703  template <typename Number2, std::size_t width2>
4706  template <typename Number2, std::size_t width2>
4709  template <typename Number2, std::size_t width2>
4713  template <typename Number2, std::size_t width2>
4717 };
4718 
4719 
4720 
4721 template <>
4722 class VectorizedArray<float, 4>
4723  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
4724 {
4725 public:
4729  using value_type = float;
4730 
4735  VectorizedArray() = default;
4736 
4740  VectorizedArray(const float scalar)
4741  {
4742  this->operator=(scalar);
4743  }
4744 
4748  template <typename U>
4749  VectorizedArray(const std::initializer_list<U> &list)
4750  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
4751  {}
4752 
4757  VectorizedArray &
4758  operator=(const float x) &
4759  {
4760  data = vec_splats(x);
4761 
4762  // Some compilers believe that vec_splats sets 'x', but that's not true.
4763  // They then warn about setting a variable and not using it. Suppress the
4764  // warning by "using" the variable:
4765  (void)x;
4766  return *this;
4767  }
4768 
4774  VectorizedArray &
4775  operator=(const float scalar) && = delete;
4776 
4781  float &
4782  operator[](const unsigned int comp)
4783  {
4784  AssertIndexRange(comp, 4);
4785  return *(reinterpret_cast<float *>(&data) + comp);
4786  }
4787 
4792  const float &
4793  operator[](const unsigned int comp) const
4794  {
4795  AssertIndexRange(comp, 4);
4796  return *(reinterpret_cast<const float *>(&data) + comp);
4797  }
4798 
4803  VectorizedArray &
4804  operator+=(const VectorizedArray &vec)
4805  {
4806  data = vec_add(data, vec.data);
4807  return *this;
4808  }
4809 
4814  VectorizedArray &
4815  operator-=(const VectorizedArray &vec)
4816  {
4817  data = vec_sub(data, vec.data);
4818  return *this;
4819  }
4820 
4825  VectorizedArray &
4826  operator*=(const VectorizedArray &vec)
4827  {
4828  data = vec_mul(data, vec.data);
4829  return *this;
4830  }
4831 
4836  VectorizedArray &
4837  operator/=(const VectorizedArray &vec)
4838  {
4839  data = vec_div(data, vec.data);
4840  return *this;
4841  }
4842 
4848  void
4849  load(const float *ptr)
4850  {
4851  data = vec_vsx_ld(0, ptr);
4852  }
4853 
4859  void
4860  store(float *ptr) const
4861  {
4862  vec_vsx_st(data, 0, ptr);
4863  }
4864 
4869  void
4870  streaming_store(float *ptr) const
4871  {
4872  store(ptr);
4873  }
4874 
4879  void
4880  gather(const float *base_ptr, const unsigned int *offsets)
4881  {
4882  for (unsigned int i = 0; i < 4; ++i)
4883  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4884  }
4885 
4890  void
4891  scatter(const unsigned int *offsets, float *base_ptr) const
4892  {
4893  for (unsigned int i = 0; i < 4; ++i)
4894  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4895  }
4896 
4902  __vector float data;
4903 
4904 private:
4911  get_sqrt() const
4912  {
4913  VectorizedArray res;
4914  res.data = vec_sqrt(data);
4915  return res;
4916  }
4917 
4924  get_abs() const
4925  {
4926  VectorizedArray res;
4927  res.data = vec_abs(data);
4928  return res;
4929  }
4930 
4937  get_max(const VectorizedArray &other) const
4938  {
4939  VectorizedArray res;
4940  res.data = vec_max(data, other.data);
4941  return res;
4942  }
4943 
4950  get_min(const VectorizedArray &other) const
4951  {
4952  VectorizedArray res;
4953  res.data = vec_min(data, other.data);
4954  return res;
4955  }
4956 
4957  // Make a few functions friends.
4958  template <typename Number2, std::size_t width2>
4961  template <typename Number2, std::size_t width2>
4964  template <typename Number2, std::size_t width2>
4968  template <typename Number2, std::size_t width2>
4972 };
4973 
4974 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) &&
4975  // defined(__VSX__)
4976 
4977 
4978 #endif // DOXYGEN
4979 
4980 
4981 
4992 template <typename Number, std::size_t width>
4993 inline DEAL_II_ALWAYS_INLINE bool
4994 operator==(const VectorizedArray<Number, width> &lhs,
4995  const VectorizedArray<Number, width> &rhs)
4996 {
4997  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4998  if (lhs[i] != rhs[i])
4999  return false;
5000 
5001  return true;
5002 }
5003 
5004 
5010 template <typename Number, std::size_t width>
5011 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5012 operator+(const VectorizedArray<Number, width> &u,
5013  const VectorizedArray<Number, width> &v)
5014 {
5015  VectorizedArray<Number, width> tmp = u;
5016  return tmp += v;
5017 }
5018 
5024 template <typename Number, std::size_t width>
5025 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5026 operator-(const VectorizedArray<Number, width> &u,
5027  const VectorizedArray<Number, width> &v)
5028 {
5029  VectorizedArray<Number, width> tmp = u;
5030  return tmp -= v;
5031 }
5032 
5038 template <typename Number, std::size_t width>
5039 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5040 operator*(const VectorizedArray<Number, width> &u,
5041  const VectorizedArray<Number, width> &v)
5042 {
5043  VectorizedArray<Number, width> tmp = u;
5044  return tmp *= v;
5045 }
5046 
5052 template <typename Number, std::size_t width>
5053 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5054 operator/(const VectorizedArray<Number, width> &u,
5055  const VectorizedArray<Number, width> &v)
5056 {
5057  VectorizedArray<Number, width> tmp = u;
5058  return tmp /= v;
5059 }
5060 
5067 template <typename Number, std::size_t width>
5069 operator+(const Number &u, const VectorizedArray<Number, width> &v)
5070 {
5071  VectorizedArray<Number, width> tmp = u;
5072  return tmp += v;
5073 }
5074 
5083 template <std::size_t width>
5085 operator+(const double u, const VectorizedArray<float, width> &v)
5086 {
5087  VectorizedArray<float, width> tmp = static_cast<float>(u);
5088  return tmp += v;
5089 }
5090 
5097 template <typename Number, std::size_t width>
5099 operator+(const VectorizedArray<Number, width> &v, const Number &u)
5100 {
5101  return u + v;
5102 }
5103 
5112 template <std::size_t width>
5114 operator+(const VectorizedArray<float, width> &v, const double u)
5115 {
5116  return u + v;
5117 }
5118 
5125 template <typename Number, std::size_t width>
5127 operator-(const Number &u, const VectorizedArray<Number, width> &v)
5128 {
5129  VectorizedArray<Number, width> tmp = u;
5130  return tmp -= v;
5131 }
5132 
5141 template <std::size_t width>
5143 operator-(const double u, const VectorizedArray<float, width> &v)
5144 {
5145  VectorizedArray<float, width> tmp = static_cast<float>(u);
5146  return tmp -= v;
5147 }
5148 
5155 template <typename Number, std::size_t width>
5157 operator-(const VectorizedArray<Number, width> &v, const Number &u)
5158 {
5159  VectorizedArray<Number, width> tmp = u;
5160  return v - tmp;
5161 }
5162 
5171 template <std::size_t width>
5173 operator-(const VectorizedArray<float, width> &v, const double u)
5174 {
5175  VectorizedArray<float, width> tmp = static_cast<float>(u);
5176  return v - tmp;
5177 }
5178 
5185 template <typename Number, std::size_t width>
5187 operator*(const Number &u, const VectorizedArray<Number, width> &v)
5188 {
5189  VectorizedArray<Number, width> tmp = u;
5190  return tmp *= v;
5191 }
5192 
5201 template <std::size_t width>
5203 operator*(const double u, const VectorizedArray<float, width> &v)
5204 {
5205  VectorizedArray<float, width> tmp = static_cast<float>(u);
5206  return tmp *= v;
5207 }
5208 
5215 template <typename Number, std::size_t width>
5217 operator*(const VectorizedArray<Number, width> &v, const Number &u)
5218 {
5219  return u * v;
5220 }
5221 
5230 template <std::size_t width>
5232 operator*(const VectorizedArray<float, width> &v, const double u)
5233 {
5234  return u * v;
5235 }
5236 
5243 template <typename Number, std::size_t width>
5245 operator/(const Number &u, const VectorizedArray<Number, width> &v)
5246 {
5247  VectorizedArray<Number, width> tmp = u;
5248  return tmp /= v;
5249 }
5250 
5259 template <std::size_t width>
5261 operator/(const double u, const VectorizedArray<float, width> &v)
5262 {
5263  VectorizedArray<float, width> tmp = static_cast<float>(u);
5264  return tmp /= v;
5265 }
5266 
5273 template <typename Number, std::size_t width>
5275 operator/(const VectorizedArray<Number, width> &v, const Number &u)
5276 {
5277  VectorizedArray<Number, width> tmp = u;
5278  return v / tmp;
5279 }
5280 
5289 template <std::size_t width>
5291 operator/(const VectorizedArray<float, width> &v, const double u)
5292 {
5293  VectorizedArray<float, width> tmp = static_cast<float>(u);
5294  return v / tmp;
5295 }
5296 
5302 template <typename Number, std::size_t width>
5303 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5304 operator+(const VectorizedArray<Number, width> &u)
5305 {
5306  return u;
5307 }
5308 
5314 template <typename Number, std::size_t width>
5315 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5316 operator-(const VectorizedArray<Number, width> &u)
5317 {
5318  // to get a negative sign, subtract the input from zero (could also
5319  // multiply by -1, but this one is slightly simpler)
5320  return VectorizedArray<Number, width>() - u;
5321 }
5322 
5328 template <typename Number, std::size_t width>
5329 inline std::ostream &
5330 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
5331 {
5332  constexpr unsigned int n = VectorizedArray<Number, width>::size();
5333  for (unsigned int i = 0; i < n - 1; ++i)
5334  out << p[i] << ' ';
5335  out << p[n - 1];
5336 
5337  return out;
5338 }
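(Editor's sketch of the stream output defined above; a width of 4 assumes at least an SSE2 or VSX build.)

 #include <iostream>

 inline void
 print_lanes()
 {
   const dealii::VectorizedArray<float, 4> v = 1.5f;
   std::cout << v << '\n'; // prints the lanes separated by spaces: 1.5 1.5 1.5 1.5
 }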
5339 
5354 enum class SIMDComparison : int
5355 {
5356 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5357  equal = _CMP_EQ_OQ,
5358  not_equal = _CMP_NEQ_OQ,
5359  less_than = _CMP_LT_OQ,
5360  less_than_or_equal = _CMP_LE_OQ,
5361  greater_than = _CMP_GT_OQ,
5362  greater_than_or_equal = _CMP_GE_OQ
5363 #else
5364  equal,
5365  not_equal,
5366  less_than,
5367  less_than_or_equal,
5368  greater_than,
5369  greater_than_or_equal
5370 #endif
5371 };
5372 
5373 
5437 template <SIMDComparison predicate, typename Number>
5438 DEAL_II_ALWAYS_INLINE inline Number
5439 compare_and_apply_mask(const Number &left,
5440  const Number &right,
5441  const Number &true_value,
5442  const Number &false_value)
5443 {
5444  bool mask;
5445  switch (predicate)
5446  {
5447  case SIMDComparison::equal:
5448  mask = (left == right);
5449  break;
5450  case SIMDComparison::not_equal:
5451  mask = (left != right);
5452  break;
5453  case SIMDComparison::less_than:
5454  mask = (left < right);
5455  break;
5456  case SIMDComparison::less_than_or_equal:
5457  mask = (left <= right);
5458  break;
5459  case SIMDComparison::greater_than:
5460  mask = (left > right);
5461  break;
5462  case SIMDComparison::greater_than_or_equal:
5463  mask = (left >= right);
5464  break;
5465  }
5466 
5467  return mask ? true_value : false_value;
5468 }
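(Editor's sketch: because the scalar fallback above and the SIMD specializations below share one interface, lane-wise branch-free selections can be written once for both plain numbers and VectorizedArray objects. The helper name clamp_above is made up for illustration.)

 template <typename Number>
 inline Number
 clamp_above(const Number &x, const Number &upper_bound)
 {
   // where x > upper_bound, pick upper_bound; elsewhere keep x
   return dealii::compare_and_apply_mask<dealii::SIMDComparison::greater_than>(
     x, upper_bound, upper_bound, x);
 }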
5469 
5470 
5475 template <SIMDComparison predicate, typename Number>
5476 DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
5477 compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
5478  const VectorizedArray<Number, 1> &right,
5479  const VectorizedArray<Number, 1> &true_value,
5480  const VectorizedArray<Number, 1> &false_value)
5481 {
5482  VectorizedArray<Number, 1> result;
5483  result.data = compare_and_apply_mask<predicate, Number>(left.data,
5484  right.data,
5485  true_value.data,
5486  false_value.data);
5487  return result;
5488 }
5489 
5492 #ifndef DOXYGEN
5493 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5494 
5495 template <SIMDComparison predicate>
5496 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
5497 compare_and_apply_mask(const VectorizedArray<float, 16> &left,
5498  const VectorizedArray<float, 16> &right,
5499  const VectorizedArray<float, 16> &true_values,
5500  const VectorizedArray<float, 16> &false_values)
5501 {
5502  const __mmask16 mask =
5503  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5504  VectorizedArray<float, 16> result;
5505  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5506  return result;
5507 }
5508 
5509 
5510 
5511 template <SIMDComparison predicate>
5512 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
5513 compare_and_apply_mask(const VectorizedArray<double, 8> &left,
5514  const VectorizedArray<double, 8> &right,
5515  const VectorizedArray<double, 8> &true_values,
5516  const VectorizedArray<double, 8> &false_values)
5517 {
5518  const __mmask16 mask =
5519  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5520  VectorizedArray<double, 8> result;
5521  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5522  return result;
5523 }
5524 
5525 # endif
5526 
5527 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5528 
5529 template <SIMDComparison predicate>
5530 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
5531 compare_and_apply_mask(const VectorizedArray<float, 8> &left,
5532  const VectorizedArray<float, 8> &right,
5533  const VectorizedArray<float, 8> &true_values,
5534  const VectorizedArray<float, 8> &false_values)
5535 {
5536  const auto mask =
5537  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5538 
5539  VectorizedArray<float, 8> result;
5540  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
5541  return result;
5542 }
5543 
5544 
5545 template <SIMDComparison predicate>
5546 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
5547 compare_and_apply_mask(const VectorizedArray<double, 4> &left,
5548  const VectorizedArray<double, 4> &right,
5549  const VectorizedArray<double, 4> &true_values,
5550  const VectorizedArray<double, 4> &false_values)
5551 {
5552  const auto mask =
5553  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5554 
5555  VectorizedArray<double, 4> result;
5556  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
5557  return result;
5558 }
5559 
5560 # endif
5561 
5562 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5563 
5564 template <SIMDComparison predicate>
5565 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
5566 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
5567  const VectorizedArray<float, 4> &right,
5568  const VectorizedArray<float, 4> &true_values,
5569  const VectorizedArray<float, 4> &false_values)
5570 {
5571  __m128 mask;
5572  switch (predicate)
5573  {
5574  case SIMDComparison::equal:
5575  mask = _mm_cmpeq_ps(left.data, right.data);
5576  break;
5577  case SIMDComparison::not_equal:
5578  mask = _mm_cmpneq_ps(left.data, right.data);
5579  break;
5580  case SIMDComparison::less_than:
5581  mask = _mm_cmplt_ps(left.data, right.data);
5582  break;
5583  case SIMDComparison::less_than_or_equal:
5584  mask = _mm_cmple_ps(left.data, right.data);
5585  break;
5586  case SIMDComparison::greater_than:
5587  mask = _mm_cmpgt_ps(left.data, right.data);
5588  break;
5589  case SIMDComparison::greater_than_or_equal:
5590  mask = _mm_cmpge_ps(left.data, right.data);
5591  break;
5592  }
5593 
5594  VectorizedArray<float, 4> result;
5595  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5596  _mm_andnot_ps(mask, false_values.data));
5597 
5598  return result;
5599 }
5600 
5601 
5602 template <SIMDComparison predicate>
5603 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
5604 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
5605  const VectorizedArray<double, 2> &right,
5606  const VectorizedArray<double, 2> &true_values,
5607  const VectorizedArray<double, 2> &false_values)
5608 {
5609  __m128d mask;
5610  switch (predicate)
5611  {
5612  case SIMDComparison::equal:
5613  mask = _mm_cmpeq_pd(left.data, right.data);
5614  break;
5615  case SIMDComparison::not_equal:
5616  mask = _mm_cmpneq_pd(left.data, right.data);
5617  break;
5618  case SIMDComparison::less_than:
5619  mask = _mm_cmplt_pd(left.data, right.data);
5620  break;
5621  case SIMDComparison::less_than_or_equal:
5622  mask = _mm_cmple_pd(left.data, right.data);
5623  break;
5624  case SIMDComparison::greater_than:
5625  mask = _mm_cmpgt_pd(left.data, right.data);
5626  break;
5627  case SIMDComparison::greater_than_or_equal:
5628  mask = _mm_cmpge_pd(left.data, right.data);
5629  break;
5630  }
5631 
5632  VectorizedArray<double, 2> result;
5633  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5634  _mm_andnot_pd(mask, false_values.data));
5635 
5636  return result;
5637 }
5638 
5639 # endif
5640 #endif // DOXYGEN
5641 
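Callers never name these intrinsics-based specializations directly; a hedged sketch of typical use (function name invented for illustration) is:

// Hypothetical sketch: keep only the positive part of each SIMD lane. The
// compiler picks the SSE2, AVX, or AVX-512 specialization matching the
// default width of VectorizedArray<double>, or the generic fallback otherwise.
VectorizedArray<double>
positive_part(const VectorizedArray<double> &v)
{
  const VectorizedArray<double> zero(0.0);
  return compare_and_apply_mask<SIMDComparison::greater_than>(v, zero, v, zero);
}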
5642 
5643 namespace internal
5644 {
5645  template <typename T>
5646  struct VectorizedArrayTrait
5647  {
5651  using value_type = T;
5652 
5656  static constexpr std::size_t
5657  width()
5658  {
5659  return 1;
5660  }
5661 
5666  using vectorized_value_type = VectorizedArray<T>;
5673  static constexpr std::size_t
5674  stride()
5675  {
5676  return vectorized_value_type::size();
5677  }
5678 
5682  static value_type &
5683  get(value_type &value, unsigned int c)
5684  {
5685  AssertIndexRange(c, 1);
5686  (void)c;
5687 
5688  return value;
5689  }
5690 
5694  static const value_type &
5695  get(const value_type &value, unsigned int c)
5696  {
5697  AssertIndexRange(c, 1);
5698  (void)c;
5699 
5700  return value;
5701  }
5702 
5706  static value_type &
5707  get_from_vectorized(vectorized_value_type &values, unsigned int c)
5708  {
5709  AssertIndexRange(c, stride());
5710 
5711  return values[c];
5712  }
5713 
5718  static const value_type &
5719  get_from_vectorized(const vectorized_value_type &values, unsigned int c)
5720  {
5721  AssertIndexRange(c, stride());
5722 
5723  return values[c];
5724  }
5725  };
5726 
5727  template <typename T, std::size_t width_>
5728  struct VectorizedArrayTrait<VectorizedArray<T, width_>>
5729  {
5733  using value_type = T;
5734 
5738  static constexpr std::size_t
5739  width()
5740  {
5741  return width_;
5742  }
5743 
5748  using vectorized_value_type = VectorizedArray<T, width_>;
5756  static constexpr std::size_t
5757  stride()
5758  {
5759  return 1;
5760  }
5761 
5765  static value_type &
5766  get(vectorized_value_type &values, unsigned int c)
5767  {
5768  AssertIndexRange(c, width_);
5769 
5770  return values[c];
5771  }
5772 
5776  static const value_type &
5777  get(const vectorized_value_type &values, unsigned int c)
5778  {
5779  AssertIndexRange(c, width_);
5780 
5781  return values[c];
5782  }
5783 
5787  static vectorized_value_type &
5788  get_from_vectorized(vectorized_value_type &values, unsigned int c)
5789  {
5790  (void)c;
5791  AssertIndexRange(c, stride());
5792 
5793  return values;
5794  }
5795 
5800  static const vectorized_value_type &
5801  get_from_vectorized(const vectorized_value_type &values, unsigned int c)
5802  {
5803  (void)c;
5804  AssertIndexRange(c, stride());
5805 
5806  return values;
5807  }
5808  };
5809 } // namespace internal
5810 
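These traits are what make it possible to write helpers that treat a plain number and a VectorizedArray uniformly; a sketch with invented names (not part of the library) could look like:

// Hypothetical sketch: sum the lanes of either a double (width() == 1) or a
// VectorizedArray<double> (width() == number of lanes) with the same code.
template <typename T>
typename internal::VectorizedArrayTrait<T>::value_type
sum_over_lanes(const T &value)
{
  using Trait = internal::VectorizedArrayTrait<T>;
  typename Trait::value_type sum = 0;
  for (unsigned int lane = 0; lane < Trait::width(); ++lane)
    sum += Trait::get(value, lane);
  return sum;
}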
5811 
5813 DEAL_II_NAMESPACE_CLOSE
5820 namespace std
5821 {
5829  template <typename Number, std::size_t width>
5830  inline ::VectorizedArray<Number, width>
5831  sin(const ::VectorizedArray<Number, width> &x)
5832  {
5833  // put values in an array and later read in that array with an unaligned
5834  // read. This should save some instructions as compared to directly
5835  // setting the individual elements and also circumvents a compiler
5836  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
5837  // from April 2014, topic "matrix_free/step-48 Test").
5838  Number values[::VectorizedArray<Number, width>::size()];
5839  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5840  ++i)
5841  values[i] = std::sin(x[i]);
5842  ::VectorizedArray<Number, width> out;
5843  out.load(&values[0]);
5844  return out;
5845  }
5846 
5847 
5848 
5856  template <typename Number, std::size_t width>
5857  inline ::VectorizedArray<Number, width>
5858  cos(const ::VectorizedArray<Number, width> &x)
5859  {
5860  Number values[::VectorizedArray<Number, width>::size()];
5861  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5862  ++i)
5863  values[i] = std::cos(x[i]);
5864  ::VectorizedArray<Number, width> out;
5865  out.load(&values[0]);
5866  return out;
5867  }
5868 
5869 
5870 
5878  template <typename Number, std::size_t width>
5879  inline ::VectorizedArray<Number, width>
5880  tan(const ::VectorizedArray<Number, width> &x)
5881  {
5882  Number values[::VectorizedArray<Number, width>::size()];
5883  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5884  ++i)
5885  values[i] = std::tan(x[i]);
5886  ::VectorizedArray<Number, width> out;
5887  out.load(&values[0]);
5888  return out;
5889  }
5890 
5891 
5892 
5900  template <typename Number, std::size_t width>
5901  inline ::VectorizedArray<Number, width>
5902  exp(const ::VectorizedArray<Number, width> &x)
5903  {
5904  Number values[::VectorizedArray<Number, width>::size()];
5905  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5906  ++i)
5907  values[i] = std::exp(x[i]);
5908  ::VectorizedArray<Number, width> out;
5909  out.load(&values[0]);
5910  return out;
5911  }
5912 
5913 
5914 
5922  template <typename Number, std::size_t width>
5923  inline ::VectorizedArray<Number, width>
5924  log(const ::VectorizedArray<Number, width> &x)
5925  {
5926  Number values[::VectorizedArray<Number, width>::size()];
5927  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5928  ++i)
5929  values[i] = std::log(x[i]);
5930  ::VectorizedArray<Number, width> out;
5931  out.load(&values[0]);
5932  return out;
5933  }
5934 
5935 
5936 
5944  template <typename Number, std::size_t width>
5945  inline ::VectorizedArray<Number, width>
5946  sqrt(const ::VectorizedArray<Number, width> &x)
5947  {
5948  return x.get_sqrt();
5949  }
5950 
5951 
5952 
5960  template <typename Number, std::size_t width>
5961  inline ::VectorizedArray<Number, width>
5962  pow(const ::VectorizedArray<Number, width> &x, const Number p)
5963  {
5964  Number values[::VectorizedArray<Number, width>::size()];
5965  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5966  ++i)
5967  values[i] = std::pow(x[i], p);
5968  ::VectorizedArray<Number, width> out;
5969  out.load(&values[0]);
5970  return out;
5971  }
5972 
5973 
5974 
5983  template <typename Number, std::size_t width>
5984  inline ::VectorizedArray<Number, width>
5985  pow(const ::VectorizedArray<Number, width> &x,
5986  const ::VectorizedArray<Number, width> &p)
5987  {
5988  Number values[::VectorizedArray<Number, width>::size()];
5989  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5990  ++i)
5991  values[i] = std::pow(x[i], p[i]);
5992  ::VectorizedArray<Number, width> out;
5993  out.load(&values[0]);
5994  return out;
5995  }
5996 
5997 
5998 
6006  template <typename Number, std::size_t width>
6007  inline ::VectorizedArray<Number, width>
6008  abs(const ::VectorizedArray<Number, width> &x)
6009  {
6010  return x.get_abs();
6011  }
6012 
6013 
6014 
6022  template <typename Number, std::size_t width>
6023  inline ::VectorizedArray<Number, width>
6024  max(const ::VectorizedArray<Number, width> &x,
6025  const ::VectorizedArray<Number, width> &y)
6026  {
6027  return x.get_max(y);
6028  }
6029 
6030 
6031 
6039  template <typename Number, std::size_t width>
6040  inline ::VectorizedArray<Number, width>
6041  min(const ::VectorizedArray<Number, width> &x,
6042  const ::VectorizedArray<Number, width> &y)
6043  {
6044  return x.get_min(y);
6045  }
6046 
6047 
6048 
6052  template <class T>
6053  struct iterator_traits<::VectorizedArrayIterator<T>>
6054  {
6055  using iterator_category = random_access_iterator_tag;
6056  using value_type = typename T::value_type;
6057  using difference_type = std::ptrdiff_t;
6058  };
6059 
6060 } // namespace std
6061 
6062 #endif
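Finally, a short usage sketch (not taken from the file; the function name is invented) of the std overloads declared above:

// Hypothetical sketch: evaluate a damped oscillation on all SIMD lanes at
// once, using the std:: overloads for VectorizedArray defined above.
#include <deal.II/base/vectorization.h>

using namespace dealii;

VectorizedArray<double>
damped_wave(const VectorizedArray<double> &t)
{
  return std::exp(-t) * std::sin(t);
}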