vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
24 
25 #include <array>
26 #include <cmath>
27 
28 // Note:
29 // The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30 // according to the following scheme (on x86-based architectures)
31 // #ifdef __AVX512F__
32 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33 // #elif defined (__AVX__)
34 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35 // #elif defined (__SSE2__)
36 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37 // #else
38 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39 // #endif
40 // In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
41 // test, 'check_01_cpu_features.cmake', ensures that these features are not only
42 // present in the compilation unit but also working properly.
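//
// A minimal usage sketch (illustrative, not part of the original header):
// user code compiled with the same instruction-set flags as deal.II can
// check the detected width at compile time, e.g.
//
// @code
//   #include <deal.II/base/vectorization.h>
//
//   static_assert(DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128,
//                 "This translation unit assumes at least SSE2.");
//
//   // number of doubles processed per VectorizedArray at this width
//   constexpr std::size_t n_lanes = dealii::VectorizedArray<double>::size();
// @endcode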
43 
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45 
46 // These error messages try to detect the case that deal.II was compiled with
47 // a wider instruction set extension than the current compilation unit, for
48 // example because deal.II was compiled with AVX, but a user project does not
49 // add -march=native or similar flags, making it fall back to SSE2. If not
50 // detected, this leads to very strange errors because the size of data
51 // structures differs between the compiled deal.II code sitting in
52 // libdeal_II.so and the user code.
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54 # error \
55  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56 # endif
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58 # error \
59  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60 # endif
61 
62 # ifdef _MSC_VER
63 # include <intrin.h>
64 # elif defined(__ALTIVEC__)
65 # include <altivec.h>
66 
67 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68 // them before they make trouble
69 # undef vector
70 # undef pixel
71 # undef bool
72 # else
73 # include <x86intrin.h>
74 # endif
75 
76 #endif
77 
78 
 79 DEAL_II_NAMESPACE_OPEN
 80 
81 
82 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
83 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
84 
85 template <typename Number, std::size_t width>
86 struct EnableIfScalar<VectorizedArray<Number, width>>
87 {
 88  using type = VectorizedArray<typename EnableIfScalar<Number>::type, width>;
 89 };
90 
91 
92 
96 template <typename T>
 97 class VectorizedArrayIterator
 98 {
99 public:
106  VectorizedArrayIterator(T &data, const std::size_t lane)
107  : data(&data)
108  , lane(lane)
109  {}
110 
114  bool
 115  operator==(const VectorizedArrayIterator<T> &other) const
 116  {
117  Assert(this->data == other.data,
118  ExcMessage(
119  "You are trying to compare iterators into different arrays."));
120  return this->lane == other.lane;
121  }
122 
126  bool
 127  operator!=(const VectorizedArrayIterator<T> &other) const
 128  {
129  Assert(this->data == other.data,
130  ExcMessage(
131  "You are trying to compare iterators into different arrays."));
132  return this->lane != other.lane;
133  }
134 
 138  VectorizedArrayIterator<T> &
 139  operator=(const VectorizedArrayIterator<T> &other) = default;
140 
145  const typename T::value_type &
146  operator*() const
147  {
148  AssertIndexRange(lane, T::size());
149  return (*data)[lane];
150  }
151 
152 
157  template <typename U = T>
158  typename std::enable_if<!std::is_same<U, const U>::value,
159  typename T::value_type>::type &
 160  operator*()
 161  {
162  AssertIndexRange(lane, T::size());
163  return (*data)[lane];
164  }
165 
 171  VectorizedArrayIterator<T> &
 172  operator++()
 173  {
174  AssertIndexRange(lane + 1, T::size() + 1);
175  lane++;
176  return *this;
177  }
178 
 183  VectorizedArrayIterator<T> &
 184  operator+=(const std::size_t offset)
185  {
186  AssertIndexRange(lane + offset, T::size() + 1);
187  lane += offset;
188  return *this;
189  }
190 
 196  VectorizedArrayIterator<T> &
 197  operator--()
 198  {
199  Assert(
200  lane > 0,
201  ExcMessage(
202  "You can't decrement an iterator that is already at the beginning of the range."));
203  --lane;
204  return *this;
205  }
206 
 210  VectorizedArrayIterator<T>
 211  operator+(const std::size_t &offset) const
212  {
213  AssertIndexRange(lane + offset, T::size() + 1);
214  return VectorizedArrayIterator<T>(*data, lane + offset);
215  }
216 
220  std::ptrdiff_t
 221  operator-(const VectorizedArrayIterator<T> &other) const
 222  {
223  return static_cast<std::ptrdiff_t>(lane) -
224  static_cast<ptrdiff_t>(other.lane);
225  }
226 
227 private:
231  T *data;
232 
236  std::size_t lane;
237 };
238 
239 
240 
250 template <typename T, std::size_t width>
 251 class VectorizedArrayBase
 252 {
253 public:
257  VectorizedArrayBase() = default;
258 
262  template <typename U>
263  VectorizedArrayBase(const std::initializer_list<U> &list)
264  {
265  auto i0 = this->begin();
266  auto i1 = list.begin();
267 
268  for (; i1 != list.end(); ++i0, ++i1)
269  {
270  Assert(
271  i0 != this->end(),
272  ExcMessage(
273  "Initializer list exceeds size of this VectorizedArray object."));
274 
275  *i0 = *i1;
276  }
277 
278  for (; i0 != this->end(); ++i0)
279  {
280  *i0 = 0.0;
281  }
282  }
283 
287  static constexpr std::size_t
 288  size()
 289  {
290  return width;
291  }
292 
 296  VectorizedArrayIterator<T>
 297  begin()
 298  {
299  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
300  }
301 
 306  VectorizedArrayIterator<const T>
 307  begin() const
308  {
309  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
310  }
311 
 315  VectorizedArrayIterator<T>
 316  end()
317  {
318  return VectorizedArrayIterator<T>(static_cast<T &>(*this), width);
319  }
320 
 325  VectorizedArrayIterator<const T>
 326  end() const
327  {
328  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
329  width);
330  }
331 };
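//
// Usage sketch for the begin()/end() pair above (illustrative, not part of
// the original header; variable names are made up): the iterators allow a
// range-based for loop over the lanes of a VectorizedArray, e.g.
//
// @code
//   dealii::VectorizedArray<double> v = 1.5;
//   double sum = 0.;
//   for (const double lane : v)   // visits v[0], ..., v[size()-1]
//     sum += lane;
// @endcode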
332 
333 
334 
419 template <typename Number, std::size_t width>
 420 class VectorizedArray
 421  : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
422 {
423 public:
427  using value_type = Number;
428 
429  static_assert(width == 1,
430  "You specified an illegal width that is not supported.");
431 
436  VectorizedArray() = default;
437 
441  VectorizedArray(const Number scalar)
442  {
443  this->operator=(scalar);
444  }
445 
449  template <typename U>
450  VectorizedArray(const std::initializer_list<U> &list)
451  : VectorizedArrayBase<VectorizedArray<Number, width>, 1>(list)
452  {}
453 
 458  VectorizedArray &
 459  operator=(const Number scalar)
460  {
461  data = scalar;
462  return *this;
463  }
464 
470  Number &
471  operator[](const unsigned int comp)
472  {
473  (void)comp;
474  AssertIndexRange(comp, 1);
475  return data;
476  }
477 
483  const Number &
484  operator[](const unsigned int comp) const
485  {
486  (void)comp;
487  AssertIndexRange(comp, 1);
488  return data;
489  }
490 
 495  VectorizedArray &
 496  operator+=(const VectorizedArray &vec)
 497  {
498  data += vec.data;
499  return *this;
500  }
501 
 506  VectorizedArray &
 507  operator-=(const VectorizedArray &vec)
 508  {
509  data -= vec.data;
510  return *this;
511  }
512 
 517  VectorizedArray &
 518  operator*=(const VectorizedArray &vec)
 519  {
520  data *= vec.data;
521  return *this;
522  }
523 
 528  VectorizedArray &
 529  operator/=(const VectorizedArray &vec)
 530  {
531  data /= vec.data;
532  return *this;
533  }
534 
542  void
543  load(const Number *ptr)
544  {
545  data = *ptr;
546  }
547 
555  void
556  store(Number *ptr) const
557  {
558  *ptr = data;
559  }
560 
608  void
609  streaming_store(Number *ptr) const
610  {
611  *ptr = data;
612  }
613 
627  void
628  gather(const Number *base_ptr, const unsigned int *offsets)
629  {
630  data = base_ptr[offsets[0]];
631  }
632 
646  void
647  scatter(const unsigned int *offsets, Number *base_ptr) const
648  {
649  base_ptr[offsets[0]] = data;
650  }
651 
657  Number data;
658 
659 private:
 665  VectorizedArray
 666  get_sqrt() const
667  {
668  VectorizedArray res;
669  res.data = std::sqrt(data);
670  return res;
671  }
672 
 678  VectorizedArray
 679  get_abs() const
680  {
681  VectorizedArray res;
682  res.data = std::fabs(data);
683  return res;
684  }
685 
 691  VectorizedArray
 692  get_max(const VectorizedArray &other) const
693  {
694  VectorizedArray res;
695  res.data = std::max(data, other.data);
696  return res;
697  }
698 
 704  VectorizedArray
 705  get_min(const VectorizedArray &other) const
706  {
707  VectorizedArray res;
708  res.data = std::min(data, other.data);
709  return res;
710  }
711 
712  // Make a few functions friends.
 713  template <typename Number2, std::size_t width2>
 714  friend VectorizedArray<Number2, width2>
 715  std::sqrt(const VectorizedArray<Number2, width2> &);
 716  template <typename Number2, std::size_t width2>
 717  friend VectorizedArray<Number2, width2>
 718  std::abs(const VectorizedArray<Number2, width2> &);
 719  template <typename Number2, std::size_t width2>
 720  friend VectorizedArray<Number2, width2>
 721  std::max(const VectorizedArray<Number2, width2> &,
 722  const VectorizedArray<Number2, width2> &);
 723  template <typename Number2, std::size_t width2>
 724  friend VectorizedArray<Number2, width2>
 725  std::min(const VectorizedArray<Number2, width2> &,
 726  const VectorizedArray<Number2, width2> &);
 727 };
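//
// Illustrative sketch of the interface defined by this fallback class and by
// the vectorized specializations further down (not part of the original
// header; names are made up):
//
// @code
//   dealii::VectorizedArray<double> a = 2.0, b = 3.0;
//   a += b;                  // lane-wise addition
//   a[0] = 1.0;              // access to an individual lane
//
//   double buffer[dealii::VectorizedArray<double>::size()];
//   a.store(buffer);         // write all lanes to memory
//   a.load(buffer);          // read them back
// @endcode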
728 
729 
730 
735 
736 
 743 template <typename Number,
 744  std::size_t width =
 745  internal::VectorizedArrayWidthSpecifier<Number>::max_width>
 746 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
 747 make_vectorized_array(const Number &u)
 748 {
 749  VectorizedArray<Number, width> result = u;
 750  return result;
 751 }
752 
753 
754 
761 template <typename VectorizedArrayType>
762 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
763 make_vectorized_array(const typename VectorizedArrayType::value_type &u)
764 {
765  static_assert(
766  std::is_same<VectorizedArrayType,
767  VectorizedArray<typename VectorizedArrayType::value_type,
768  VectorizedArrayType::size()>>::value,
769  "VectorizedArrayType is not a VectorizedArray.");
770 
771  VectorizedArrayType result = u;
772  return result;
773 }
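//
// Usage sketch for the two make_vectorized_array() overloads above
// (illustrative, not part of the original header):
//
// @code
//   // deduce the natural width for double on this platform
//   auto v = dealii::make_vectorized_array(1.0);
//
//   // or request a specific VectorizedArray type explicitly; width 1 is
//   // always available through the generic fallback class
//   auto w =
//     dealii::make_vectorized_array<dealii::VectorizedArray<float, 1>>(1.0f);
// @endcode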
774 
775 
776 
788 template <typename Number, std::size_t width>
789 inline DEAL_II_ALWAYS_INLINE void
 790 gather(VectorizedArray<Number, width> &out,
 791  const std::array<Number *, width> &ptrs,
792  const unsigned int offset)
793 {
794  for (unsigned int v = 0; v < width; ++v)
795  out.data[v] = ptrs[v][offset];
796 }
797 
798 
799 
825 template <typename Number, std::size_t width>
826 inline DEAL_II_ALWAYS_INLINE void
827 vectorized_load_and_transpose(const unsigned int n_entries,
828  const Number * in,
829  const unsigned int * offsets,
 830  VectorizedArray<Number, width> *out)
 831 {
832  for (unsigned int i = 0; i < n_entries; ++i)
833  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
834  out[i][v] = in[offsets[v] + i];
835 }
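//
// A small worked example of the layout transformation performed above
// (illustrative, not part of the original header): with width==2,
// n_entries==2 and offsets = {0, 10}, the generic loop reads
//
//   out[0][0] = in[0];    out[0][1] = in[10];
//   out[1][0] = in[1];    out[1][1] = in[11];
//
// i.e. entry i of lane v comes from the contiguous array starting at
// in + offsets[v].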
836 
837 
849 template <typename Number, std::size_t width>
850 inline DEAL_II_ALWAYS_INLINE void
851 vectorized_load_and_transpose(const unsigned int n_entries,
852  const std::array<Number *, width> &in,
 853  VectorizedArray<Number, width> *out)
 854 {
855  for (unsigned int i = 0; i < n_entries; ++i)
856  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
857  out[i][v] = in[v][i];
858 }
859 
860 
861 
900 template <typename Number, std::size_t width>
901 inline DEAL_II_ALWAYS_INLINE void
902 vectorized_transpose_and_store(const bool add_into,
903  const unsigned int n_entries,
 904  const VectorizedArray<Number, width> *in,
 905  const unsigned int * offsets,
906  Number * out)
907 {
908  if (add_into)
909  for (unsigned int i = 0; i < n_entries; ++i)
910  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
911  out[offsets[v] + i] += in[i][v];
912  else
913  for (unsigned int i = 0; i < n_entries; ++i)
914  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
915  out[offsets[v] + i] = in[i][v];
916 }
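//
// Round-trip sketch combining vectorized_load_and_transpose() and
// vectorized_transpose_and_store() (illustrative, not part of the original
// header; names are made up and the pointers are assumed to be set up by the
// caller):
//
// @code
//   constexpr unsigned int n_entries = 4;
//   constexpr std::size_t  width     = dealii::VectorizedArray<double>::size();
//
//   std::array<double *, width>     pointers;           // filled by the caller
//   dealii::VectorizedArray<double> values[n_entries];
//
//   dealii::vectorized_load_and_transpose(n_entries, pointers, values);
//   // ... operate lane-wise on values ...
//   dealii::vectorized_transpose_and_store(/*add_into=*/false,
//                                          n_entries, values, pointers);
// @endcode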
917 
918 
930 template <typename Number, std::size_t width>
931 inline DEAL_II_ALWAYS_INLINE void
932 vectorized_transpose_and_store(const bool add_into,
933  const unsigned int n_entries,
 934  const VectorizedArray<Number, width> *in,
 935  std::array<Number *, width> & out)
936 {
937  if (add_into)
938  for (unsigned int i = 0; i < n_entries; ++i)
939  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
940  out[v][i] += in[i][v];
941  else
942  for (unsigned int i = 0; i < n_entries; ++i)
943  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
944  out[v][i] = in[i][v];
945 }
946 
947 
949 
950 #ifndef DOXYGEN
951 
952 // for safety, also check that __AVX512F__ is defined in case the user manually
953 // set some conflicting compile flags which prevent compilation
954 
955 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
956 
960 template <>
961 class VectorizedArray<double, 8>
962  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
963 {
964 public:
968  using value_type = double;
969 
974  VectorizedArray() = default;
975 
979  VectorizedArray(const double scalar)
980  {
981  this->operator=(scalar);
982  }
983 
987  template <typename U>
988  VectorizedArray(const std::initializer_list<U> &list)
 989  : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
 990  {}
991 
 996  VectorizedArray &
 997  operator=(const double x)
998  {
999  data = _mm512_set1_pd(x);
1000  return *this;
1001  }
1002 
1007  double &
1008  operator[](const unsigned int comp)
1009  {
1010  AssertIndexRange(comp, 8);
1011  return *(reinterpret_cast<double *>(&data) + comp);
1012  }
1013 
1018  const double &
1019  operator[](const unsigned int comp) const
1020  {
1021  AssertIndexRange(comp, 8);
1022  return *(reinterpret_cast<const double *>(&data) + comp);
1023  }
1024 
1029  VectorizedArray &
1030  operator+=(const VectorizedArray &vec)
1031  {
1032  // if the compiler supports vector arithmetic, we can simply use +=
1033  // operator on the given data type. this allows the compiler to combine
1034  // additions with multiplication (fused multiply-add) if those
1035  // instructions are available. Otherwise, we need to use the built-in
1036  // intrinsic command for __m512d
1037 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1038  data += vec.data;
1039 # else
1040  data = _mm512_add_pd(data, vec.data);
1041 # endif
1042  return *this;
1043  }
1044 
1049  VectorizedArray &
1050  operator-=(const VectorizedArray &vec)
1051  {
1052 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1053  data -= vec.data;
1054 # else
1055  data = _mm512_sub_pd(data, vec.data);
1056 # endif
1057  return *this;
1058  }
1063  VectorizedArray &
1064  operator*=(const VectorizedArray &vec)
1065  {
1066 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1067  data *= vec.data;
1068 # else
1069  data = _mm512_mul_pd(data, vec.data);
1070 # endif
1071  return *this;
1072  }
1073 
1078  VectorizedArray &
1079  operator/=(const VectorizedArray &vec)
1080  {
1081 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1082  data /= vec.data;
1083 # else
1084  data = _mm512_div_pd(data, vec.data);
1085 # endif
1086  return *this;
1087  }
1088 
1095  void
1096  load(const double *ptr)
1097  {
1098  data = _mm512_loadu_pd(ptr);
1099  }
1100 
1108  void
1109  store(double *ptr) const
1110  {
1111  _mm512_storeu_pd(ptr, data);
1112  }
1113 
1119  void
1120  streaming_store(double *ptr) const
1121  {
1122  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1123  ExcMessage("Memory not aligned"));
1124  _mm512_stream_pd(ptr, data);
1125  }
1126 
1140  void
1141  gather(const double *base_ptr, const unsigned int *offsets)
1142  {
1143  // unfortunately, there does not appear to be a 256 bit integer load, so
1144  // do it by some reinterpret casts here. this is allowed because the Intel
1145  // API allows aliasing between different vector types.
1146  const __m256 index_val =
1147  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1148  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1149 
1150  // work around a warning with gcc-12 about an uninitialized initial state
1151  // for gather by starting with a zero guess, even though all lanes will be
1152  // overwritten
1153  __m512d zero = {};
1154  __mmask8 mask = 0xFF;
1155 
1156  data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
1157  }
1158 
1172  void
1173  scatter(const unsigned int *offsets, double *base_ptr) const
1174  {
1175  for (unsigned int i = 0; i < 8; ++i)
1176  for (unsigned int j = i + 1; j < 8; ++j)
1177  Assert(offsets[i] != offsets[j],
1178  ExcMessage("Result of scatter undefined if two offset elements"
1179  " point to the same position"));
1180 
1181  // unfortunately, there does not appear to be a 256 bit integer load, so
1182  // do it by some reinterpret casts here. this is allowed because the Intel
1183  // API allows aliasing between different vector types.
1184  const __m256 index_val =
1185  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1186  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1187  _mm512_i32scatter_pd(base_ptr, index, data, 8);
1188  }
1189 
1195  __m512d data;
1196 
1197 private:
1204  get_sqrt() const
1205  {
1206  VectorizedArray res;
1207  res.data = _mm512_sqrt_pd(data);
1208  return res;
1209  }
1210 
1217  get_abs() const
1218  {
1219  // to compute the absolute value, perform bitwise andnot with -0. This
1220  // will leave all value and exponent bits unchanged but force the sign
1221  // value to +. Since there is no andnot for AVX512, we interpret the data
1222  // as 64 bit integers and do the andnot on those types (note that andnot
1223  // is a bitwise operation so the data type does not matter)
1224  __m512d mask = _mm512_set1_pd(-0.);
1225  VectorizedArray res;
1226  res.data = reinterpret_cast<__m512d>(
1227  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1228  reinterpret_cast<__m512i>(data)));
1229  return res;
1230  }
1231 
1238  get_max(const VectorizedArray &other) const
1239  {
1240  VectorizedArray res;
1241  res.data = _mm512_max_pd(data, other.data);
1242  return res;
1243  }
1244 
1251  get_min(const VectorizedArray &other) const
1252  {
1253  VectorizedArray res;
1254  res.data = _mm512_min_pd(data, other.data);
1255  return res;
1256  }
1257 
1258  // Make a few functions friends.
 1259  template <typename Number2, std::size_t width2>
 1260  friend VectorizedArray<Number2, width2>
 1261  std::sqrt(const VectorizedArray<Number2, width2> &);
 1262  template <typename Number2, std::size_t width2>
 1263  friend VectorizedArray<Number2, width2>
 1264  std::abs(const VectorizedArray<Number2, width2> &);
 1265  template <typename Number2, std::size_t width2>
 1266  friend VectorizedArray<Number2, width2>
 1267  std::max(const VectorizedArray<Number2, width2> &,
 1268  const VectorizedArray<Number2, width2> &);
 1269  template <typename Number2, std::size_t width2>
 1270  friend VectorizedArray<Number2, width2>
 1271  std::min(const VectorizedArray<Number2, width2> &,
 1272  const VectorizedArray<Number2, width2> &);
 1273 };
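//
// Gather/scatter usage sketch for the class above (illustrative, not part of
// the original header): the offsets are element indices relative to base_ptr
// and, as asserted in scatter(), they must be pairwise distinct.
//
// @code
//   std::vector<double> storage(1000, 0.);
//   const unsigned int  offsets[8] = {0, 5, 10, 15, 20, 25, 30, 35};
//
//   dealii::VectorizedArray<double, 8> v;
//   v.gather(storage.data(), offsets);    // v[k] = storage[offsets[k]]
//   v += 1.0;
//   v.scatter(offsets, storage.data());   // storage[offsets[k]] = v[k]
// @endcode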
1274 
1275 
1276 
1280 template <>
1281 inline DEAL_II_ALWAYS_INLINE void
1282 vectorized_load_and_transpose(const unsigned int n_entries,
1283  const double * in,
1284  const unsigned int * offsets,
 1285  VectorizedArray<double, 8> * out)
 1286 {
1287  // do not do full transpose because the code is long and will most
1288  // likely not pay off because many processors have two load units
1289  // (for the top 8 instructions) but only 1 permute unit (for the 8
1290  // shuffle/unpack instructions). rather start the transposition on the
1291  // vectorized array of half the size with 256 bits
1292  const unsigned int n_chunks = n_entries / 4;
1293  for (unsigned int i = 0; i < n_chunks; ++i)
1294  {
1295  __m512d t0, t1, t2, t3 = {};
1296 
1297  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1298  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1299  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1300  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1301  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1302  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1303  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1304  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1305 
1306  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1307  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1308  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1309  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1310  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1311  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1312  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1313  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1314  }
1315  // remainder loop of work that does not divide by 4
1316  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1317  out[i].gather(in + i, offsets);
1318 }
1319 
1320 
1321 
1325 template <>
1326 inline DEAL_II_ALWAYS_INLINE void
1327 vectorized_load_and_transpose(const unsigned int n_entries,
1328  const std::array<double *, 8> &in,
 1329  VectorizedArray<double, 8> * out)
 1330 {
1331  const unsigned int n_chunks = n_entries / 4;
1332  for (unsigned int i = 0; i < n_chunks; ++i)
1333  {
1334  __m512d t0, t1, t2, t3 = {};
1335 
1336  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1337  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1338  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1339  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1340  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1341  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1342  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1343  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1344 
1345  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1346  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1347  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1348  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1349  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1350  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1351  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1352  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1353  }
1354 
1355  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1356  gather(out[i], in, i);
1357 }
1358 
1359 
1360 
1364 template <>
1365 inline DEAL_II_ALWAYS_INLINE void
1366 vectorized_transpose_and_store(const bool add_into,
1367  const unsigned int n_entries,
1368  const VectorizedArray<double, 8> *in,
1369  const unsigned int * offsets,
1370  double * out)
1371 {
1372  // as for the load, we split the store operations into 256 bit units to
1373  // better balance between code size, shuffle instructions, and stores
1374  const unsigned int n_chunks = n_entries / 4;
1375  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1376  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1377  for (unsigned int i = 0; i < n_chunks; ++i)
1378  {
1379  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1380  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1381  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1382  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1383  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1384  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1385  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1386  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1387  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1388  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1389  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1390  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1391  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1392  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1393  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1394  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1395 
1396  // Cannot use the same store instructions in both paths of the 'if'
1397  // because the compiler cannot know that there is no aliasing
1398  // between pointers
1399  if (add_into)
1400  {
1401  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1402  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1403  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1404  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1405  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1406  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1407  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1408  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1409  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1410  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1411  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1412  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1413  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1414  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1415  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1416  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1417  }
1418  else
1419  {
1420  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1421  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1422  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1423  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1424  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1425  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1426  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1427  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1428  }
1429  }
1430 
1431  // remainder loop of work that does not divide by 4
1432  if (add_into)
1433  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1434  for (unsigned int v = 0; v < 8; ++v)
1435  out[offsets[v] + i] += in[i][v];
1436  else
1437  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1438  for (unsigned int v = 0; v < 8; ++v)
1439  out[offsets[v] + i] = in[i][v];
1440 }
1441 
1442 
1443 
1447 template <>
1448 inline DEAL_II_ALWAYS_INLINE void
1449 vectorized_transpose_and_store(const bool add_into,
1450  const unsigned int n_entries,
1451  const VectorizedArray<double, 8> *in,
1452  std::array<double *, 8> & out)
1453 {
1454  // see the comments in the vectorized_transpose_and_store above
1455 
1456  const unsigned int n_chunks = n_entries / 4;
1457  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1458  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1459  for (unsigned int i = 0; i < n_chunks; ++i)
1460  {
1461  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1462  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1463  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1464  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1465  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1466  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1467  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1468  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1469  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1470  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1471  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1472  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1473  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1474  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1475  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1476  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1477 
1478  if (add_into)
1479  {
1480  res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1481  _mm256_storeu_pd(out[0] + 4 * i, res0);
1482  res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1483  _mm256_storeu_pd(out[1] + 4 * i, res1);
1484  res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1485  _mm256_storeu_pd(out[2] + 4 * i, res2);
1486  res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1487  _mm256_storeu_pd(out[3] + 4 * i, res3);
1488  res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1489  _mm256_storeu_pd(out[4] + 4 * i, res4);
1490  res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1491  _mm256_storeu_pd(out[5] + 4 * i, res5);
1492  res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1493  _mm256_storeu_pd(out[6] + 4 * i, res6);
1494  res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1495  _mm256_storeu_pd(out[7] + 4 * i, res7);
1496  }
1497  else
1498  {
1499  _mm256_storeu_pd(out[0] + 4 * i, res0);
1500  _mm256_storeu_pd(out[1] + 4 * i, res1);
1501  _mm256_storeu_pd(out[2] + 4 * i, res2);
1502  _mm256_storeu_pd(out[3] + 4 * i, res3);
1503  _mm256_storeu_pd(out[4] + 4 * i, res4);
1504  _mm256_storeu_pd(out[5] + 4 * i, res5);
1505  _mm256_storeu_pd(out[6] + 4 * i, res6);
1506  _mm256_storeu_pd(out[7] + 4 * i, res7);
1507  }
1508  }
1509 
1510  if (add_into)
1511  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1512  for (unsigned int v = 0; v < 8; ++v)
1513  out[v][i] += in[i][v];
1514  else
1515  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1516  for (unsigned int v = 0; v < 8; ++v)
1517  out[v][i] = in[i][v];
1518 }
1519 
1520 
1521 
1525 template <>
1526 class VectorizedArray<float, 16>
1527  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
1528 {
1529 public:
1533  using value_type = float;
1534 
1539  VectorizedArray() = default;
1540 
1544  VectorizedArray(const float scalar)
1545  {
1546  this->operator=(scalar);
1547  }
1548 
1552  template <typename U>
1553  VectorizedArray(const std::initializer_list<U> &list)
1554  : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
1555  {}
1556 
1561  VectorizedArray &
1562  operator=(const float x)
1563  {
1564  data = _mm512_set1_ps(x);
1565  return *this;
1566  }
1567 
1572  float &
1573  operator[](const unsigned int comp)
1574  {
1575  AssertIndexRange(comp, 16);
1576  return *(reinterpret_cast<float *>(&data) + comp);
1577  }
1578 
1583  const float &
1584  operator[](const unsigned int comp) const
1585  {
1586  AssertIndexRange(comp, 16);
1587  return *(reinterpret_cast<const float *>(&data) + comp);
1588  }
1589 
1594  VectorizedArray &
1595  operator+=(const VectorizedArray &vec)
1596  {
1597  // if the compiler supports vector arithmetic, we can simply use +=
1598  // operator on the given data type. this allows the compiler to combine
1599  // additions with multiplication (fused multiply-add) if those
1600  // instructions are available. Otherwise, we need to use the built-in
 1601  // intrinsic command for __m512
1602 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1603  data += vec.data;
1604 # else
1605  data = _mm512_add_ps(data, vec.data);
1606 # endif
1607  return *this;
1608  }
1609 
1614  VectorizedArray &
1615  operator-=(const VectorizedArray &vec)
1616  {
1617 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1618  data -= vec.data;
1619 # else
1620  data = _mm512_sub_ps(data, vec.data);
1621 # endif
1622  return *this;
1623  }
1628  VectorizedArray &
1629  operator*=(const VectorizedArray &vec)
1630  {
1631 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1632  data *= vec.data;
1633 # else
1634  data = _mm512_mul_ps(data, vec.data);
1635 # endif
1636  return *this;
1637  }
1638 
1643  VectorizedArray &
1644  operator/=(const VectorizedArray &vec)
1645  {
1646 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1647  data /= vec.data;
1648 # else
1649  data = _mm512_div_ps(data, vec.data);
1650 # endif
1651  return *this;
1652  }
1653 
1660  void
1661  load(const float *ptr)
1662  {
1663  data = _mm512_loadu_ps(ptr);
1664  }
1665 
1673  void
1674  store(float *ptr) const
1675  {
1676  _mm512_storeu_ps(ptr, data);
1677  }
1678 
1684  void
1685  streaming_store(float *ptr) const
1686  {
1687  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1688  ExcMessage("Memory not aligned"));
1689  _mm512_stream_ps(ptr, data);
1690  }
1691 
1705  void
1706  gather(const float *base_ptr, const unsigned int *offsets)
1707  {
1708  // unfortunately, there does not appear to be a 512 bit integer load, so
1709  // do it by some reinterpret casts here. this is allowed because the Intel
1710  // API allows aliasing between different vector types.
1711  const __m512 index_val =
1712  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1713  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1714 
1715  // work around a warning with gcc-12 about an uninitialized initial state
1716  // for gather by starting with a zero guess, even though all lanes will be
1717  // overwritten
1718  __m512 zero = {};
1719  __mmask16 mask = 0xFFFF;
1720 
1721  data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
1722  }
1723 
1737  void
1738  scatter(const unsigned int *offsets, float *base_ptr) const
1739  {
1740  for (unsigned int i = 0; i < 16; ++i)
1741  for (unsigned int j = i + 1; j < 16; ++j)
1742  Assert(offsets[i] != offsets[j],
1743  ExcMessage("Result of scatter undefined if two offset elements"
1744  " point to the same position"));
1745 
1746  // unfortunately, there does not appear to be a 512 bit integer load, so
1747  // do it by some reinterpret casts here. this is allowed because the Intel
1748  // API allows aliasing between different vector types.
1749  const __m512 index_val =
1750  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1751  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1752  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1753  }
1754 
1760  __m512 data;
1761 
1762 private:
1769  get_sqrt() const
1770  {
1771  VectorizedArray res;
1772  res.data = _mm512_sqrt_ps(data);
1773  return res;
1774  }
1775 
1782  get_abs() const
1783  {
1784  // to compute the absolute value, perform bitwise andnot with -0. This
1785  // will leave all value and exponent bits unchanged but force the sign
1786  // value to +. Since there is no andnot for AVX512, we interpret the data
1787  // as 32 bit integers and do the andnot on those types (note that andnot
1788  // is a bitwise operation so the data type does not matter)
1789  __m512 mask = _mm512_set1_ps(-0.f);
1790  VectorizedArray res;
1791  res.data = reinterpret_cast<__m512>(
1792  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1793  reinterpret_cast<__m512i>(data)));
1794  return res;
1795  }
1796 
1803  get_max(const VectorizedArray &other) const
1804  {
1805  VectorizedArray res;
1806  res.data = _mm512_max_ps(data, other.data);
1807  return res;
1808  }
1809 
1816  get_min(const VectorizedArray &other) const
1817  {
1818  VectorizedArray res;
1819  res.data = _mm512_min_ps(data, other.data);
1820  return res;
1821  }
1822 
1823  // Make a few functions friends.
 1824  template <typename Number2, std::size_t width2>
 1825  friend VectorizedArray<Number2, width2>
 1826  std::sqrt(const VectorizedArray<Number2, width2> &);
 1827  template <typename Number2, std::size_t width2>
 1828  friend VectorizedArray<Number2, width2>
 1829  std::abs(const VectorizedArray<Number2, width2> &);
 1830  template <typename Number2, std::size_t width2>
 1831  friend VectorizedArray<Number2, width2>
 1832  std::max(const VectorizedArray<Number2, width2> &,
 1833  const VectorizedArray<Number2, width2> &);
 1834  template <typename Number2, std::size_t width2>
 1835  friend VectorizedArray<Number2, width2>
 1836  std::min(const VectorizedArray<Number2, width2> &,
 1837  const VectorizedArray<Number2, width2> &);
 1838 };
1839 
1840 
1841 
1845 template <>
1846 inline DEAL_II_ALWAYS_INLINE void
1847 vectorized_load_and_transpose(const unsigned int n_entries,
1848  const float * in,
1849  const unsigned int * offsets,
 1850  VectorizedArray<float, 16> * out)
 1851 {
1852  // Similar to the double case, we perform the work on smaller entities. In
1853  // this case, we start from 128 bit arrays and insert them into a full 512
1854  // bit index. This reduces the code size and register pressure because we do
1855  // shuffles on 4 numbers rather than 16.
1856  const unsigned int n_chunks = n_entries / 4;
1857 
1858  // To avoid warnings about uninitialized variables, need to initialize one
 1859  // variable to a pre-existing value in out, which will never get used in
1860  // the end. Keep the initialization outside the loop because of a bug in
1861  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
1862  // case t3 is initialized to zero (inside/outside of loop), see
1863  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1864  __m512 t0, t1, t2, t3;
1865  if (n_chunks > 0)
1866  t3 = out[0].data;
1867  for (unsigned int i = 0; i < n_chunks; ++i)
1868  {
1869  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1870  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1871  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1872  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1873  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1874  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1875  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1876  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1877  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1878  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1879  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1880  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1881  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1882  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1883  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1884  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1885 
1886  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1887  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1888  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1889  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1890 
1891  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1892  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1893  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1894  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1895  }
1896 
1897  // remainder loop of work that does not divide by 4
1898  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1899  out[i].gather(in + i, offsets);
1900 }
1901 
1902 
1903 
1907 template <>
1908 inline DEAL_II_ALWAYS_INLINE void
1909 vectorized_load_and_transpose(const unsigned int n_entries,
1910  const std::array<float *, 16> &in,
 1911  VectorizedArray<float, 16> * out)
 1912 {
1913  // see the comments in the vectorized_load_and_transpose above
1914 
1915  const unsigned int n_chunks = n_entries / 4;
1916 
1917  __m512 t0, t1, t2, t3;
1918  if (n_chunks > 0)
1919  t3 = out[0].data;
1920  for (unsigned int i = 0; i < n_chunks; ++i)
1921  {
1922  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1923  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1924  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1925  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1926  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1927  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1928  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1929  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1930  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1931  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1932  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1933  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1934  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1935  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1936  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1937  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1938 
1939  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1940  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1941  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1942  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1943 
1944  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1945  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1946  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1947  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1948  }
1949 
1950  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1951  gather(out[i], in, i);
1952 }
1953 
1954 
1955 
1959 template <>
1960 inline DEAL_II_ALWAYS_INLINE void
1961 vectorized_transpose_and_store(const bool add_into,
1962  const unsigned int n_entries,
1963  const VectorizedArray<float, 16> *in,
1964  const unsigned int * offsets,
1965  float * out)
1966 {
1967  const unsigned int n_chunks = n_entries / 4;
1968  for (unsigned int i = 0; i < n_chunks; ++i)
1969  {
1970  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1971  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1972  __m512 t2 =
1973  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1974  __m512 t3 =
1975  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1976  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1977  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1978  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1979  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1980 
1981  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1982  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1983  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1984  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1985  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1986  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1987  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1988  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1989  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1990  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1991  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1992  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1993  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1994  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1995  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1996  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1997 
1998  // Cannot use the same store instructions in both paths of the 'if'
1999  // because the compiler cannot know that there is no aliasing between
2000  // pointers
2001  if (add_into)
2002  {
2003  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2004  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2005  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2006  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2007  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2008  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2009  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2010  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2011  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2012  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2013  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2014  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2015  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2016  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2017  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2018  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2019  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
2020  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2021  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
2022  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2023  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
2024  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2025  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
2026  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2027  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
2028  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2029  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
2030  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2031  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
2032  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2033  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
2034  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2035  }
2036  else
2037  {
2038  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2039  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2040  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2041  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2042  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2043  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2044  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2045  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2046  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2047  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2048  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2049  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2050  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2051  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2052  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2053  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2054  }
2055  }
2056 
2057  // remainder loop of work that does not divide by 4
2058  if (add_into)
2059  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2060  for (unsigned int v = 0; v < 16; ++v)
2061  out[offsets[v] + i] += in[i][v];
2062  else
2063  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2064  for (unsigned int v = 0; v < 16; ++v)
2065  out[offsets[v] + i] = in[i][v];
2066 }
2067 
2068 
2069 
2073 template <>
2074 inline DEAL_II_ALWAYS_INLINE void
2075 vectorized_transpose_and_store(const bool add_into,
2076  const unsigned int n_entries,
2077  const VectorizedArray<float, 16> *in,
2078  std::array<float *, 16> & out)
2079 {
2080  // see the comments in the vectorized_transpose_and_store above
2081 
2082  const unsigned int n_chunks = n_entries / 4;
2083  for (unsigned int i = 0; i < n_chunks; ++i)
2084  {
2085  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2086  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2087  __m512 t2 =
2088  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2089  __m512 t3 =
2090  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2091  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2092  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2093  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2094  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2095 
2096  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2097  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2098  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2099  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2100  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2101  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2102  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2103  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2104  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2105  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2106  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2107  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2108  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2109  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2110  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2111  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2112 
2113  if (add_into)
2114  {
2115  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2116  _mm_storeu_ps(out[0] + 4 * i, res0);
2117  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2118  _mm_storeu_ps(out[1] + 4 * i, res1);
2119  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2120  _mm_storeu_ps(out[2] + 4 * i, res2);
2121  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2122  _mm_storeu_ps(out[3] + 4 * i, res3);
2123  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2124  _mm_storeu_ps(out[4] + 4 * i, res4);
2125  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2126  _mm_storeu_ps(out[5] + 4 * i, res5);
2127  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2128  _mm_storeu_ps(out[6] + 4 * i, res6);
2129  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2130  _mm_storeu_ps(out[7] + 4 * i, res7);
2131  res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2132  _mm_storeu_ps(out[8] + 4 * i, res8);
2133  res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2134  _mm_storeu_ps(out[9] + 4 * i, res9);
2135  res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2136  _mm_storeu_ps(out[10] + 4 * i, res10);
2137  res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2138  _mm_storeu_ps(out[11] + 4 * i, res11);
2139  res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2140  _mm_storeu_ps(out[12] + 4 * i, res12);
2141  res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2142  _mm_storeu_ps(out[13] + 4 * i, res13);
2143  res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2144  _mm_storeu_ps(out[14] + 4 * i, res14);
2145  res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2146  _mm_storeu_ps(out[15] + 4 * i, res15);
2147  }
2148  else
2149  {
2150  _mm_storeu_ps(out[0] + 4 * i, res0);
2151  _mm_storeu_ps(out[1] + 4 * i, res1);
2152  _mm_storeu_ps(out[2] + 4 * i, res2);
2153  _mm_storeu_ps(out[3] + 4 * i, res3);
2154  _mm_storeu_ps(out[4] + 4 * i, res4);
2155  _mm_storeu_ps(out[5] + 4 * i, res5);
2156  _mm_storeu_ps(out[6] + 4 * i, res6);
2157  _mm_storeu_ps(out[7] + 4 * i, res7);
2158  _mm_storeu_ps(out[8] + 4 * i, res8);
2159  _mm_storeu_ps(out[9] + 4 * i, res9);
2160  _mm_storeu_ps(out[10] + 4 * i, res10);
2161  _mm_storeu_ps(out[11] + 4 * i, res11);
2162  _mm_storeu_ps(out[12] + 4 * i, res12);
2163  _mm_storeu_ps(out[13] + 4 * i, res13);
2164  _mm_storeu_ps(out[14] + 4 * i, res14);
2165  _mm_storeu_ps(out[15] + 4 * i, res15);
2166  }
2167  }
2168 
2169  if (add_into)
2170  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2171  for (unsigned int v = 0; v < 16; ++v)
2172  out[v][i] += in[i][v];
2173  else
2174  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2175  for (unsigned int v = 0; v < 16; ++v)
2176  out[v][i] = in[i][v];
2177 }
2178 
2179 # endif
2180 
2181 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2182 
2186 template <>
2187 class VectorizedArray<double, 4>
2188  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2189 {
2190 public:
2194  using value_type = double;
2195 
2200  VectorizedArray() = default;
2201 
2205  VectorizedArray(const double scalar)
2206  {
2207  this->operator=(scalar);
2208  }
2209 
2213  template <typename U>
2214  VectorizedArray(const std::initializer_list<U> &list)
 2215  : VectorizedArrayBase<VectorizedArray<double, 4>, 4>(list)
 2216  {}
2217 
2222  VectorizedArray &
2223  operator=(const double x)
2224  {
2225  data = _mm256_set1_pd(x);
2226  return *this;
2227  }
2228 
2233  double &
2234  operator[](const unsigned int comp)
2235  {
2236  AssertIndexRange(comp, 4);
2237  return *(reinterpret_cast<double *>(&data) + comp);
2238  }
2239 
2244  const double &
2245  operator[](const unsigned int comp) const
2246  {
2247  AssertIndexRange(comp, 4);
2248  return *(reinterpret_cast<const double *>(&data) + comp);
2249  }
2250 
2255  VectorizedArray &
2256  operator+=(const VectorizedArray &vec)
2257  {
2258  // if the compiler supports vector arithmetic, we can simply use +=
2259  // operator on the given data type. this allows the compiler to combine
2260  // additions with multiplication (fused multiply-add) if those
2261  // instructions are available. Otherwise, we need to use the built-in
2262  // intrinsic command for __m256d
2263 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2264  data += vec.data;
2265 # else
2266  data = _mm256_add_pd(data, vec.data);
2267 # endif
2268  return *this;
2269  }
2270 
2275  VectorizedArray &
2276  operator-=(const VectorizedArray &vec)
2277  {
2278 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2279  data -= vec.data;
2280 # else
2281  data = _mm256_sub_pd(data, vec.data);
2282 # endif
2283  return *this;
2284  }
2289  VectorizedArray &
2290  operator*=(const VectorizedArray &vec)
2291  {
2292 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2293  data *= vec.data;
2294 # else
2295  data = _mm256_mul_pd(data, vec.data);
2296 # endif
2297  return *this;
2298  }
2299 
2304  VectorizedArray &
2305  operator/=(const VectorizedArray &vec)
2306  {
2307 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2308  data /= vec.data;
2309 # else
2310  data = _mm256_div_pd(data, vec.data);
2311 # endif
2312  return *this;
2313  }
2314 
2321  void
2322  load(const double *ptr)
2323  {
2324  data = _mm256_loadu_pd(ptr);
2325  }
2326 
2334  void
2335  store(double *ptr) const
2336  {
2337  _mm256_storeu_pd(ptr, data);
2338  }
2339 
2345  void
2346  streaming_store(double *ptr) const
2347  {
2348  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2349  ExcMessage("Memory not aligned"));
2350  _mm256_stream_pd(ptr, data);
2351  }
2352 
2366  void
2367  gather(const double *base_ptr, const unsigned int *offsets)
2368  {
2369 # ifdef __AVX2__
2370  // unfortunately, there does not appear to be a 128 bit integer load, so
2371  // do it by some reinterpret casts here. this is allowed because the Intel
2372  // API allows aliasing between different vector types.
2373  const __m128 index_val =
2374  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2375  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2376 
2377  // work around a warning with gcc-12 about an uninitialized initial state
2378  // for gather by starting with a zero guess, even though all lanes will be
2379  // overwritten
2380  __m256d zero = _mm256_setzero_pd();
2381  __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);
2382 
2383  data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2384 # else
2385  for (unsigned int i = 0; i < 4; ++i)
2386  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2387 # endif
2388  }
2389 
2403  void
2404  scatter(const unsigned int *offsets, double *base_ptr) const
2405  {
2406  // no scatter operation in AVX/AVX2
2407  for (unsigned int i = 0; i < 4; ++i)
2408  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2409  }
2410 
2416  __m256d data;
2417 
2418 private:
2425  get_sqrt() const
2426  {
2427  VectorizedArray res;
2428  res.data = _mm256_sqrt_pd(data);
2429  return res;
2430  }
2431 
2438  get_abs() const
2439  {
2440  // to compute the absolute value, perform bitwise andnot with -0. This
2441  // will leave all value and exponent bits unchanged but force the sign
2442  // value to +.
2443  __m256d mask = _mm256_set1_pd(-0.);
2444  VectorizedArray res;
2445  res.data = _mm256_andnot_pd(mask, data);
2446  return res;
2447  }
2448 
2455  get_max(const VectorizedArray &other) const
2456  {
2457  VectorizedArray res;
2458  res.data = _mm256_max_pd(data, other.data);
2459  return res;
2460  }
2461 
2468  get_min(const VectorizedArray &other) const
2469  {
2470  VectorizedArray res;
2471  res.data = _mm256_min_pd(data, other.data);
2472  return res;
2473  }
2474 
2475  // Make a few functions friends.
 2476  template <typename Number2, std::size_t width2>
 2477  friend VectorizedArray<Number2, width2>
 2478  std::sqrt(const VectorizedArray<Number2, width2> &);
 2479  template <typename Number2, std::size_t width2>
 2480  friend VectorizedArray<Number2, width2>
 2481  std::abs(const VectorizedArray<Number2, width2> &);
 2482  template <typename Number2, std::size_t width2>
 2483  friend VectorizedArray<Number2, width2>
 2484  std::max(const VectorizedArray<Number2, width2> &,
 2485  const VectorizedArray<Number2, width2> &);
 2486  template <typename Number2, std::size_t width2>
 2487  friend VectorizedArray<Number2, width2>
 2488  std::min(const VectorizedArray<Number2, width2> &,
 2489  const VectorizedArray<Number2, width2> &);
 2490 };
2491 
2492 
2493 
2497 template <>
2498 inline DEAL_II_ALWAYS_INLINE void
2499 vectorized_load_and_transpose(const unsigned int n_entries,
2500  const double * in,
2501  const unsigned int * offsets,
 2502  VectorizedArray<double, 4> * out)
 2503 {
2504  const unsigned int n_chunks = n_entries / 4;
2505  const double * in0 = in + offsets[0];
2506  const double * in1 = in + offsets[1];
2507  const double * in2 = in + offsets[2];
2508  const double * in3 = in + offsets[3];
2509 
2510  for (unsigned int i = 0; i < n_chunks; ++i)
2511  {
2512  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2513  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2514  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2515  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2516  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2517  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2518  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2519  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2520  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2521  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2522  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2523  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2524  }
2525 
2526  // remainder loop of work that does not divide by 4
2527  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2528  out[i].gather(in + i, offsets);
2529 }
2530 
2531 
2532 
2536 template <>
2537 inline DEAL_II_ALWAYS_INLINE void
2538 vectorized_load_and_transpose(const unsigned int n_entries,
2539  const std::array<double *, 4> &in,
 2540  VectorizedArray<double, 4> * out)
 2541 {
2542  // see the comments in the vectorized_load_and_transpose above
2543 
2544  const unsigned int n_chunks = n_entries / 4;
2545  const double * in0 = in[0];
2546  const double * in1 = in[1];
2547  const double * in2 = in[2];
2548  const double * in3 = in[3];
2549 
2550  for (unsigned int i = 0; i < n_chunks; ++i)
2551  {
2552  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2553  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2554  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2555  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2556  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2557  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2558  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2559  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2560  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2561  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2562  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2563  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2564  }
2565 
2566  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2567  gather(out[i], in, i);
2568 }
2569 
2570 
2571 
2575 template <>
2576 inline DEAL_II_ALWAYS_INLINE void
2577 vectorized_transpose_and_store(const bool add_into,
2578  const unsigned int n_entries,
2579  const VectorizedArray<double, 4> *in,
2580  const unsigned int * offsets,
2581  double * out)
2582 {
2583  const unsigned int n_chunks = n_entries / 4;
2584  double * out0 = out + offsets[0];
2585  double * out1 = out + offsets[1];
2586  double * out2 = out + offsets[2];
2587  double * out3 = out + offsets[3];
2588  for (unsigned int i = 0; i < n_chunks; ++i)
2589  {
2590  __m256d u0 = in[4 * i + 0].data;
2591  __m256d u1 = in[4 * i + 1].data;
2592  __m256d u2 = in[4 * i + 2].data;
2593  __m256d u3 = in[4 * i + 3].data;
2594  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2595  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2596  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2597  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2598  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2599  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2600  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2601  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2602 
2603  // Cannot use the same store instructions in both paths of the 'if'
2604  // because the compiler cannot know that there is no aliasing between
2605  // pointers
2606  if (add_into)
2607  {
2608  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2609  _mm256_storeu_pd(out0 + 4 * i, res0);
2610  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2611  _mm256_storeu_pd(out1 + 4 * i, res1);
2612  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2613  _mm256_storeu_pd(out2 + 4 * i, res2);
2614  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2615  _mm256_storeu_pd(out3 + 4 * i, res3);
2616  }
2617  else
2618  {
2619  _mm256_storeu_pd(out0 + 4 * i, res0);
2620  _mm256_storeu_pd(out1 + 4 * i, res1);
2621  _mm256_storeu_pd(out2 + 4 * i, res2);
2622  _mm256_storeu_pd(out3 + 4 * i, res3);
2623  }
2624  }
2625 
2626  // remainder loop of work that does not divide by 4
2627  if (add_into)
2628  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2629  for (unsigned int v = 0; v < 4; ++v)
2630  out[offsets[v] + i] += in[i][v];
2631  else
2632  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2633  for (unsigned int v = 0; v < 4; ++v)
2634  out[offsets[v] + i] = in[i][v];
2635 }
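// Sketch of the inverse operation (illustrative only): with add_into == false
// the function writes out[offsets[v] + i] = in[i][v], undoing the
// transposition performed by vectorized_load_and_transpose() above; with
// add_into == true the transposed values are accumulated into the existing
// contents of 'out' instead of overwriting them.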
2636 
2637 
2638 
2642 template <>
2643 inline DEAL_II_ALWAYS_INLINE void
2644 vectorized_transpose_and_store(const bool add_into,
2645  const unsigned int n_entries,
2646  const VectorizedArray<double, 4> *in,
2647  std::array<double *, 4> & out)
2648 {
2649  // see the comments in the vectorized_transpose_and_store above
2650 
2651  const unsigned int n_chunks = n_entries / 4;
2652  double * out0 = out[0];
2653  double * out1 = out[1];
2654  double * out2 = out[2];
2655  double * out3 = out[3];
2656  for (unsigned int i = 0; i < n_chunks; ++i)
2657  {
2658  __m256d u0 = in[4 * i + 0].data;
2659  __m256d u1 = in[4 * i + 1].data;
2660  __m256d u2 = in[4 * i + 2].data;
2661  __m256d u3 = in[4 * i + 3].data;
2662  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2663  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2664  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2665  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2666  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2667  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2668  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2669  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2670 
2671  // Cannot use the same store instructions in both paths of the 'if'
2672  // because the compiler cannot know that there is no aliasing between
2673  // pointers
2674  if (add_into)
2675  {
2676  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2677  _mm256_storeu_pd(out0 + 4 * i, res0);
2678  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2679  _mm256_storeu_pd(out1 + 4 * i, res1);
2680  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2681  _mm256_storeu_pd(out2 + 4 * i, res2);
2682  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2683  _mm256_storeu_pd(out3 + 4 * i, res3);
2684  }
2685  else
2686  {
2687  _mm256_storeu_pd(out0 + 4 * i, res0);
2688  _mm256_storeu_pd(out1 + 4 * i, res1);
2689  _mm256_storeu_pd(out2 + 4 * i, res2);
2690  _mm256_storeu_pd(out3 + 4 * i, res3);
2691  }
2692  }
2693 
2694  // remainder loop of work that does not divide by 4
2695  if (add_into)
2696  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2697  for (unsigned int v = 0; v < 4; ++v)
2698  out[v][i] += in[i][v];
2699  else
2700  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2701  for (unsigned int v = 0; v < 4; ++v)
2702  out[v][i] = in[i][v];
2703 }
2704 
2705 
2706 
2710 template <>
2711 class VectorizedArray<float, 8>
2712  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
2713 {
2714 public:
2718  using value_type = float;
2719 
2724  VectorizedArray() = default;
2725 
2729  VectorizedArray(const float scalar)
2730  {
2731  this->operator=(scalar);
2732  }
2733 
2737  template <typename U>
2738  VectorizedArray(const std::initializer_list<U> &list)
2739  : VectorizedArrayBase<VectorizedArray<float, 8>, 8>(list)
2740  {}
2741 
2746  VectorizedArray &
2747  operator=(const float x)
2748  {
2749  data = _mm256_set1_ps(x);
2750  return *this;
2751  }
2752 
2757  float &
2758  operator[](const unsigned int comp)
2759  {
2760  AssertIndexRange(comp, 8);
2761  return *(reinterpret_cast<float *>(&data) + comp);
2762  }
2763 
2768  const float &
2769  operator[](const unsigned int comp) const
2770  {
2771  AssertIndexRange(comp, 8);
2772  return *(reinterpret_cast<const float *>(&data) + comp);
2773  }
2774 
2779  VectorizedArray &
2780  operator+=(const VectorizedArray &vec)
2781  {
2782  // if the compiler supports vector arithmetic, we can simply use +=
2783  // operator on the given data type. this allows the compiler to combine
2784  // additions with multiplication (fused multiply-add) if those
2785  // instructions are available. Otherwise, we need to use the built-in
2786  // intrinsic command for __m256d
2787 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2788  data += vec.data;
2789 # else
2790  data = _mm256_add_ps(data, vec.data);
2791 # endif
2792  return *this;
2793  }
2794 
2799  VectorizedArray &
2800  operator-=(const VectorizedArray &vec)
2801  {
2802 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2803  data -= vec.data;
2804 # else
2805  data = _mm256_sub_ps(data, vec.data);
2806 # endif
2807  return *this;
2808  }
2813  VectorizedArray &
2814  operator*=(const VectorizedArray &vec)
2815  {
2816 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2817  data *= vec.data;
2818 # else
2819  data = _mm256_mul_ps(data, vec.data);
2820 # endif
2821  return *this;
2822  }
2823 
2828  VectorizedArray &
2829  operator/=(const VectorizedArray &vec)
2830  {
2831 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2832  data /= vec.data;
2833 # else
2834  data = _mm256_div_ps(data, vec.data);
2835 # endif
2836  return *this;
2837  }
2838 
2845  void
2846  load(const float *ptr)
2847  {
2848  data = _mm256_loadu_ps(ptr);
2849  }
2850 
2858  void
2859  store(float *ptr) const
2860  {
2861  _mm256_storeu_ps(ptr, data);
2862  }
2863 
2869  void
2870  streaming_store(float *ptr) const
2871  {
2872  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2873  ExcMessage("Memory not aligned"));
2874  _mm256_stream_ps(ptr, data);
2875  }
2876 
2890  void
2891  gather(const float *base_ptr, const unsigned int *offsets)
2892  {
2893 # ifdef __AVX2__
2894  // unfortunately, there does not appear to be a 256 bit integer load, so
2895  // do it by some reinterpret casts here. this is allowed because the Intel
2896  // API allows aliasing between different vector types.
2897  const __m256 index_val =
2898  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2899  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2900 
2901  // work around a warning with gcc-12 about an uninitialized initial state
2902  // for gather by starting with a zero guess, even though all lanes will be
2903  // overwritten
2904  __m256 zero = _mm256_setzero_ps();
2905  __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);
2906 
2907  data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
2908 # else
2909  for (unsigned int i = 0; i < 8; ++i)
2910  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2911 # endif
2912  }
2913 
2927  void
2928  scatter(const unsigned int *offsets, float *base_ptr) const
2929  {
2930  // no scatter operation in AVX/AVX2
2931  for (unsigned int i = 0; i < 8; ++i)
2932  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2933  }
2934 
2940  __m256 data;
2941 
2942 private:
2949  get_sqrt() const
2950  {
2951  VectorizedArray res;
2952  res.data = _mm256_sqrt_ps(data);
2953  return res;
2954  }
2955 
2962  get_abs() const
2963  {
2964  // to compute the absolute value, perform bitwise andnot with -0. This
2965  // will leave all value and exponent bits unchanged but force the sign
2966  // value to +.
2967  __m256 mask = _mm256_set1_ps(-0.f);
2968  VectorizedArray res;
2969  res.data = _mm256_andnot_ps(mask, data);
2970  return res;
2971  }
2972 
2979  get_max(const VectorizedArray &other) const
2980  {
2981  VectorizedArray res;
2982  res.data = _mm256_max_ps(data, other.data);
2983  return res;
2984  }
2985 
2992  get_min(const VectorizedArray &other) const
2993  {
2994  VectorizedArray res;
2995  res.data = _mm256_min_ps(data, other.data);
2996  return res;
2997  }
2998 
2999  // Make a few functions friends.
3000  template <typename Number2, std::size_t width2>
3003  template <typename Number2, std::size_t width2>
3006  template <typename Number2, std::size_t width2>
3010  template <typename Number2, std::size_t width2>
3014 };
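// Usage sketch (not part of the sources; storage size and index values chosen
// only for illustration): gather()/scatter() address eight non-contiguous
// floats per call,
//
//   float storage[64] = {};
//   const unsigned int idx[8] = {0, 8, 16, 24, 32, 40, 48, 56};
//   VectorizedArray<float, 8> v;
//   v.gather(storage, idx);   // v[k] == storage[idx[k]]; AVX2 uses a real
//   v.scatter(idx, storage);  // gather instruction, otherwise a scalar loop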
3015 
3016 
3017 
3021 template <>
3022 inline DEAL_II_ALWAYS_INLINE void
3023 vectorized_load_and_transpose(const unsigned int n_entries,
3024  const float * in,
3025  const unsigned int * offsets,
3026                               VectorizedArray<float, 8> *out)
3027 {
3028  const unsigned int n_chunks = n_entries / 4;
3029  for (unsigned int i = 0; i < n_chunks; ++i)
3030  {
3031  // To avoid warnings about uninitialized variables, need to initialize
3032  // one variable with zero before using it.
3033  __m256 t0, t1, t2, t3 = {};
3034  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3035  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3036  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3037  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3038  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3039  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3040  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3041  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3042 
3043  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3044  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3045  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3046  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3047  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3048  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3049  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3050  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3051  }
3052 
3053  // remainder loop of work that does not divide by 4
3054  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3055  out[i].gather(in + i, offsets);
3056 }
3057 
3058 
3059 
3063 template <>
3064 inline DEAL_II_ALWAYS_INLINE void
3065 vectorized_load_and_transpose(const unsigned int n_entries,
3066  const std::array<float *, 8> &in,
3067                               VectorizedArray<float, 8> *out)
3068 {
3069  // see the comments in the vectorized_load_and_transpose above
3070 
3071  const unsigned int n_chunks = n_entries / 4;
3072  for (unsigned int i = 0; i < n_chunks; ++i)
3073  {
3074  __m256 t0, t1, t2, t3 = {};
3075  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3076  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3077  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3078  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3079  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3080  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3081  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3082  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3083 
3084  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3085  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3086  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3087  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3088  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3089  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3090  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3091  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3092  }
3093 
3094  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3095  gather(out[i], in, i);
3096 }
3097 
3098 
3099 
3103 template <>
3104 inline DEAL_II_ALWAYS_INLINE void
3105 vectorized_transpose_and_store(const bool add_into,
3106  const unsigned int n_entries,
3107  const VectorizedArray<float, 8> *in,
3108  const unsigned int * offsets,
3109  float * out)
3110 {
3111  const unsigned int n_chunks = n_entries / 4;
3112  for (unsigned int i = 0; i < n_chunks; ++i)
3113  {
3114  __m256 u0 = in[4 * i + 0].data;
3115  __m256 u1 = in[4 * i + 1].data;
3116  __m256 u2 = in[4 * i + 2].data;
3117  __m256 u3 = in[4 * i + 3].data;
3118  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3119  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3120  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3121  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3122  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3123  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3124  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3125  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3126  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3127  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3128  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3129  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3130  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3131  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3132  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3133  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3134 
3135  // Cannot use the same store instructions in both paths of the 'if'
3136  // because the compiler cannot know that there is no aliasing between
3137  // pointers
3138  if (add_into)
3139  {
3140  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3141  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3142  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3143  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3144  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3145  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3146  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3147  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3148  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3149  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3150  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3151  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3152  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3153  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3154  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3155  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3156  }
3157  else
3158  {
3159  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3160  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3161  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3162  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3163  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3164  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3165  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3166  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3167  }
3168  }
3169 
3170  // remainder loop of work that does not divide by 4
3171  if (add_into)
3172  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3173  for (unsigned int v = 0; v < 8; ++v)
3174  out[offsets[v] + i] += in[i][v];
3175  else
3176  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3177  for (unsigned int v = 0; v < 8; ++v)
3178  out[offsets[v] + i] = in[i][v];
3179 }
3180 
3181 
3182 
3186 template <>
3187 inline DEAL_II_ALWAYS_INLINE void
3188 vectorized_transpose_and_store(const bool add_into,
3189  const unsigned int n_entries,
3190  const VectorizedArray<float, 8> *in,
3191  std::array<float *, 8> & out)
3192 {
3193  // see the comments in the vectorized_transpose_and_store above
3194 
3195  const unsigned int n_chunks = n_entries / 4;
3196  for (unsigned int i = 0; i < n_chunks; ++i)
3197  {
3198  __m256 u0 = in[4 * i + 0].data;
3199  __m256 u1 = in[4 * i + 1].data;
3200  __m256 u2 = in[4 * i + 2].data;
3201  __m256 u3 = in[4 * i + 3].data;
3202  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3203  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3204  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3205  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3206  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3207  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3208  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3209  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3210  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3211  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3212  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3213  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3214  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3215  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3216  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3217  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3218 
3219  if (add_into)
3220  {
3221  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3222  _mm_storeu_ps(out[0] + 4 * i, res0);
3223  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3224  _mm_storeu_ps(out[1] + 4 * i, res1);
3225  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3226  _mm_storeu_ps(out[2] + 4 * i, res2);
3227  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3228  _mm_storeu_ps(out[3] + 4 * i, res3);
3229  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3230  _mm_storeu_ps(out[4] + 4 * i, res4);
3231  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3232  _mm_storeu_ps(out[5] + 4 * i, res5);
3233  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3234  _mm_storeu_ps(out[6] + 4 * i, res6);
3235  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3236  _mm_storeu_ps(out[7] + 4 * i, res7);
3237  }
3238  else
3239  {
3240  _mm_storeu_ps(out[0] + 4 * i, res0);
3241  _mm_storeu_ps(out[1] + 4 * i, res1);
3242  _mm_storeu_ps(out[2] + 4 * i, res2);
3243  _mm_storeu_ps(out[3] + 4 * i, res3);
3244  _mm_storeu_ps(out[4] + 4 * i, res4);
3245  _mm_storeu_ps(out[5] + 4 * i, res5);
3246  _mm_storeu_ps(out[6] + 4 * i, res6);
3247  _mm_storeu_ps(out[7] + 4 * i, res7);
3248  }
3249  }
3250 
3251  if (add_into)
3252  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3253  for (unsigned int v = 0; v < 8; ++v)
3254  out[v][i] += in[i][v];
3255  else
3256  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3257  for (unsigned int v = 0; v < 8; ++v)
3258  out[v][i] = in[i][v];
3259 }
3260 
3261 # endif
3262 
3263 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3264 
3268 template <>
3269 class VectorizedArray<double, 2>
3270  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
3271 {
3272 public:
3276  using value_type = double;
3277 
3282  VectorizedArray() = default;
3283 
3287  VectorizedArray(const double scalar)
3288  {
3289  this->operator=(scalar);
3290  }
3291 
3295  template <typename U>
3296  VectorizedArray(const std::initializer_list<U> &list)
3297     : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
3298   {}
3299 
3304  VectorizedArray &
3305  operator=(const double x)
3306  {
3307  data = _mm_set1_pd(x);
3308  return *this;
3309  }
3310 
3315  double &
3316  operator[](const unsigned int comp)
3317  {
3318  AssertIndexRange(comp, 2);
3319  return *(reinterpret_cast<double *>(&data) + comp);
3320  }
3321 
3326  const double &
3327  operator[](const unsigned int comp) const
3328  {
3329  AssertIndexRange(comp, 2);
3330  return *(reinterpret_cast<const double *>(&data) + comp);
3331  }
3332 
3337  VectorizedArray &
3338  operator+=(const VectorizedArray &vec)
3339  {
3340 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3341  data += vec.data;
3342 # else
3343  data = _mm_add_pd(data, vec.data);
3344 # endif
3345  return *this;
3346  }
3347 
3352  VectorizedArray &
3353  operator-=(const VectorizedArray &vec)
3354  {
3355 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3356  data -= vec.data;
3357 # else
3358  data = _mm_sub_pd(data, vec.data);
3359 # endif
3360  return *this;
3361  }
3362 
3367  VectorizedArray &
3368  operator*=(const VectorizedArray &vec)
3369  {
3370 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3371  data *= vec.data;
3372 # else
3373  data = _mm_mul_pd(data, vec.data);
3374 # endif
3375  return *this;
3376  }
3377 
3382  VectorizedArray &
3383  operator/=(const VectorizedArray &vec)
3384  {
3385 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3386  data /= vec.data;
3387 # else
3388  data = _mm_div_pd(data, vec.data);
3389 # endif
3390  return *this;
3391  }
3392 
3399  void
3400  load(const double *ptr)
3401  {
3402  data = _mm_loadu_pd(ptr);
3403  }
3404 
3412  void
3413  store(double *ptr) const
3414  {
3415  _mm_storeu_pd(ptr, data);
3416  }
3417 
3423  void
3424  streaming_store(double *ptr) const
3425  {
3426  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3427  ExcMessage("Memory not aligned"));
3428  _mm_stream_pd(ptr, data);
3429  }
3430 
3444  void
3445  gather(const double *base_ptr, const unsigned int *offsets)
3446  {
3447  for (unsigned int i = 0; i < 2; ++i)
3448  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3449  }
3450 
3464  void
3465  scatter(const unsigned int *offsets, double *base_ptr) const
3466  {
3467  for (unsigned int i = 0; i < 2; ++i)
3468  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3469  }
3470 
3476  __m128d data;
3477 
3478 private:
3485  get_sqrt() const
3486  {
3487  VectorizedArray res;
3488  res.data = _mm_sqrt_pd(data);
3489  return res;
3490  }
3491 
3498  get_abs() const
3499  {
3500  // to compute the absolute value, perform
3501  // bitwise andnot with -0. This will leave all
3502  // value and exponent bits unchanged but force
3503  // the sign value to +.
3504  __m128d mask = _mm_set1_pd(-0.);
3505  VectorizedArray res;
3506  res.data = _mm_andnot_pd(mask, data);
3507  return res;
3508  }
3509 
3516  get_max(const VectorizedArray &other) const
3517  {
3518  VectorizedArray res;
3519  res.data = _mm_max_pd(data, other.data);
3520  return res;
3521  }
3522 
3529  get_min(const VectorizedArray &other) const
3530  {
3531  VectorizedArray res;
3532  res.data = _mm_min_pd(data, other.data);
3533  return res;
3534  }
3535 
3536  // Make a few functions friends.
3537  template <typename Number2, std::size_t width2>
3540  template <typename Number2, std::size_t width2>
3543  template <typename Number2, std::size_t width2>
3547  template <typename Number2, std::size_t width2>
3551 };
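// Usage note (illustrative sketch): this SSE2 specialization holds two
// doubles. load()/store() accept arbitrary addresses, whereas
// streaming_store() above requires 16-byte alignment, e.g.
//
//   alignas(16) double buf[2];
//   VectorizedArray<double, 2> w = 3.0;
//   w.streaming_store(buf);   // non-temporal store via _mm_stream_pd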
3552 
3553 
3554 
3558 template <>
3559 inline DEAL_II_ALWAYS_INLINE void
3560 vectorized_load_and_transpose(const unsigned int n_entries,
3561  const double * in,
3562  const unsigned int * offsets,
3563                               VectorizedArray<double, 2> *out)
3564 {
3565  const unsigned int n_chunks = n_entries / 2;
3566  for (unsigned int i = 0; i < n_chunks; ++i)
3567  {
3568  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3569  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3570  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3571  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3572  }
3573 
3574  // remainder loop of work that does not divide by 2
3575  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3576  for (unsigned int v = 0; v < 2; ++v)
3577  out[i][v] = in[offsets[v] + i];
3578 }
3579 
3580 
3581 
3585 template <>
3586 inline DEAL_II_ALWAYS_INLINE void
3587 vectorized_load_and_transpose(const unsigned int n_entries,
3588  const std::array<double *, 2> &in,
3589                               VectorizedArray<double, 2> *out)
3590 {
3591  // see the comments in the vectorized_load_and_transpose above
3592 
3593  const unsigned int n_chunks = n_entries / 2;
3594  for (unsigned int i = 0; i < n_chunks; ++i)
3595  {
3596  __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3597  __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3598  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3599  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3600  }
3601 
3602  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3603  for (unsigned int v = 0; v < 2; ++v)
3604  out[i][v] = in[v][i];
3605 }
3606 
3607 
3608 
3612 template <>
3613 inline DEAL_II_ALWAYS_INLINE void
3614 vectorized_transpose_and_store(const bool add_into,
3615  const unsigned int n_entries,
3616  const VectorizedArray<double, 2> *in,
3617  const unsigned int * offsets,
3618  double * out)
3619 {
3620  const unsigned int n_chunks = n_entries / 2;
3621  if (add_into)
3622  {
3623  for (unsigned int i = 0; i < n_chunks; ++i)
3624  {
3625  __m128d u0 = in[2 * i + 0].data;
3626  __m128d u1 = in[2 * i + 1].data;
3627  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3628  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3629  _mm_storeu_pd(out + 2 * i + offsets[0],
3630  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3631  res0));
3632  _mm_storeu_pd(out + 2 * i + offsets[1],
3633  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3634  res1));
3635  }
3636  // remainder loop of work that does not divide by 2
3637  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3638  for (unsigned int v = 0; v < 2; ++v)
3639  out[offsets[v] + i] += in[i][v];
3640  }
3641  else
3642  {
3643  for (unsigned int i = 0; i < n_chunks; ++i)
3644  {
3645  __m128d u0 = in[2 * i + 0].data;
3646  __m128d u1 = in[2 * i + 1].data;
3647  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3648  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3649  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3650  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3651  }
3652  // remainder loop of work that does not divide by 2
3653  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3654  for (unsigned int v = 0; v < 2; ++v)
3655  out[offsets[v] + i] = in[i][v];
3656  }
3657 }
3658 
3659 
3660 
3664 template <>
3665 inline DEAL_II_ALWAYS_INLINE void
3666 vectorized_transpose_and_store(const bool add_into,
3667  const unsigned int n_entries,
3668  const VectorizedArray<double, 2> *in,
3669  std::array<double *, 2> & out)
3670 {
3671  // see the comments in the vectorized_transpose_and_store above
3672 
3673  const unsigned int n_chunks = n_entries / 2;
3674  if (add_into)
3675  {
3676  for (unsigned int i = 0; i < n_chunks; ++i)
3677  {
3678  __m128d u0 = in[2 * i + 0].data;
3679  __m128d u1 = in[2 * i + 1].data;
3680  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3681  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3682  _mm_storeu_pd(out[0] + 2 * i,
3683  _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3684  _mm_storeu_pd(out[1] + 2 * i,
3685  _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3686  }
3687 
3688  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3689  for (unsigned int v = 0; v < 2; ++v)
3690  out[v][i] += in[i][v];
3691  }
3692  else
3693  {
3694  for (unsigned int i = 0; i < n_chunks; ++i)
3695  {
3696  __m128d u0 = in[2 * i + 0].data;
3697  __m128d u1 = in[2 * i + 1].data;
3698  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3699  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3700  _mm_storeu_pd(out[0] + 2 * i, res0);
3701  _mm_storeu_pd(out[1] + 2 * i, res1);
3702  }
3703 
3704  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3705  for (unsigned int v = 0; v < 2; ++v)
3706  out[v][i] = in[i][v];
3707  }
3708 }
3709 
3710 
3711 
3715 template <>
3716 class VectorizedArray<float, 4>
3717  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
3718 {
3719 public:
3723  using value_type = float;
3724 
3733  VectorizedArray() = default;
3734 
3738  VectorizedArray(const float scalar)
3739  {
3740  this->operator=(scalar);
3741  }
3742 
3746  template <typename U>
3747  VectorizedArray(const std::initializer_list<U> &list)
3748  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
3749  {}
3750 
3752  VectorizedArray &
3753  operator=(const float x)
3754  {
3755  data = _mm_set1_ps(x);
3756  return *this;
3757  }
3758 
3763  float &
3764  operator[](const unsigned int comp)
3765  {
3766  AssertIndexRange(comp, 4);
3767  return *(reinterpret_cast<float *>(&data) + comp);
3768  }
3769 
3774  const float &
3775  operator[](const unsigned int comp) const
3776  {
3777  AssertIndexRange(comp, 4);
3778  return *(reinterpret_cast<const float *>(&data) + comp);
3779  }
3780 
3785  VectorizedArray &
3786  operator+=(const VectorizedArray &vec)
3787  {
3788 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3789  data += vec.data;
3790 # else
3791  data = _mm_add_ps(data, vec.data);
3792 # endif
3793  return *this;
3794  }
3795 
3800  VectorizedArray &
3801  operator-=(const VectorizedArray &vec)
3802  {
3803 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3804  data -= vec.data;
3805 # else
3806  data = _mm_sub_ps(data, vec.data);
3807 # endif
3808  return *this;
3809  }
3810 
3815  VectorizedArray &
3816  operator*=(const VectorizedArray &vec)
3817  {
3818 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3819  data *= vec.data;
3820 # else
3821  data = _mm_mul_ps(data, vec.data);
3822 # endif
3823  return *this;
3824  }
3825 
3830  VectorizedArray &
3831  operator/=(const VectorizedArray &vec)
3832  {
3833 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3834  data /= vec.data;
3835 # else
3836  data = _mm_div_ps(data, vec.data);
3837 # endif
3838  return *this;
3839  }
3840 
3847  void
3848  load(const float *ptr)
3849  {
3850  data = _mm_loadu_ps(ptr);
3851  }
3852 
3860  void
3861  store(float *ptr) const
3862  {
3863  _mm_storeu_ps(ptr, data);
3864  }
3865 
3871  void
3872  streaming_store(float *ptr) const
3873  {
3874  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3875  ExcMessage("Memory not aligned"));
3876  _mm_stream_ps(ptr, data);
3877  }
3878 
3892  void
3893  gather(const float *base_ptr, const unsigned int *offsets)
3894  {
3895  for (unsigned int i = 0; i < 4; ++i)
3896  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3897  }
3898 
3912  void
3913  scatter(const unsigned int *offsets, float *base_ptr) const
3914  {
3915  for (unsigned int i = 0; i < 4; ++i)
3916  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3917  }
3918 
3924  __m128 data;
3925 
3926 private:
3933  get_sqrt() const
3934  {
3935  VectorizedArray res;
3936  res.data = _mm_sqrt_ps(data);
3937  return res;
3938  }
3939 
3946  get_abs() const
3947  {
3948  // to compute the absolute value, perform bitwise andnot with -0. This
3949  // will leave all value and exponent bits unchanged but force the sign
3950  // value to +.
3951  __m128 mask = _mm_set1_ps(-0.f);
3952  VectorizedArray res;
3953  res.data = _mm_andnot_ps(mask, data);
3954  return res;
3955  }
3956 
3963  get_max(const VectorizedArray &other) const
3964  {
3965  VectorizedArray res;
3966  res.data = _mm_max_ps(data, other.data);
3967  return res;
3968  }
3969 
3976  get_min(const VectorizedArray &other) const
3977  {
3978  VectorizedArray res;
3979  res.data = _mm_min_ps(data, other.data);
3980  return res;
3981  }
3982 
3983  // Make a few functions friends.
3984  template <typename Number2, std::size_t width2>
3987  template <typename Number2, std::size_t width2>
3990  template <typename Number2, std::size_t width2>
3994  template <typename Number2, std::size_t width2>
3998 };
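// Note (sketch, not from the original sources): unlike the AVX2 code path of
// the eight-wide float specialization above, SSE2 offers no gather/scatter
// instructions, so both members fall back to the scalar loops shown and touch
// four floats per call.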
3999 
4000 
4001 
4005 template <>
4006 inline DEAL_II_ALWAYS_INLINE void
4007 vectorized_load_and_transpose(const unsigned int n_entries,
4008  const float * in,
4009  const unsigned int * offsets,
4010                               VectorizedArray<float, 4> *out)
4011 {
4012  const unsigned int n_chunks = n_entries / 4;
4013  for (unsigned int i = 0; i < n_chunks; ++i)
4014  {
4015  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
4016  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
4017  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
4018  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
4019  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
4020  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
4021  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4022  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4023  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
4024  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
4025  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
4026  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
4027  }
4028 
4029  // remainder loop of work that does not divide by 4
4030  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4031  for (unsigned int v = 0; v < 4; ++v)
4032  out[i][v] = in[offsets[v] + i];
4033 }
4034 
4035 
4036 
4040 template <>
4041 inline DEAL_II_ALWAYS_INLINE void
4042 vectorized_load_and_transpose(const unsigned int n_entries,
4043  const std::array<float *, 4> &in,
4044                               VectorizedArray<float, 4> *out)
4045 {
4046  // see the comments in the vectorized_load_and_transpose above
4047 
4048  const unsigned int n_chunks = n_entries / 4;
4049  for (unsigned int i = 0; i < n_chunks; ++i)
4050  {
4051  __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
4052  __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
4053  __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
4054  __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
4055  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
4056  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
4057  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4058  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4059  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
4060  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
4061  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
4062  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
4063  }
4064 
4065  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4066  for (unsigned int v = 0; v < 4; ++v)
4067  out[i][v] = in[v][i];
4068 }
4069 
4070 
4071 
4075 template <>
4076 inline DEAL_II_ALWAYS_INLINE void
4077 vectorized_transpose_and_store(const bool add_into,
4078  const unsigned int n_entries,
4079  const VectorizedArray<float, 4> *in,
4080  const unsigned int * offsets,
4081  float * out)
4082 {
4083  const unsigned int n_chunks = n_entries / 4;
4084  for (unsigned int i = 0; i < n_chunks; ++i)
4085  {
4086  __m128 u0 = in[4 * i + 0].data;
4087  __m128 u1 = in[4 * i + 1].data;
4088  __m128 u2 = in[4 * i + 2].data;
4089  __m128 u3 = in[4 * i + 3].data;
4090  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4091  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4092  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4093  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4094  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4095  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4096  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4097  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4098 
4099  // Cannot use the same store instructions in both paths of the 'if'
4100  // because the compiler cannot know that there is no aliasing between
4101  // pointers
4102  if (add_into)
4103  {
4104  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4105  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4106  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4107  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4108  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4109  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4110  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4111  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4112  }
4113  else
4114  {
4115  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4116  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4117  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4118  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4119  }
4120  }
4121 
4122  // remainder loop of work that does not divide by 4
4123  if (add_into)
4124  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4125  for (unsigned int v = 0; v < 4; ++v)
4126  out[offsets[v] + i] += in[i][v];
4127  else
4128  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4129  for (unsigned int v = 0; v < 4; ++v)
4130  out[offsets[v] + i] = in[i][v];
4131 }
4132 
4133 
4134 
4138 template <>
4139 inline DEAL_II_ALWAYS_INLINE void
4140 vectorized_transpose_and_store(const bool add_into,
4141  const unsigned int n_entries,
4142  const VectorizedArray<float, 4> *in,
4143  std::array<float *, 4> & out)
4144 {
4145  // see the comments in the vectorized_transpose_and_store above
4146 
4147  const unsigned int n_chunks = n_entries / 4;
4148  for (unsigned int i = 0; i < n_chunks; ++i)
4149  {
4150  __m128 u0 = in[4 * i + 0].data;
4151  __m128 u1 = in[4 * i + 1].data;
4152  __m128 u2 = in[4 * i + 2].data;
4153  __m128 u3 = in[4 * i + 3].data;
4154  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4155  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4156  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4157  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4158  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4159  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4160  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4161  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4162 
4163  if (add_into)
4164  {
4165  u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4166  _mm_storeu_ps(out[0] + 4 * i, u0);
4167  u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4168  _mm_storeu_ps(out[1] + 4 * i, u1);
4169  u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4170  _mm_storeu_ps(out[2] + 4 * i, u2);
4171  u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4172  _mm_storeu_ps(out[3] + 4 * i, u3);
4173  }
4174  else
4175  {
4176  _mm_storeu_ps(out[0] + 4 * i, u0);
4177  _mm_storeu_ps(out[1] + 4 * i, u1);
4178  _mm_storeu_ps(out[2] + 4 * i, u2);
4179  _mm_storeu_ps(out[3] + 4 * i, u3);
4180  }
4181  }
4182 
4183  if (add_into)
4184  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4185  for (unsigned int v = 0; v < 4; ++v)
4186  out[v][i] += in[i][v];
4187  else
4188  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4189  for (unsigned int v = 0; v < 4; ++v)
4190  out[v][i] = in[i][v];
4191 }
4192 
4193 
4194 
4195 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0 && defined(__SSE2__)
4196 
4197 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4198  defined(__VSX__)
4199 
4200 template <>
4201 class VectorizedArray<double, 2>
4202  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
4203 {
4204 public:
4208  using value_type = double;
4209 
4214  VectorizedArray() = default;
4215 
4219  VectorizedArray(const double scalar)
4220  {
4221  this->operator=(scalar);
4222  }
4223 
4227  template <typename U>
4228  VectorizedArray(const std::initializer_list<U> &list)
4229     : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
4230   {}
4231 
4236  VectorizedArray &
4237  operator=(const double x)
4238  {
4239  data = vec_splats(x);
4240 
4241  // Some compilers believe that vec_splats sets 'x', but that's not true.
4242  // They then warn about setting a variable and not using it. Suppress the
4243  // warning by "using" the variable:
4244  (void)x;
4245  return *this;
4246  }
4247 
4252  double &
4253  operator[](const unsigned int comp)
4254  {
4255  AssertIndexRange(comp, 2);
4256  return *(reinterpret_cast<double *>(&data) + comp);
4257  }
4258 
4263  const double &
4264  operator[](const unsigned int comp) const
4265  {
4266  AssertIndexRange(comp, 2);
4267  return *(reinterpret_cast<const double *>(&data) + comp);
4268  }
4269 
4274  VectorizedArray &
4275  operator+=(const VectorizedArray &vec)
4276  {
4277  data = vec_add(data, vec.data);
4278  return *this;
4279  }
4280 
4285  VectorizedArray &
4286  operator-=(const VectorizedArray &vec)
4287  {
4288  data = vec_sub(data, vec.data);
4289  return *this;
4290  }
4291 
4296  VectorizedArray &
4297  operator*=(const VectorizedArray &vec)
4298  {
4299  data = vec_mul(data, vec.data);
4300  return *this;
4301  }
4302 
4307  VectorizedArray &
4308  operator/=(const VectorizedArray &vec)
4309  {
4310  data = vec_div(data, vec.data);
4311  return *this;
4312  }
4313 
4319  void
4320  load(const double *ptr)
4321  {
4322  data = vec_vsx_ld(0, ptr);
4323  }
4324 
4330  void
4331  store(double *ptr) const
4332  {
4333  vec_vsx_st(data, 0, ptr);
4334  }
4335 
4340  void
4341  streaming_store(double *ptr) const
4342  {
4343  store(ptr);
4344  }
4345 
4350  void
4351  gather(const double *base_ptr, const unsigned int *offsets)
4352  {
4353  for (unsigned int i = 0; i < 2; ++i)
4354  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4355  }
4356 
4361  void
4362  scatter(const unsigned int *offsets, double *base_ptr) const
4363  {
4364  for (unsigned int i = 0; i < 2; ++i)
4365  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4366  }
4367 
4373  __vector double data;
4374 
4375 private:
4382  get_sqrt() const
4383  {
4384  VectorizedArray res;
4385  res.data = vec_sqrt(data);
4386  return res;
4387  }
4388 
4395  get_abs() const
4396  {
4397  VectorizedArray res;
4398  res.data = vec_abs(data);
4399  return res;
4400  }
4401 
4408  get_max(const VectorizedArray &other) const
4409  {
4410  VectorizedArray res;
4411  res.data = vec_max(data, other.data);
4412  return res;
4413  }
4414 
4421  get_min(const VectorizedArray &other) const
4422  {
4423  VectorizedArray res;
4424  res.data = vec_min(data, other.data);
4425  return res;
4426  }
4427 
4428  // Make a few functions friends.
4429  template <typename Number2, std::size_t width2>
4432  template <typename Number2, std::size_t width2>
4435  template <typename Number2, std::size_t width2>
4439  template <typename Number2, std::size_t width2>
4443 };
4444 
4445 
4446 
4447 template <>
4448 class VectorizedArray<float, 4>
4449  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
4450 {
4451 public:
4455  using value_type = float;
4456 
4461  VectorizedArray() = default;
4462 
4466  VectorizedArray(const float scalar)
4467  {
4468  this->operator=(scalar);
4469  }
4470 
4474  template <typename U>
4475  VectorizedArray(const std::initializer_list<U> &list)
4476  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
4477  {}
4478 
4483  VectorizedArray &
4484  operator=(const float x)
4485  {
4486  data = vec_splats(x);
4487 
4488  // Some compilers believe that vec_splats sets 'x', but that's not true.
4489  // They then warn about setting a variable and not using it. Suppress the
4490  // warning by "using" the variable:
4491  (void)x;
4492  return *this;
4493  }
4494 
4499  float &
4500  operator[](const unsigned int comp)
4501  {
4502  AssertIndexRange(comp, 4);
4503  return *(reinterpret_cast<float *>(&data) + comp);
4504  }
4505 
4510  const float &
4511  operator[](const unsigned int comp) const
4512  {
4513  AssertIndexRange(comp, 4);
4514  return *(reinterpret_cast<const float *>(&data) + comp);
4515  }
4516 
4521  VectorizedArray &
4522  operator+=(const VectorizedArray &vec)
4523  {
4524  data = vec_add(data, vec.data);
4525  return *this;
4526  }
4527 
4532  VectorizedArray &
4533  operator-=(const VectorizedArray &vec)
4534  {
4535  data = vec_sub(data, vec.data);
4536  return *this;
4537  }
4538 
4543  VectorizedArray &
4544  operator*=(const VectorizedArray &vec)
4545  {
4546  data = vec_mul(data, vec.data);
4547  return *this;
4548  }
4549 
4554  VectorizedArray &
4555  operator/=(const VectorizedArray &vec)
4556  {
4557  data = vec_div(data, vec.data);
4558  return *this;
4559  }
4560 
4566  void
4567  load(const float *ptr)
4568  {
4569  data = vec_vsx_ld(0, ptr);
4570  }
4571 
4577  void
4578  store(float *ptr) const
4579  {
4580  vec_vsx_st(data, 0, ptr);
4581  }
4582 
4587  void
4588  streaming_store(float *ptr) const
4589  {
4590  store(ptr);
4591  }
4592 
4597  void
4598  gather(const float *base_ptr, const unsigned int *offsets)
4599  {
4600  for (unsigned int i = 0; i < 4; ++i)
4601  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4602  }
4603 
4608  void
4609  scatter(const unsigned int *offsets, float *base_ptr) const
4610  {
4611  for (unsigned int i = 0; i < 4; ++i)
4612  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4613  }
4614 
4620  __vector float data;
4621 
4622 private:
4629  get_sqrt() const
4630  {
4631  VectorizedArray res;
4632  res.data = vec_sqrt(data);
4633  return res;
4634  }
4635 
4642  get_abs() const
4643  {
4644  VectorizedArray res;
4645  res.data = vec_abs(data);
4646  return res;
4647  }
4648 
4655  get_max(const VectorizedArray &other) const
4656  {
4657  VectorizedArray res;
4658  res.data = vec_max(data, other.data);
4659  return res;
4660  }
4661 
4668  get_min(const VectorizedArray &other) const
4669  {
4670  VectorizedArray res;
4671  res.data = vec_min(data, other.data);
4672  return res;
4673  }
4674 
4675  // Make a few functions friends.
4676  template <typename Number2, std::size_t width2>
4679  template <typename Number2, std::size_t width2>
4682  template <typename Number2, std::size_t width2>
4686  template <typename Number2, std::size_t width2>
4690 };
4691 
4692 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) &&
4693         // defined(__VSX__)
4694 
4695 
4696 #endif // DOXYGEN
4697 
4702 
4708 template <typename Number, std::size_t width>
4709 inline DEAL_II_ALWAYS_INLINE bool
4710 operator==(const VectorizedArray<Number, width> &lhs,
4711            const VectorizedArray<Number, width> &rhs)
4712 {
4713  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4714  if (lhs[i] != rhs[i])
4715  return false;
4716 
4717  return true;
4718 }
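// Illustration (sketch only, assuming a width supported on the target): the
// comparison is exact and lane-wise, so
//
//   VectorizedArray<double, 2> a = 1.0, b = 1.0;
//   const bool same = (a == b);   // true only if every lane matches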
4719 
4720 
4726 template <typename Number, std::size_t width>
4727 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4728 operator+(const VectorizedArray<Number, width> &u,
4729           const VectorizedArray<Number, width> &v)
4730 {
4731   VectorizedArray<Number, width> tmp = u;
4732   return tmp += v;
4733 }
4734 
4740 template <typename Number, std::size_t width>
4741 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4742 operator-(const VectorizedArray<Number, width> &u,
4743           const VectorizedArray<Number, width> &v)
4744 {
4745   VectorizedArray<Number, width> tmp = u;
4746   return tmp -= v;
4747 }
4748 
4754 template <typename Number, std::size_t width>
4755 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4756 operator*(const VectorizedArray<Number, width> &u,
4757           const VectorizedArray<Number, width> &v)
4758 {
4759   VectorizedArray<Number, width> tmp = u;
4760   return tmp *= v;
4761 }
4762 
4768 template <typename Number, std::size_t width>
4769 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4770 operator/(const VectorizedArray<Number, width> &u,
4771           const VectorizedArray<Number, width> &v)
4772 {
4773   VectorizedArray<Number, width> tmp = u;
4774   return tmp /= v;
4775 }
4776 
4783 template <typename Number, std::size_t width>
4784 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4785 operator+(const Number &u, const VectorizedArray<Number, width> &v)
4786 {
4787   VectorizedArray<Number, width> tmp = u;
4788   return tmp += v;
4789 }
4790 
4799 template <std::size_t width>
4800 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4801 operator+(const double u, const VectorizedArray<float, width> &v)
4802 {
4803   VectorizedArray<float, width> tmp = static_cast<float>(u);
4804   return tmp += v;
4805 }
4806 
4813 template <typename Number, std::size_t width>
4814 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4815 operator+(const VectorizedArray<Number, width> &v, const Number &u)
4816 {
4817  return u + v;
4818 }
4819 
4828 template <std::size_t width>
4829 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4830 operator+(const VectorizedArray<float, width> &v, const double u)
4831 {
4832  return u + v;
4833 }
4834 
4841 template <typename Number, std::size_t width>
4842 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4843 operator-(const Number &u, const VectorizedArray<Number, width> &v)
4844 {
4845   VectorizedArray<Number, width> tmp = u;
4846   return tmp -= v;
4847 }
4848 
4857 template <std::size_t width>
4858 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4859 operator-(const double u, const VectorizedArray<float, width> &v)
4860 {
4861  VectorizedArray<float, width> tmp = static_cast<float>(u);
4862  return tmp -= v;
4863 }
4864 
4871 template <typename Number, std::size_t width>
4872 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4873 operator-(const VectorizedArray<Number, width> &v, const Number &u)
4874 {
4875   VectorizedArray<Number, width> tmp = u;
4876   return v - tmp;
4877 }
4878 
4887 template <std::size_t width>
4888 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4889 operator-(const VectorizedArray<float, width> &v, const double u)
4890 {
4891  VectorizedArray<float, width> tmp = static_cast<float>(u);
4892  return v - tmp;
4893 }
4894 
4901 template <typename Number, std::size_t width>
4902 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4903 operator*(const Number &u, const VectorizedArray<Number, width> &v)
4904 {
4905   VectorizedArray<Number, width> tmp = u;
4906   return tmp *= v;
4907 }
4908 
4917 template <std::size_t width>
4918 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4919 operator*(const double u, const VectorizedArray<float, width> &v)
4920 {
4921  VectorizedArray<float, width> tmp = static_cast<float>(u);
4922  return tmp *= v;
4923 }
4924 
4931 template <typename Number, std::size_t width>
4932 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4933 operator*(const VectorizedArray<Number, width> &v, const Number &u)
4934 {
4935  return u * v;
4936 }
4937 
4946 template <std::size_t width>
4947 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4948 operator*(const VectorizedArray<float, width> &v, const double u)
4949 {
4950  return u * v;
4951 }
4952 
4959 template <typename Number, std::size_t width>
4960 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4961 operator/(const Number &u, const VectorizedArray<Number, width> &v)
4962 {
4963   VectorizedArray<Number, width> tmp = u;
4964   return tmp /= v;
4965 }
4966 
4975 template <std::size_t width>
4976 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4977 operator/(const double u, const VectorizedArray<float, width> &v)
4978 {
4979  VectorizedArray<float, width> tmp = static_cast<float>(u);
4980  return tmp /= v;
4981 }
4982 
4989 template <typename Number, std::size_t width>
4990 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4991 operator/(const VectorizedArray<Number, width> &v, const Number &u)
4992 {
4993   VectorizedArray<Number, width> tmp = u;
4994   return v / tmp;
4995 }
4996 
5005 template <std::size_t width>
5006 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5007 operator/(const VectorizedArray<float, width> &v, const double u)
5008 {
5009  VectorizedArray<float, width> tmp = static_cast<float>(u);
5010  return v / tmp;
5011 }
5012 
5018 template <typename Number, std::size_t width>
5019 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5020 operator+(const VectorizedArray<Number, width> &u)
5021 {
5022  return u;
5023 }
5024 
5030 template <typename Number, std::size_t width>
5031 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5032 operator-(const VectorizedArray<Number, width> &u)
5033 {
5034  // to get a negative sign, subtract the input from zero (could also
5035  // multiply by -1, but this one is slightly simpler)
5036  return VectorizedArray<Number, width>() - u;
5037 }
5038 
5044 template <typename Number, std::size_t width>
5045 inline std::ostream &
5046 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
5047 {
5048  constexpr unsigned int n = VectorizedArray<Number, width>::size();
5049  for (unsigned int i = 0; i < n - 1; ++i)
5050  out << p[i] << ' ';
5051  out << p[n - 1];
5052 
5053  return out;
5054 }
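// Usage sketch (illustrative): the stream operator prints the lanes separated
// by single spaces,
//
//   VectorizedArray<float, 4> p = 2.0f;
//   std::cout << p;               // prints "2 2 2 2"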
5055 
5057 
5062 
5063 
5071 enum class SIMDComparison : int
5072 {
5073 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5074  equal = _CMP_EQ_OQ,
5075  not_equal = _CMP_NEQ_OQ,
5076  less_than = _CMP_LT_OQ,
5077  less_than_or_equal = _CMP_LE_OQ,
5078  greater_than = _CMP_GT_OQ,
5079  greater_than_or_equal = _CMP_GE_OQ
5080 #else
5081  equal,
5082  not_equal,
5083   less_than,
5084   less_than_or_equal,
5085   greater_than,
5086   greater_than_or_equal
5087 #endif
5088 };
5089 
5090 
5154 template <SIMDComparison predicate, typename Number>
5155 DEAL_II_ALWAYS_INLINE inline Number
5156 compare_and_apply_mask(const Number &left,
5157  const Number &right,
5158  const Number &true_value,
5159  const Number &false_value)
5160 {
5161  bool mask;
5162  switch (predicate)
5163  {
5164  case SIMDComparison::equal:
5165  mask = (left == right);
5166  break;
5167      case SIMDComparison::not_equal:
5168        mask = (left != right);
5169        break;
5170      case SIMDComparison::less_than:
5171        mask = (left < right);
5172        break;
5173      case SIMDComparison::less_than_or_equal:
5174        mask = (left <= right);
5175        break;
5176      case SIMDComparison::greater_than:
5177        mask = (left > right);
5178        break;
5179      case SIMDComparison::greater_than_or_equal:
5180        mask = (left >= right);
5181        break;
5182  }
5183 
5184  return mask ? true_value : false_value;
5185 }
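// Usage sketch (not part of the sources): because of this scalar fallback the
// same expression compiles for plain numbers and for VectorizedArray, e.g. a
// branch-free clamp at zero,
//
//   x = compare_and_apply_mask<SIMDComparison::less_than>(x, Number(0.),
//                                                         Number(0.), x);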
5186 
5187 
5192 template <SIMDComparison predicate, typename Number>
5193 DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
5194 compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
5195                        const VectorizedArray<Number, 1> &right,
5196  const VectorizedArray<Number, 1> &true_value,
5197  const VectorizedArray<Number, 1> &false_value)
5198 {
5199   VectorizedArray<Number, 1> result;
5200   result.data = compare_and_apply_mask<predicate, Number>(left.data,
5201  right.data,
5202  true_value.data,
5203  false_value.data);
5204  return result;
5205 }
5206 
5208 
5209 #ifndef DOXYGEN
5210 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5211 
5212 template <SIMDComparison predicate>
5213 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
5214 compare_and_apply_mask(const VectorizedArray<float, 16> &left,
5215                        const VectorizedArray<float, 16> &right,
5216  const VectorizedArray<float, 16> &true_values,
5217  const VectorizedArray<float, 16> &false_values)
5218 {
5219  const __mmask16 mask =
5220  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5221   VectorizedArray<float, 16> result;
5222   result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5223  return result;
5224 }
5225 
5226 
5227 
5228 template <SIMDComparison predicate>
5229 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
5230 compare_and_apply_mask(const VectorizedArray<double, 8> &left,
5231                        const VectorizedArray<double, 8> &right,
5232  const VectorizedArray<double, 8> &true_values,
5233  const VectorizedArray<double, 8> &false_values)
5234 {
5235  const __mmask16 mask =
5236  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5237   VectorizedArray<double, 8> result;
5238   result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5239  return result;
5240 }
5241 
5242 # endif
5243 
5244 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5245 
5246 template <SIMDComparison predicate>
5247 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
5248 compare_and_apply_mask(const VectorizedArray<float, 8> &left,
5249                        const VectorizedArray<float, 8> &right,
5250  const VectorizedArray<float, 8> &true_values,
5251  const VectorizedArray<float, 8> &false_values)
5252 {
5253  const auto mask =
5254  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5255 
5256   VectorizedArray<float, 8> result;
5257   result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
5258  return result;
5259 }
5260 
5261 
5262 template <SIMDComparison predicate>
5263 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
5264 compare_and_apply_mask(const VectorizedArray<double, 4> &left,
5265                        const VectorizedArray<double, 4> &right,
5266  const VectorizedArray<double, 4> &true_values,
5267  const VectorizedArray<double, 4> &false_values)
5268 {
5269  const auto mask =
5270  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5271 
5272   VectorizedArray<double, 4> result;
5273   result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
5274  return result;
5275 }
5276 
5277 # endif
5278 
5279 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5280 
5281 template <SIMDComparison predicate>
5282 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
5283 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
5284                        const VectorizedArray<float, 4> &right,
5285  const VectorizedArray<float, 4> &true_values,
5286  const VectorizedArray<float, 4> &false_values)
5287 {
5288  __m128 mask;
5289  switch (predicate)
5290  {
5291  case SIMDComparison::equal:
5292  mask = _mm_cmpeq_ps(left.data, right.data);
5293  break;
5294      case SIMDComparison::not_equal:
5295        mask = _mm_cmpneq_ps(left.data, right.data);
5296        break;
5297      case SIMDComparison::less_than:
5298        mask = _mm_cmplt_ps(left.data, right.data);
5299        break;
5300      case SIMDComparison::less_than_or_equal:
5301        mask = _mm_cmple_ps(left.data, right.data);
5302        break;
5303      case SIMDComparison::greater_than:
5304        mask = _mm_cmpgt_ps(left.data, right.data);
5305        break;
5306      case SIMDComparison::greater_than_or_equal:
5307        mask = _mm_cmpge_ps(left.data, right.data);
5308        break;
5309  }
5310 
5311   VectorizedArray<float, 4> result;
5312   result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5313  _mm_andnot_ps(mask, false_values.data));
5314 
5315  return result;
5316 }
5317 
5318 
5319 template <SIMDComparison predicate>
5320 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
5321 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
5322                        const VectorizedArray<double, 2> &right,
5323  const VectorizedArray<double, 2> &true_values,
5324  const VectorizedArray<double, 2> &false_values)
5325 {
5326  __m128d mask;
5327  switch (predicate)
5328  {
5329  case SIMDComparison::equal:
5330  mask = _mm_cmpeq_pd(left.data, right.data);
5331  break;
5332      case SIMDComparison::not_equal:
5333        mask = _mm_cmpneq_pd(left.data, right.data);
5334        break;
5335      case SIMDComparison::less_than:
5336        mask = _mm_cmplt_pd(left.data, right.data);
5337        break;
5338      case SIMDComparison::less_than_or_equal:
5339        mask = _mm_cmple_pd(left.data, right.data);
5340        break;
5341      case SIMDComparison::greater_than:
5342        mask = _mm_cmpgt_pd(left.data, right.data);
5343        break;
5344      case SIMDComparison::greater_than_or_equal:
5345        mask = _mm_cmpge_pd(left.data, right.data);
5346        break;
5347  }
5348 
5349   VectorizedArray<double, 2> result;
5350   result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5351  _mm_andnot_pd(mask, false_values.data));
5352 
5353  return result;
5354 }
5355 
5356 # endif
5357 #endif // DOXYGEN
5358 
5359 
5360 namespace internal
5361 {
5362  template <typename T>
5363   struct VectorizedArrayTrait
5364   {
5365  using value_type = T;
5366  };
5367 
5368  template <typename T, std::size_t width>
5369   struct VectorizedArrayTrait<VectorizedArray<T, width>>
5370   {
5371  using value_type = T;
5372  };
5373 } // namespace internal
5374 
5375 
5377 
5384 namespace std
5385 {
5393  template <typename Number, std::size_t width>
5394  inline ::VectorizedArray<Number, width>
5395  sin(const ::VectorizedArray<Number, width> &x)
5396  {
5397  // put values in an array and later read in that array with an unaligned
5398  // read. This should save some instructions as compared to directly
5399  // setting the individual elements and also circumvents a compiler
5400  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
5401  // from April 2014, topic "matrix_free/step-48 Test").
5402     Number values[::VectorizedArray<Number, width>::size()];
5403     for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5404          ++i)
5405       values[i] = std::sin(x[i]);
5406     ::VectorizedArray<Number, width> out;
5407     out.load(&values[0]);
5408  return out;
5409  }
5410 
5411 
5412 
5420  template <typename Number, std::size_t width>
5421  inline ::VectorizedArray<Number, width>
5422  cos(const ::VectorizedArray<Number, width> &x)
5423  {
5424     Number values[::VectorizedArray<Number, width>::size()];
5425     for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5426          ++i)
5427       values[i] = std::cos(x[i]);
5428     ::VectorizedArray<Number, width> out;
5429     out.load(&values[0]);
5430  return out;
5431  }
5432 
5433 
5434 
5442  template <typename Number, std::size_t width>
5443  inline ::VectorizedArray<Number, width>
5444  tan(const ::VectorizedArray<Number, width> &x)
5445  {
5446     Number values[::VectorizedArray<Number, width>::size()];
5447     for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5448          ++i)
5449       values[i] = std::tan(x[i]);
5450     ::VectorizedArray<Number, width> out;
5451     out.load(&values[0]);
5452  return out;
5453  }
5454 
5455 
5456 
5464  template <typename Number, std::size_t width>
5465  inline ::VectorizedArray<Number, width>
5466  exp(const ::VectorizedArray<Number, width> &x)
5467  {
5468     Number values[::VectorizedArray<Number, width>::size()];
5469     for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5470          ++i)
5471       values[i] = std::exp(x[i]);
5472     ::VectorizedArray<Number, width> out;
5473     out.load(&values[0]);
5474  return out;
5475  }
5476 
5477 
5478 
5486  template <typename Number, std::size_t width>
5487  inline ::VectorizedArray<Number, width>
5488  log(const ::VectorizedArray<Number, width> &x)
5489  {
5490  Number values[::VectorizedArray<Number, width>::size()];
5491  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5492  ++i)
5493  values[i] = std::log(x[i]);
5494  ::VectorizedArray<Number, width> out;
5495  out.load(&values[0]);
5496  return out;
5497  }
5498 
5499 
5500 
5508  template <typename Number, std::size_t width>
5509  inline ::VectorizedArray<Number, width>
5510  sqrt(const ::VectorizedArray<Number, width> &x)
5511  {
5512  return x.get_sqrt();
5513  }
5514 
5515 
5516 
5524  template <typename Number, std::size_t width>
5525  inline ::VectorizedArray<Number, width>
5526  pow(const ::VectorizedArray<Number, width> &x, const Number p)
5527  {
5528  Number values[::VectorizedArray<Number, width>::size()];
5529  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5530  ++i)
5531  values[i] = std::pow(x[i], p);
5532  ::VectorizedArray<Number, width> out;
5533  out.load(&values[0]);
5534  return out;
5535  }
5536 
5537 
5538 
5547  template <typename Number, std::size_t width>
5548  inline ::VectorizedArray<Number, width>
5549  pow(const ::VectorizedArray<Number, width> &x,
5550  const ::VectorizedArray<Number, width> &p)
5551  {
5552  Number values[::VectorizedArray<Number, width>::size()];
5553  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5554  ++i)
5555  values[i] = std::pow(x[i], p[i]);
5556  ::VectorizedArray<Number, width> out;
5557  out.load(&values[0]);
5558  return out;
5559  }
5560 
5561 
5562 
5570  template <typename Number, std::size_t width>
5571  inline ::VectorizedArray<Number, width>
5572  abs(const ::VectorizedArray<Number, width> &x)
5573  {
5574  return x.get_abs();
5575  }
5576 
5577 
5578 
5586  template <typename Number, std::size_t width>
5587  inline ::VectorizedArray<Number, width>
5588  max(const ::VectorizedArray<Number, width> &x,
5589  const ::VectorizedArray<Number, width> &y)
5590  {
5591  return x.get_max(y);
5592  }
5593 
5594 
5595 
5603  template <typename Number, std::size_t width>
5604  inline ::VectorizedArray<Number, width>
5605  min(const ::VectorizedArray<Number, width> &x,
5606  const ::VectorizedArray<Number, width> &y)
5607  {
5608  return x.get_min(y);
5609  }
5610 
5611 
5612 
5616  template <class T>
5617  struct iterator_traits<::VectorizedArrayIterator<T>>
5618  {
5619  using iterator_category = random_access_iterator_tag;
5620  using value_type = typename T::value_type;
5621  using difference_type = std::ptrdiff_t;
5622  };
5623 
5624 } // namespace std
5625 
5626 #endif
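The std overloads above either loop over the lanes and call the scalar cmath function (the store/apply/load pattern explained in the comment inside sin()) or forward to member functions such as get_sqrt(), get_abs(), get_max() and get_min(). A short usage sketch, assuming a default-width VectorizedArray<double>:

#include <deal.II/base/vectorization.h>

#include <cmath>
#include <iostream>

int main()
{
  dealii::VectorizedArray<double> x;
  for (unsigned int lane = 0; lane < x.size(); ++lane)
    x[lane] = 0.25 * (lane + 1); // fill each lane with a different value

  // Each call below acts independently on every lane.
  const auto s = std::sin(x);
  const auto r = std::sqrt(x);
  const auto p = std::pow(x, 2.0);

  for (unsigned int lane = 0; lane < x.size(); ++lane)
    std::cout << s[lane] << ' ' << r[lane] << ' ' << p[lane] << '\n';

  return 0;
}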