vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
24 
25 #include <array>
26 #include <cmath>
27 
28 // Note:
29 // The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30 // according to the following scheme (on x86-based architectures)
31 // #ifdef __AVX512F__
32 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33 // #elif defined (__AVX__)
34 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35 // #elif defined (__SSE2__)
36 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37 // #else
38 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39 // #endif
40 // In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
41 // test, 'check_01_cpu_features.cmake', ensures that these features are not only
42 // present in the compilation unit but also working properly.
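//
// As an illustration (not part of this file), a sketch of what this flag
// implies for the default widths of the VectorizedArray class defined below,
// assuming both deal.II and the user code were compiled with AVX:
//
//   // DEAL_II_VECTORIZATION_WIDTH_IN_BITS == 256
//   static_assert(VectorizedArray<double>::size() == 4, "4 doubles per __m256d");
//   static_assert(VectorizedArray<float>::size() == 8, "8 floats per __m256");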
43 
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45 
46 // These error messages try to detect the case that deal.II was compiled with
47 // a wider instruction set extension than the current compilation unit, for
48 // example because deal.II was compiled with AVX, but a user project does not
49 // add -march=native or similar flags, making it fall back to SSE2. This leads to
50 // very strange errors as the size of data structures differs between the
51 // compiled deal.II code sitting in libdeal_II.so and the user code if not
52 // detected.
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54 # error \
55  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56 # endif
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58 # error \
59  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60 # endif
61 
62 # ifdef _MSC_VER
63 # include <intrin.h>
64 # elif defined(__ALTIVEC__)
65 # include <altivec.h>
66 
67 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68 // them before they make trouble
69 # undef vector
70 # undef pixel
71 # undef bool
72 # else
73 # include <x86intrin.h>
74 # endif
75 
76 #endif
77 
78 
80 
81 
82 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
83 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
84 
85 template <typename Number, std::size_t width>
86 struct EnableIfScalar<VectorizedArray<Number, width>>
87 {
88  using type = VectorizedArray<typename EnableIfScalar<Number>::type, width>;
89 };
90 
91 
92 
96 template <typename T>
97 class VectorizedArrayIterator
98 {
99 public:
106  VectorizedArrayIterator(T &data, const std::size_t lane)
107  : data(&data)
108  , lane(lane)
109  {}
110 
114  bool
115  operator==(const VectorizedArrayIterator<T> &other) const
116  {
117  Assert(this->data == other.data,
118  ExcMessage(
119  "You are trying to compare iterators into different arrays."));
120  return this->lane == other.lane;
121  }
122 
126  bool
127  operator!=(const VectorizedArrayIterator<T> &other) const
128  {
129  Assert(this->data == other.data,
130  ExcMessage(
131  "You are trying to compare iterators into different arrays."));
132  return this->lane != other.lane;
133  }
134 
138  VectorizedArrayIterator<T> &
139  operator=(const VectorizedArrayIterator<T> &other) = default;
140 
145  const typename T::value_type &
146  operator*() const
147  {
148  AssertIndexRange(lane, T::size());
149  return (*data)[lane];
150  }
151 
152 
157  template <typename U = T>
158  std::enable_if_t<!std::is_same<U, const U>::value, typename T::value_type> &
159  operator*()
160  {
161  AssertIndexRange(lane, T::size());
162  return (*data)[lane];
163  }
164 
170  VectorizedArrayIterator<T> &
171  operator++()
172  {
173  AssertIndexRange(lane + 1, T::size() + 1);
174  lane++;
175  return *this;
176  }
177 
182  VectorizedArrayIterator<T> &
183  operator+=(const std::size_t offset)
184  {
185  AssertIndexRange(lane + offset, T::size() + 1);
186  lane += offset;
187  return *this;
188  }
189 
195  VectorizedArrayIterator<T> &
196  operator--()
197  {
198  Assert(
199  lane > 0,
200  ExcMessage(
201  "You can't decrement an iterator that is already at the beginning of the range."));
202  --lane;
203  return *this;
204  }
205 
209  VectorizedArrayIterator<T>
210  operator+(const std::size_t &offset) const
211  {
212  AssertIndexRange(lane + offset, T::size() + 1);
213  return VectorizedArrayIterator<T>(*data, lane + offset);
214  }
215 
219  std::ptrdiff_t
220  operator-(const VectorizedArrayIterator<T> &other) const
221  {
222  return static_cast<std::ptrdiff_t>(lane) -
223  static_cast<ptrdiff_t>(other.lane);
224  }
225 
226 private:
230  T *data;
231 
235  std::size_t lane;
236 };
237 
238 
239 
249 template <typename T, std::size_t width>
250 class VectorizedArrayBase
251 {
252 public:
256  VectorizedArrayBase() = default;
257 
261  template <typename U>
262  VectorizedArrayBase(const std::initializer_list<U> &list)
263  {
264  auto i0 = this->begin();
265  auto i1 = list.begin();
266 
267  for (; i1 != list.end(); ++i0, ++i1)
268  {
269  Assert(
270  i0 != this->end(),
271  ExcMessage(
272  "Initializer list exceeds size of this VectorizedArray object."));
273 
274  *i0 = *i1;
275  }
276 
277  for (; i0 != this->end(); ++i0)
278  {
279  *i0 = 0.0;
280  }
281  }
282 
286  static constexpr std::size_t
287  size()
288  {
289  return width;
290  }
291 
295  VectorizedArrayIterator<T>
296  begin()
297  {
298  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
299  }
300 
305  VectorizedArrayIterator<const T>
306  begin() const
307  {
308  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
309  }
310 
314  VectorizedArrayIterator<T>
315  end()
316  {
317  return VectorizedArrayIterator<T>(static_cast<T &>(*this), width);
318  }
319 
324  VectorizedArrayIterator<const T>
325  end() const
326  {
327  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
328  width);
329  }
330 };
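// Illustrative sketch (user code, not part of this file): the begin()/end()
// functions defined above allow iterating over the individual lanes of a
// VectorizedArray with a range-based for loop:
//
//   VectorizedArray<double> v = 1.0;
//   for (double &lane : v)
//     lane *= 2.0;   // every lane now holds 2.0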
331 
332 
333 
418 template <typename Number, std::size_t width>
419 class VectorizedArray
420  : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
421 {
422 public:
426  using value_type = Number;
427 
428  static_assert(width == 1,
429  "You specified an illegal width that is not supported.");
430 
435  VectorizedArray() = default;
436 
440  VectorizedArray(const Number scalar)
441  {
442  this->operator=(scalar);
443  }
444 
448  template <typename U>
449  VectorizedArray(const std::initializer_list<U> &list)
450  : VectorizedArrayBase<VectorizedArray<Number, width>, 1>(list)
451  {}
452 
457  VectorizedArray &
458  operator=(const Number scalar)
459  {
460  data = scalar;
461  return *this;
462  }
463 
469  Number &
470  operator[](const unsigned int comp)
471  {
472  (void)comp;
473  AssertIndexRange(comp, 1);
474  return data;
475  }
476 
482  const Number &
483  operator[](const unsigned int comp) const
484  {
485  (void)comp;
486  AssertIndexRange(comp, 1);
487  return data;
488  }
489 
494  VectorizedArray &
495  operator+=(const VectorizedArray &vec)
496  {
497  data += vec.data;
498  return *this;
499  }
500 
505  VectorizedArray &
506  operator-=(const VectorizedArray &vec)
507  {
508  data -= vec.data;
509  return *this;
510  }
511 
516  VectorizedArray &
517  operator*=(const VectorizedArray &vec)
518  {
519  data *= vec.data;
520  return *this;
521  }
522 
527  VectorizedArray &
528  operator/=(const VectorizedArray &vec)
529  {
530  data /= vec.data;
531  return *this;
532  }
533 
540  template <typename OtherNumber>
541  void
542  load(const OtherNumber *ptr)
543  {
544  data = *ptr;
545  }
546 
553  template <typename OtherNumber>
554  void
555  store(OtherNumber *ptr) const
556  {
557  *ptr = data;
558  }
559 
607  void
608  streaming_store(Number *ptr) const
609  {
610  *ptr = data;
611  }
612 
626  void
627  gather(const Number *base_ptr, const unsigned int *offsets)
628  {
629  data = base_ptr[offsets[0]];
630  }
631 
645  void
646  scatter(const unsigned int *offsets, Number *base_ptr) const
647  {
648  base_ptr[offsets[0]] = data;
649  }
650 
656  Number data;
657 
658 private:
664  VectorizedArray
665  get_sqrt() const
666  {
667  VectorizedArray res;
668  res.data = std::sqrt(data);
669  return res;
670  }
671 
677  VectorizedArray
678  get_abs() const
679  {
680  VectorizedArray res;
681  res.data = std::fabs(data);
682  return res;
683  }
684 
690  VectorizedArray
691  get_max(const VectorizedArray &other) const
692  {
693  VectorizedArray res;
694  res.data = std::max(data, other.data);
695  return res;
696  }
697 
703  VectorizedArray
704  get_min(const VectorizedArray &other) const
705  {
706  VectorizedArray res;
707  res.data = std::min(data, other.data);
708  return res;
709  }
710 
711  // Make a few functions friends.
712  template <typename Number2, std::size_t width2>
713  friend VectorizedArray<Number2, width2>
714  std::sqrt(const VectorizedArray<Number2, width2> &);
715  template <typename Number2, std::size_t width2>
716  friend VectorizedArray<Number2, width2>
717  std::abs(const VectorizedArray<Number2, width2> &);
718  template <typename Number2, std::size_t width2>
719  friend VectorizedArray<Number2, width2>
720  std::max(const VectorizedArray<Number2, width2> &,
721  const VectorizedArray<Number2, width2> &);
722  template <typename Number2, std::size_t width2>
723  friend VectorizedArray<Number2, width2>
724  std::min(const VectorizedArray<Number2, width2> &,
725  const VectorizedArray<Number2, width2> &);
726 };
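// Minimal usage sketch (illustrative, not from this file) of the generic
// width == 1 fallback defined above: it wraps a single scalar, so all
// operations act on one value at a time:
//
//   VectorizedArray<double, 1> a = 3.0;
//   a += VectorizedArray<double, 1>(4.0);   // now a[0] == 7.0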
727 
728 
729 
741 template <typename Number,
742  std::size_t width =
743  internal::VectorizedArrayWidthSpecifier<Number>::max_width>
744 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
745 make_vectorized_array(const Number &u)
746 {
747  VectorizedArray<Number, width> result = u;
748  return result;
749 }
750 
751 
752 
759 template <typename VectorizedArrayType>
760 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
761 make_vectorized_array(const typename VectorizedArrayType::value_type &u)
762 {
763  static_assert(
764  std::is_same<VectorizedArrayType,
765  VectorizedArray<typename VectorizedArrayType::value_type,
766  VectorizedArrayType::size()>>::value,
767  "VectorizedArrayType is not a VectorizedArray.");
768 
769  VectorizedArrayType result = u;
770  return result;
771 }
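// Usage sketch for the two make_vectorized_array() overloads above
// (illustrative user code; the explicitly chosen width is an assumption and
// must be supported by the instruction set the code is compiled for):
//
//   const auto a = make_vectorized_array(2.5);  // default width for double
//   const auto b = make_vectorized_array<VectorizedArray<float, 4>>(2.5f);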
772 
773 
774 
786 template <typename Number, std::size_t width>
787 inline DEAL_II_ALWAYS_INLINE void
788 gather(VectorizedArray<Number, width> &out,
789  const std::array<Number *, width> &ptrs,
790  const unsigned int offset)
791 {
792  for (unsigned int v = 0; v < width; ++v)
793  out.data[v] = ptrs[v][offset];
794 }
795 
796 
797 
823 template <typename Number, std::size_t width>
824 inline DEAL_II_ALWAYS_INLINE void
825 vectorized_load_and_transpose(const unsigned int n_entries,
826  const Number * in,
827  const unsigned int * offsets,
828  VectorizedArray<Number, width> *out)
829 {
830  for (unsigned int i = 0; i < n_entries; ++i)
831  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
832  out[i][v] = in[offsets[v] + i];
833 }
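// Illustrative sketch of the index transformation performed by the generic
// variant above (hypothetical data; after the call, out[i][v] equals
// in[offsets[v] + i]):
//
//   double                     in[40];   // filled elsewhere
//   const unsigned int         offsets[4] = {0, 10, 20, 30};
//   VectorizedArray<double, 4> out[2];
//   vectorized_load_and_transpose(2, in, offsets, out);
//   // now out[0][3] == in[30] and out[1][2] == in[21]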
834 
835 
847 template <typename Number, std::size_t width>
848 inline DEAL_II_ALWAYS_INLINE void
849 vectorized_load_and_transpose(const unsigned int n_entries,
850  const std::array<Number *, width> &in,
851  VectorizedArray<Number, width> *out)
852 {
853  for (unsigned int i = 0; i < n_entries; ++i)
854  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
855  out[i][v] = in[v][i];
856 }
857 
858 
859 
898 template <typename Number, std::size_t width>
899 inline DEAL_II_ALWAYS_INLINE void
900 vectorized_transpose_and_store(const bool add_into,
901  const unsigned int n_entries,
902  const VectorizedArray<Number, width> *in,
903  const unsigned int * offsets,
904  Number * out)
905 {
906  if (add_into)
907  for (unsigned int i = 0; i < n_entries; ++i)
908  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
909  out[offsets[v] + i] += in[i][v];
910  else
911  for (unsigned int i = 0; i < n_entries; ++i)
912  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
913  out[offsets[v] + i] = in[i][v];
914 }
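// Sketch of the inverse operation to vectorized_load_and_transpose()
// (illustrative): with add_into == false the call below leaves
// out[offsets[v] + i] == in[i][v]; with add_into == true the values are
// added to the data already stored in out:
//
//   VectorizedArray<double, 4> in[2];   // filled elsewhere
//   const unsigned int         offsets[4] = {0, 10, 20, 30};
//   double                     out[40]    = {};
//   vectorized_transpose_and_store(false, 2, in, offsets, out);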
915 
916 
928 template <typename Number, std::size_t width>
929 inline DEAL_II_ALWAYS_INLINE void
930 vectorized_transpose_and_store(const bool add_into,
931  const unsigned int n_entries,
932  const VectorizedArray<Number, width> *in,
933  std::array<Number *, width> & out)
934 {
935  if (add_into)
936  for (unsigned int i = 0; i < n_entries; ++i)
937  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
938  out[v][i] += in[i][v];
939  else
940  for (unsigned int i = 0; i < n_entries; ++i)
941  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
942  out[v][i] = in[i][v];
943 }
944 
945 
948 #ifndef DOXYGEN
949 
950 // for safety, also check that __AVX512F__ is defined in case the user manually
951 // set some conflicting compile flags which prevent compilation
952 
953 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
954 
958 template <>
959 class VectorizedArray<double, 8>
960  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
961 {
962 public:
966  using value_type = double;
967 
972  VectorizedArray() = default;
973 
977  VectorizedArray(const double scalar)
978  {
979  this->operator=(scalar);
980  }
981 
985  template <typename U>
986  VectorizedArray(const std::initializer_list<U> &list)
987  : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
988  {}
989 
994  VectorizedArray &
995  operator=(const double x)
996  {
997  data = _mm512_set1_pd(x);
998  return *this;
999  }
1000 
1005  double &
1006  operator[](const unsigned int comp)
1007  {
1008  AssertIndexRange(comp, 8);
1009  return *(reinterpret_cast<double *>(&data) + comp);
1010  }
1011 
1016  const double &
1017  operator[](const unsigned int comp) const
1018  {
1019  AssertIndexRange(comp, 8);
1020  return *(reinterpret_cast<const double *>(&data) + comp);
1021  }
1022 
1027  VectorizedArray &
1028  operator+=(const VectorizedArray &vec)
1029  {
1030  // if the compiler supports vector arithmetic, we can simply use +=
1031  // operator on the given data type. this allows the compiler to combine
1032  // additions with multiplication (fused multiply-add) if those
1033  // instructions are available. Otherwise, we need to use the built-in
1034  // intrinsic command for __m512d
1035 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1036  data += vec.data;
1037 # else
1038  data = _mm512_add_pd(data, vec.data);
1039 # endif
1040  return *this;
1041  }
1042 
1047  VectorizedArray &
1048  operator-=(const VectorizedArray &vec)
1049  {
1050 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1051  data -= vec.data;
1052 # else
1053  data = _mm512_sub_pd(data, vec.data);
1054 # endif
1055  return *this;
1056  }
1061  VectorizedArray &
1062  operator*=(const VectorizedArray &vec)
1063  {
1064 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1065  data *= vec.data;
1066 # else
1067  data = _mm512_mul_pd(data, vec.data);
1068 # endif
1069  return *this;
1070  }
1071 
1076  VectorizedArray &
1077  operator/=(const VectorizedArray &vec)
1078  {
1079 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1080  data /= vec.data;
1081 # else
1082  data = _mm512_div_pd(data, vec.data);
1083 # endif
1084  return *this;
1085  }
1086 
1093  void
1094  load(const double *ptr)
1095  {
1096  data = _mm512_loadu_pd(ptr);
1097  }
1098 
1100  void
1101  load(const float *ptr)
1102  {
1103  data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
1104  }
1105 
1113  void
1114  store(double *ptr) const
1115  {
1116  _mm512_storeu_pd(ptr, data);
1117  }
1118 
1120  void
1121  store(float *ptr) const
1122  {
1123  _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
1124  }
1125 
1131  void
1132  streaming_store(double *ptr) const
1133  {
1134  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1135  ExcMessage("Memory not aligned"));
1136  _mm512_stream_pd(ptr, data);
1137  }
1138 
1152  void
1153  gather(const double *base_ptr, const unsigned int *offsets)
1154  {
1155  // unfortunately, there does not appear to be a 256 bit integer load, so
1156  // do it by some reinterpret casts here. this is allowed because the Intel
1157  // API allows aliasing between different vector types.
1158  const __m256 index_val =
1159  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1160  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1161 
1162  // work around a warning with gcc-12 about an uninitialized initial state
1163  // for gather by starting with a zero guess, even though all lanes will be
1164  // overwritten
1165  __m512d zero = {};
1166  __mmask8 mask = 0xFF;
1167 
1168  data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
1169  }
1170 
1184  void
1185  scatter(const unsigned int *offsets, double *base_ptr) const
1186  {
1187  for (unsigned int i = 0; i < 8; ++i)
1188  for (unsigned int j = i + 1; j < 8; ++j)
1189  Assert(offsets[i] != offsets[j],
1190  ExcMessage("Result of scatter undefined if two offset elements"
1191  " point to the same position"));
1192 
1193  // unfortunately, there does not appear to be a 256 bit integer load, so
1194  // do it by some reinterpret casts here. this is allowed because the Intel
1195  // API allows aliasing between different vector types.
1196  const __m256 index_val =
1197  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1198  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1199  _mm512_i32scatter_pd(base_ptr, index, data, 8);
1200  }
1201 
1207  __m512d data;
1208 
1209 private:
1215  VectorizedArray
1216  get_sqrt() const
1217  {
1218  VectorizedArray res;
1219  res.data = _mm512_sqrt_pd(data);
1220  return res;
1221  }
1222 
1228  VectorizedArray
1229  get_abs() const
1230  {
1231  // to compute the absolute value, perform bitwise andnot with -0. This
1232  // will leave all value and exponent bits unchanged but force the sign
1233  // value to +. Since there is no andnot for AVX512, we interpret the data
1234  // as 64 bit integers and do the andnot on those types (note that andnot
1235  // is a bitwise operation so the data type does not matter)
1236  __m512d mask = _mm512_set1_pd(-0.);
1237  VectorizedArray res;
1238  res.data = reinterpret_cast<__m512d>(
1239  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1240  reinterpret_cast<__m512i>(data)));
1241  return res;
1242  }
1243 
1249  VectorizedArray
1250  get_max(const VectorizedArray &other) const
1251  {
1252  VectorizedArray res;
1253  res.data = _mm512_max_pd(data, other.data);
1254  return res;
1255  }
1256 
1262  VectorizedArray
1263  get_min(const VectorizedArray &other) const
1264  {
1265  VectorizedArray res;
1266  res.data = _mm512_min_pd(data, other.data);
1267  return res;
1268  }
1269 
1270  // Make a few functions friends.
1271  template <typename Number2, std::size_t width2>
1272  friend VectorizedArray<Number2, width2>
1273  std::sqrt(const VectorizedArray<Number2, width2> &);
1274  template <typename Number2, std::size_t width2>
1275  friend VectorizedArray<Number2, width2>
1276  std::abs(const VectorizedArray<Number2, width2> &);
1277  template <typename Number2, std::size_t width2>
1278  friend VectorizedArray<Number2, width2>
1279  std::max(const VectorizedArray<Number2, width2> &,
1280  const VectorizedArray<Number2, width2> &);
1281  template <typename Number2, std::size_t width2>
1282  friend VectorizedArray<Number2, width2>
1283  std::min(const VectorizedArray<Number2, width2> &,
1284  const VectorizedArray<Number2, width2> &);
1285 };
1286 
1287 
1288 
1292 template <>
1293 inline DEAL_II_ALWAYS_INLINE void
1294 vectorized_load_and_transpose(const unsigned int n_entries,
1295  const double * in,
1296  const unsigned int * offsets,
1297  VectorizedArray<double, 8> * out)
1298 {
1299  // do not do the full transpose here because the code would be long and
1300  // would most likely not pay off: many processors have two load units
1301  // (for the 8 load instructions above) but only 1 permute unit (for the 8
1302  // shuffle/unpack instructions). Rather, start the transposition on the
1303  // vectorized array of half the size, i.e., with 256 bits
1304  const unsigned int n_chunks = n_entries / 4;
1305  for (unsigned int i = 0; i < n_chunks; ++i)
1306  {
1307  __m512d t0, t1, t2, t3 = {};
1308 
1309  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1310  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1311  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1312  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1313  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1314  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1315  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1316  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1317 
1318  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1319  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1320  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1321  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1322  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1323  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1324  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1325  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1326  }
1327  // remainder loop of work that does not divide by 4
1328  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1329  out[i].gather(in + i, offsets);
1330 }
1331 
1332 
1333 
1337 template <>
1338 inline DEAL_II_ALWAYS_INLINE void
1339 vectorized_load_and_transpose(const unsigned int n_entries,
1340  const std::array<double *, 8> &in,
1341  VectorizedArray<double, 8> * out)
1342 {
1343  const unsigned int n_chunks = n_entries / 4;
1344  for (unsigned int i = 0; i < n_chunks; ++i)
1345  {
1346  __m512d t0, t1, t2, t3 = {};
1347 
1348  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1349  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1350  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1351  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1352  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1353  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1354  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1355  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1356 
1357  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1358  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1359  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1360  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1361  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1362  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1363  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1364  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1365  }
1366 
1367  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1368  gather(out[i], in, i);
1369 }
1370 
1371 
1372 
1376 template <>
1377 inline DEAL_II_ALWAYS_INLINE void
1378 vectorized_transpose_and_store(const bool add_into,
1379  const unsigned int n_entries,
1380  const VectorizedArray<double, 8> *in,
1381  const unsigned int * offsets,
1382  double * out)
1383 {
1384  // as for the load, we split the store operations into 256 bit units to
1385  // better balance between code size, shuffle instructions, and stores
1386  const unsigned int n_chunks = n_entries / 4;
1387  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1388  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1389  for (unsigned int i = 0; i < n_chunks; ++i)
1390  {
1391  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1392  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1393  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1394  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1395  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1396  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1397  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1398  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1399  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1400  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1401  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1402  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1403  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1404  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1405  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1406  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1407 
1408  // Cannot use the same store instructions in both paths of the 'if'
1409  // because the compiler cannot know that there is no aliasing
1410  // between pointers
1411  if (add_into)
1412  {
1413  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1414  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1415  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1416  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1417  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1418  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1419  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1420  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1421  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1422  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1423  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1424  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1425  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1426  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1427  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1428  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1429  }
1430  else
1431  {
1432  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1433  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1434  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1435  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1436  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1437  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1438  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1439  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1440  }
1441  }
1442 
1443  // remainder loop of work that does not divide by 4
1444  if (add_into)
1445  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1446  for (unsigned int v = 0; v < 8; ++v)
1447  out[offsets[v] + i] += in[i][v];
1448  else
1449  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1450  for (unsigned int v = 0; v < 8; ++v)
1451  out[offsets[v] + i] = in[i][v];
1452 }
1453 
1454 
1455 
1459 template <>
1460 inline DEAL_II_ALWAYS_INLINE void
1461 vectorized_transpose_and_store(const bool add_into,
1462  const unsigned int n_entries,
1463  const VectorizedArray<double, 8> *in,
1464  std::array<double *, 8> & out)
1465 {
1466  // see the comments in the vectorized_transpose_and_store above
1467 
1468  const unsigned int n_chunks = n_entries / 4;
1469  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1470  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1471  for (unsigned int i = 0; i < n_chunks; ++i)
1472  {
1473  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1474  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1475  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1476  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1477  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1478  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1479  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1480  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1481  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1482  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1483  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1484  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1485  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1486  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1487  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1488  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1489 
1490  if (add_into)
1491  {
1492  res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1493  _mm256_storeu_pd(out[0] + 4 * i, res0);
1494  res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1495  _mm256_storeu_pd(out[1] + 4 * i, res1);
1496  res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1497  _mm256_storeu_pd(out[2] + 4 * i, res2);
1498  res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1499  _mm256_storeu_pd(out[3] + 4 * i, res3);
1500  res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1501  _mm256_storeu_pd(out[4] + 4 * i, res4);
1502  res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1503  _mm256_storeu_pd(out[5] + 4 * i, res5);
1504  res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1505  _mm256_storeu_pd(out[6] + 4 * i, res6);
1506  res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1507  _mm256_storeu_pd(out[7] + 4 * i, res7);
1508  }
1509  else
1510  {
1511  _mm256_storeu_pd(out[0] + 4 * i, res0);
1512  _mm256_storeu_pd(out[1] + 4 * i, res1);
1513  _mm256_storeu_pd(out[2] + 4 * i, res2);
1514  _mm256_storeu_pd(out[3] + 4 * i, res3);
1515  _mm256_storeu_pd(out[4] + 4 * i, res4);
1516  _mm256_storeu_pd(out[5] + 4 * i, res5);
1517  _mm256_storeu_pd(out[6] + 4 * i, res6);
1518  _mm256_storeu_pd(out[7] + 4 * i, res7);
1519  }
1520  }
1521 
1522  if (add_into)
1523  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1524  for (unsigned int v = 0; v < 8; ++v)
1525  out[v][i] += in[i][v];
1526  else
1527  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1528  for (unsigned int v = 0; v < 8; ++v)
1529  out[v][i] = in[i][v];
1530 }
1531 
1532 
1533 
1537 template <>
1538 class VectorizedArray<float, 16>
1539  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
1540 {
1541 public:
1545  using value_type = float;
1546 
1551  VectorizedArray() = default;
1552 
1556  VectorizedArray(const float scalar)
1557  {
1558  this->operator=(scalar);
1559  }
1560 
1564  template <typename U>
1565  VectorizedArray(const std::initializer_list<U> &list)
1566  : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
1567  {}
1568 
1573  VectorizedArray &
1574  operator=(const float x)
1575  {
1576  data = _mm512_set1_ps(x);
1577  return *this;
1578  }
1579 
1584  float &
1585  operator[](const unsigned int comp)
1586  {
1587  AssertIndexRange(comp, 16);
1588  return *(reinterpret_cast<float *>(&data) + comp);
1589  }
1590 
1595  const float &
1596  operator[](const unsigned int comp) const
1597  {
1598  AssertIndexRange(comp, 16);
1599  return *(reinterpret_cast<const float *>(&data) + comp);
1600  }
1601 
1606  VectorizedArray &
1607  operator+=(const VectorizedArray &vec)
1608  {
1609  // if the compiler supports vector arithmetic, we can simply use +=
1610  // operator on the given data type. this allows the compiler to combine
1611  // additions with multiplication (fused multiply-add) if those
1612  // instructions are available. Otherwise, we need to use the built-in
1613  // intrinsic command for __m512
1614 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1615  data += vec.data;
1616 # else
1617  data = _mm512_add_ps(data, vec.data);
1618 # endif
1619  return *this;
1620  }
1621 
1626  VectorizedArray &
1627  operator-=(const VectorizedArray &vec)
1628  {
1629 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1630  data -= vec.data;
1631 # else
1632  data = _mm512_sub_ps(data, vec.data);
1633 # endif
1634  return *this;
1635  }
1640  VectorizedArray &
1641  operator*=(const VectorizedArray &vec)
1642  {
1643 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1644  data *= vec.data;
1645 # else
1646  data = _mm512_mul_ps(data, vec.data);
1647 # endif
1648  return *this;
1649  }
1650 
1655  VectorizedArray &
1656  operator/=(const VectorizedArray &vec)
1657  {
1658 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1659  data /= vec.data;
1660 # else
1661  data = _mm512_div_ps(data, vec.data);
1662 # endif
1663  return *this;
1664  }
1665 
1672  void
1673  load(const float *ptr)
1674  {
1675  data = _mm512_loadu_ps(ptr);
1676  }
1677 
1685  void
1686  store(float *ptr) const
1687  {
1688  _mm512_storeu_ps(ptr, data);
1689  }
1690 
1696  void
1697  streaming_store(float *ptr) const
1698  {
1699  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1700  ExcMessage("Memory not aligned"));
1701  _mm512_stream_ps(ptr, data);
1702  }
1703 
1717  void
1718  gather(const float *base_ptr, const unsigned int *offsets)
1719  {
1720  // unfortunately, there does not appear to be a 512 bit integer load, so
1721  // do it by some reinterpret casts here. this is allowed because the Intel
1722  // API allows aliasing between different vector types.
1723  const __m512 index_val =
1724  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1725  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1726 
1727  // work around a warning with gcc-12 about an uninitialized initial state
1728  // for gather by starting with a zero guess, even though all lanes will be
1729  // overwritten
1730  __m512 zero = {};
1731  __mmask16 mask = 0xFFFF;
1732 
1733  data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
1734  }
1735 
1749  void
1750  scatter(const unsigned int *offsets, float *base_ptr) const
1751  {
1752  for (unsigned int i = 0; i < 16; ++i)
1753  for (unsigned int j = i + 1; j < 16; ++j)
1754  Assert(offsets[i] != offsets[j],
1755  ExcMessage("Result of scatter undefined if two offset elements"
1756  " point to the same position"));
1757 
1758  // unfortunately, there does not appear to be a 512 bit integer load, so
1759  // do it by some reinterpret casts here. this is allowed because the Intel
1760  // API allows aliasing between different vector types.
1761  const __m512 index_val =
1762  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1763  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1764  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1765  }
1766 
1772  __m512 data;
1773 
1774 private:
1780  VectorizedArray
1781  get_sqrt() const
1782  {
1783  VectorizedArray res;
1784  res.data = _mm512_sqrt_ps(data);
1785  return res;
1786  }
1787 
1793  VectorizedArray
1794  get_abs() const
1795  {
1796  // to compute the absolute value, perform bitwise andnot with -0. This
1797  // will leave all value and exponent bits unchanged but force the sign
1798  // value to +. Since there is no andnot for AVX512, we interpret the data
1799  // as 32 bit integers and do the andnot on those types (note that andnot
1800  // is a bitwise operation so the data type does not matter)
1801  __m512 mask = _mm512_set1_ps(-0.f);
1802  VectorizedArray res;
1803  res.data = reinterpret_cast<__m512>(
1804  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1805  reinterpret_cast<__m512i>(data)));
1806  return res;
1807  }
1808 
1814  VectorizedArray
1815  get_max(const VectorizedArray &other) const
1816  {
1817  VectorizedArray res;
1818  res.data = _mm512_max_ps(data, other.data);
1819  return res;
1820  }
1821 
1827  VectorizedArray
1828  get_min(const VectorizedArray &other) const
1829  {
1830  VectorizedArray res;
1831  res.data = _mm512_min_ps(data, other.data);
1832  return res;
1833  }
1834 
1835  // Make a few functions friends.
1836  template <typename Number2, std::size_t width2>
1837  friend VectorizedArray<Number2, width2>
1838  std::sqrt(const VectorizedArray<Number2, width2> &);
1839  template <typename Number2, std::size_t width2>
1840  friend VectorizedArray<Number2, width2>
1841  std::abs(const VectorizedArray<Number2, width2> &);
1842  template <typename Number2, std::size_t width2>
1843  friend VectorizedArray<Number2, width2>
1844  std::max(const VectorizedArray<Number2, width2> &,
1845  const VectorizedArray<Number2, width2> &);
1846  template <typename Number2, std::size_t width2>
1847  friend VectorizedArray<Number2, width2>
1848  std::min(const VectorizedArray<Number2, width2> &,
1849  const VectorizedArray<Number2, width2> &);
1850 };
1851 
1852 
1853 
1857 template <>
1858 inline DEAL_II_ALWAYS_INLINE void
1859 vectorized_load_and_transpose(const unsigned int n_entries,
1860  const float * in,
1861  const unsigned int * offsets,
1862  VectorizedArray<float, 16> *out)
1863 {
1864  // Similar to the double case, we perform the work on smaller entities. In
1865  // this case, we start from 128 bit arrays and insert them into a full 512
1866  // bit index. This reduces the code size and register pressure because we do
1867  // shuffles on 4 numbers rather than 16.
1868  const unsigned int n_chunks = n_entries / 4;
1869 
1870  // To avoid warnings about uninitialized variables, need to initialize one
1871  // variable to a pre-existing value in out, which will never get used in
1872  // the end. Keep the initialization outside the loop because of a bug in
1873  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
1874  // case t3 is initialized to zero (inside/outside of loop), see
1875  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1876  __m512 t0, t1, t2, t3;
1877  if (n_chunks > 0)
1878  t3 = out[0].data;
1879  for (unsigned int i = 0; i < n_chunks; ++i)
1880  {
1881  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1882  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1883  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1884  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1885  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1886  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1887  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1888  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1889  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1890  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1891  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1892  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1893  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1894  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1895  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1896  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1897 
1898  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1899  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1900  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1901  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1902 
1903  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1904  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1905  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1906  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1907  }
1908 
1909  // remainder loop of work that does not divide by 4
1910  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1911  out[i].gather(in + i, offsets);
1912 }
1913 
1914 
1915 
1919 template <>
1920 inline DEAL_II_ALWAYS_INLINE void
1921 vectorized_load_and_transpose(const unsigned int n_entries,
1922  const std::array<float *, 16> &in,
1923  VectorizedArray<float, 16> *out)
1924 {
1925  // see the comments in the vectorized_load_and_transpose above
1926 
1927  const unsigned int n_chunks = n_entries / 4;
1928 
1929  __m512 t0, t1, t2, t3;
1930  if (n_chunks > 0)
1931  t3 = out[0].data;
1932  for (unsigned int i = 0; i < n_chunks; ++i)
1933  {
1934  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1935  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1936  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1937  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1938  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1939  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1940  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1941  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1942  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1943  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1944  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1945  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1946  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1947  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1948  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1949  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1950 
1951  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1952  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1953  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1954  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1955 
1956  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1957  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1958  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1959  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1960  }
1961 
1962  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1963  gather(out[i], in, i);
1964 }
1965 
1966 
1967 
1971 template <>
1972 inline DEAL_II_ALWAYS_INLINE void
1973 vectorized_transpose_and_store(const bool add_into,
1974  const unsigned int n_entries,
1975  const VectorizedArray<float, 16> *in,
1976  const unsigned int * offsets,
1977  float * out)
1978 {
1979  const unsigned int n_chunks = n_entries / 4;
1980  for (unsigned int i = 0; i < n_chunks; ++i)
1981  {
1982  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1983  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1984  __m512 t2 =
1985  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1986  __m512 t3 =
1987  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1988  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1989  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1990  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1991  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1992 
1993  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1994  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1995  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1996  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1997  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1998  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1999  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2000  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2001  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2002  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2003  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2004  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2005  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2006  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2007  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2008  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2009 
2010  // Cannot use the same store instructions in both paths of the 'if'
2011  // because the compiler cannot know that there is no aliasing between
2012  // pointers
2013  if (add_into)
2014  {
2015  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2016  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2017  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2018  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2019  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2020  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2021  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2022  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2023  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2024  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2025  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2026  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2027  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2028  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2029  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2030  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2031  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
2032  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2033  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
2034  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2035  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
2036  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2037  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
2038  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2039  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
2040  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2041  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
2042  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2043  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
2044  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2045  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
2046  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2047  }
2048  else
2049  {
2050  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2051  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2052  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2053  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2054  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2055  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2056  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2057  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2058  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2059  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2060  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2061  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2062  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2063  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2064  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2065  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2066  }
2067  }
2068 
2069  // remainder loop of work that does not divide by 4
2070  if (add_into)
2071  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2072  for (unsigned int v = 0; v < 16; ++v)
2073  out[offsets[v] + i] += in[i][v];
2074  else
2075  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2076  for (unsigned int v = 0; v < 16; ++v)
2077  out[offsets[v] + i] = in[i][v];
2078 }
2079 
2080 
2081 
2085 template <>
2086 inline DEAL_II_ALWAYS_INLINE void
2087 vectorized_transpose_and_store(const bool add_into,
2088  const unsigned int n_entries,
2089  const VectorizedArray<float, 16> *in,
2090  std::array<float *, 16> & out)
2091 {
2092  // see the comments in the vectorized_transpose_and_store above
2093 
2094  const unsigned int n_chunks = n_entries / 4;
2095  for (unsigned int i = 0; i < n_chunks; ++i)
2096  {
2097  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2098  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2099  __m512 t2 =
2100  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2101  __m512 t3 =
2102  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2103  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2104  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2105  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2106  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2107 
2108  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2109  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2110  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2111  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2112  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2113  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2114  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2115  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2116  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2117  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2118  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2119  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2120  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2121  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2122  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2123  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2124 
2125  if (add_into)
2126  {
2127  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2128  _mm_storeu_ps(out[0] + 4 * i, res0);
2129  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2130  _mm_storeu_ps(out[1] + 4 * i, res1);
2131  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2132  _mm_storeu_ps(out[2] + 4 * i, res2);
2133  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2134  _mm_storeu_ps(out[3] + 4 * i, res3);
2135  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2136  _mm_storeu_ps(out[4] + 4 * i, res4);
2137  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2138  _mm_storeu_ps(out[5] + 4 * i, res5);
2139  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2140  _mm_storeu_ps(out[6] + 4 * i, res6);
2141  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2142  _mm_storeu_ps(out[7] + 4 * i, res7);
2143  res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2144  _mm_storeu_ps(out[8] + 4 * i, res8);
2145  res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2146  _mm_storeu_ps(out[9] + 4 * i, res9);
2147  res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2148  _mm_storeu_ps(out[10] + 4 * i, res10);
2149  res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2150  _mm_storeu_ps(out[11] + 4 * i, res11);
2151  res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2152  _mm_storeu_ps(out[12] + 4 * i, res12);
2153  res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2154  _mm_storeu_ps(out[13] + 4 * i, res13);
2155  res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2156  _mm_storeu_ps(out[14] + 4 * i, res14);
2157  res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2158  _mm_storeu_ps(out[15] + 4 * i, res15);
2159  }
2160  else
2161  {
2162  _mm_storeu_ps(out[0] + 4 * i, res0);
2163  _mm_storeu_ps(out[1] + 4 * i, res1);
2164  _mm_storeu_ps(out[2] + 4 * i, res2);
2165  _mm_storeu_ps(out[3] + 4 * i, res3);
2166  _mm_storeu_ps(out[4] + 4 * i, res4);
2167  _mm_storeu_ps(out[5] + 4 * i, res5);
2168  _mm_storeu_ps(out[6] + 4 * i, res6);
2169  _mm_storeu_ps(out[7] + 4 * i, res7);
2170  _mm_storeu_ps(out[8] + 4 * i, res8);
2171  _mm_storeu_ps(out[9] + 4 * i, res9);
2172  _mm_storeu_ps(out[10] + 4 * i, res10);
2173  _mm_storeu_ps(out[11] + 4 * i, res11);
2174  _mm_storeu_ps(out[12] + 4 * i, res12);
2175  _mm_storeu_ps(out[13] + 4 * i, res13);
2176  _mm_storeu_ps(out[14] + 4 * i, res14);
2177  _mm_storeu_ps(out[15] + 4 * i, res15);
2178  }
2179  }
2180 
2181  if (add_into)
2182  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2183  for (unsigned int v = 0; v < 16; ++v)
2184  out[v][i] += in[i][v];
2185  else
2186  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2187  for (unsigned int v = 0; v < 16; ++v)
2188  out[v][i] = in[i][v];
2189 }
2190 
2191 # endif
2192 
2193 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2194 
2198 template <>
2199 class VectorizedArray<double, 4>
2200  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2201 {
2202 public:
2206  using value_type = double;
2207 
2212  VectorizedArray() = default;
2213 
2217  VectorizedArray(const double scalar)
2218  {
2219  this->operator=(scalar);
2220  }
2221 
2225  template <typename U>
2226  VectorizedArray(const std::initializer_list<U> &list)
2227  : VectorizedArrayBase<VectorizedArray<double, 4>, 4>(list)
2228  {}
2229 
2234  VectorizedArray &
2235  operator=(const double x)
2236  {
2237  data = _mm256_set1_pd(x);
2238  return *this;
2239  }
2240 
2245  double &
2246  operator[](const unsigned int comp)
2247  {
2248  AssertIndexRange(comp, 4);
2249  return *(reinterpret_cast<double *>(&data) + comp);
2250  }
2251 
2256  const double &
2257  operator[](const unsigned int comp) const
2258  {
2259  AssertIndexRange(comp, 4);
2260  return *(reinterpret_cast<const double *>(&data) + comp);
2261  }
2262 
2267  VectorizedArray &
2268  operator+=(const VectorizedArray &vec)
2269  {
2270  // if the compiler supports vector arithmetic, we can simply use +=
2271  // operator on the given data type. this allows the compiler to combine
2272  // additions with multiplication (fused multiply-add) if those
2273  // instructions are available. Otherwise, we need to use the built-in
2274  // intrinsic command for __m256d
2275 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2276  data += vec.data;
2277 # else
2278  data = _mm256_add_pd(data, vec.data);
2279 # endif
2280  return *this;
2281  }
2282 
2287  VectorizedArray &
2288  operator-=(const VectorizedArray &vec)
2289  {
2290 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2291  data -= vec.data;
2292 # else
2293  data = _mm256_sub_pd(data, vec.data);
2294 # endif
2295  return *this;
2296  }
2301  VectorizedArray &
2302  operator*=(const VectorizedArray &vec)
2303  {
2304 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2305  data *= vec.data;
2306 # else
2307  data = _mm256_mul_pd(data, vec.data);
2308 # endif
2309  return *this;
2310  }
2311 
2316  VectorizedArray &
2317  operator/=(const VectorizedArray &vec)
2318  {
2319 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2320  data /= vec.data;
2321 # else
2322  data = _mm256_div_pd(data, vec.data);
2323 # endif
2324  return *this;
2325  }
2326 
2333  void
2334  load(const double *ptr)
2335  {
2336  data = _mm256_loadu_pd(ptr);
2337  }
2338 
2340  void
2341  load(const float *ptr)
2342  {
2343  data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
2344  }
2345 
2353  void
2354  store(double *ptr) const
2355  {
2356  _mm256_storeu_pd(ptr, data);
2357  }
2358 
2360  void
2361  store(float *ptr) const
2362  {
2363  _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
2364  }
2365 
2371  void
2372  streaming_store(double *ptr) const
2373  {
2374  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2375  ExcMessage("Memory not aligned"));
2376  _mm256_stream_pd(ptr, data);
2377  }
2378 
2392  void
2393  gather(const double *base_ptr, const unsigned int *offsets)
2394  {
2395 # ifdef __AVX2__
2396  // unfortunately, there does not appear to be a 128 bit integer load, so
2397  // do it by some reinterpret casts here. this is allowed because the Intel
2398  // API allows aliasing between different vector types.
2399  const __m128 index_val =
2400  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2401  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2402 
2403  // work around a warning with gcc-12 about an uninitialized initial state
2404  // for gather by starting with a zero guess, even though all lanes will be
2405  // overwritten
2406  __m256d zero = _mm256_setzero_pd();
2407  __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);
2408 
2409  data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2410 # else
2411  for (unsigned int i = 0; i < 4; ++i)
2412  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2413 # endif
2414  }
2415 
2429  void
2430  scatter(const unsigned int *offsets, double *base_ptr) const
2431  {
2432  // no scatter operation in AVX/AVX2
2433  for (unsigned int i = 0; i < 4; ++i)
2434  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2435  }
2436 
2442  __m256d data;
2443 
2444 private:
2450  VectorizedArray
2451  get_sqrt() const
2452  {
2453  VectorizedArray res;
2454  res.data = _mm256_sqrt_pd(data);
2455  return res;
2456  }
2457 
2463  VectorizedArray
2464  get_abs() const
2465  {
2466  // to compute the absolute value, perform bitwise andnot with -0. This
2467  // will leave all value and exponent bits unchanged but force the sign
2468  // value to +.
2469  __m256d mask = _mm256_set1_pd(-0.);
2470  VectorizedArray res;
2471  res.data = _mm256_andnot_pd(mask, data);
2472  return res;
2473  }
2474 
2480  VectorizedArray
2481  get_max(const VectorizedArray &other) const
2482  {
2483  VectorizedArray res;
2484  res.data = _mm256_max_pd(data, other.data);
2485  return res;
2486  }
2487 
2493  VectorizedArray
2494  get_min(const VectorizedArray &other) const
2495  {
2496  VectorizedArray res;
2497  res.data = _mm256_min_pd(data, other.data);
2498  return res;
2499  }
2500 
2501  // Make a few functions friends.
2502  template <typename Number2, std::size_t width2>
2503  friend VectorizedArray<Number2, width2>
2504  std::sqrt(const VectorizedArray<Number2, width2> &);
2505  template <typename Number2, std::size_t width2>
2506  friend VectorizedArray<Number2, width2>
2507  std::abs(const VectorizedArray<Number2, width2> &);
2508  template <typename Number2, std::size_t width2>
2509  friend VectorizedArray<Number2, width2>
2510  std::max(const VectorizedArray<Number2, width2> &,
2511  const VectorizedArray<Number2, width2> &);
2512  template <typename Number2, std::size_t width2>
2513  friend VectorizedArray<Number2, width2>
2514  std::min(const VectorizedArray<Number2, width2> &,
2515  const VectorizedArray<Number2, width2> &);
2516 };
2517 
2518 
2519 
2523 template <>
2524 inline DEAL_II_ALWAYS_INLINE void
2525 vectorized_load_and_transpose(const unsigned int n_entries,
2526  const double * in,
2527  const unsigned int * offsets,
2528  VectorizedArray<double, 4> * out)
2529 {
2530  const unsigned int n_chunks = n_entries / 4;
2531  const double * in0 = in + offsets[0];
2532  const double * in1 = in + offsets[1];
2533  const double * in2 = in + offsets[2];
2534  const double * in3 = in + offsets[3];
2535 
2536  for (unsigned int i = 0; i < n_chunks; ++i)
2537  {
2538  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2539  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2540  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2541  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2542  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2543  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2544  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2545  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2546  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2547  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2548  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2549  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2550  }
2551 
2552  // remainder loop of work that does not divide by 4
2553  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2554  out[i].gather(in + i, offsets);
2555 }
2556 
2557 
2558 
2562 template <>
2563 inline DEAL_II_ALWAYS_INLINE void
2564 vectorized_load_and_transpose(const unsigned int n_entries,
2565  const std::array<double *, 4> &in,
2566  VectorizedArray<double, 4> * out)
2567 {
2568  // see the comments in the vectorized_load_and_transpose above
2569 
2570  const unsigned int n_chunks = n_entries / 4;
2571  const double * in0 = in[0];
2572  const double * in1 = in[1];
2573  const double * in2 = in[2];
2574  const double * in3 = in[3];
2575 
2576  for (unsigned int i = 0; i < n_chunks; ++i)
2577  {
2578  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2579  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2580  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2581  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2582  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2583  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2584  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2585  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2586  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2587  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2588  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2589  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2590  }
2591 
2592  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2593  gather(out[i], in, i);
2594 }
2595 
2596 
2597 
2601 template <>
2602 inline DEAL_II_ALWAYS_INLINE void
2603 vectorized_transpose_and_store(const bool add_into,
2604  const unsigned int n_entries,
2605  const VectorizedArray<double, 4> *in,
2606  const unsigned int * offsets,
2607  double * out)
2608 {
2609  const unsigned int n_chunks = n_entries / 4;
2610  double * out0 = out + offsets[0];
2611  double * out1 = out + offsets[1];
2612  double * out2 = out + offsets[2];
2613  double * out3 = out + offsets[3];
2614  for (unsigned int i = 0; i < n_chunks; ++i)
2615  {
2616  __m256d u0 = in[4 * i + 0].data;
2617  __m256d u1 = in[4 * i + 1].data;
2618  __m256d u2 = in[4 * i + 2].data;
2619  __m256d u3 = in[4 * i + 3].data;
2620  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2621  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2622  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2623  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2624  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2625  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2626  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2627  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2628 
2629  // Cannot use the same store instructions in both paths of the 'if'
2630  // because the compiler cannot know that there is no aliasing between
2631  // pointers
2632  if (add_into)
2633  {
2634  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2635  _mm256_storeu_pd(out0 + 4 * i, res0);
2636  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2637  _mm256_storeu_pd(out1 + 4 * i, res1);
2638  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2639  _mm256_storeu_pd(out2 + 4 * i, res2);
2640  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2641  _mm256_storeu_pd(out3 + 4 * i, res3);
2642  }
2643  else
2644  {
2645  _mm256_storeu_pd(out0 + 4 * i, res0);
2646  _mm256_storeu_pd(out1 + 4 * i, res1);
2647  _mm256_storeu_pd(out2 + 4 * i, res2);
2648  _mm256_storeu_pd(out3 + 4 * i, res3);
2649  }
2650  }
2651 
2652  // remainder loop of work that does not divide by 4
2653  if (add_into)
2654  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2655  for (unsigned int v = 0; v < 4; ++v)
2656  out[offsets[v] + i] += in[i][v];
2657  else
2658  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2659  for (unsigned int v = 0; v < 4; ++v)
2660  out[offsets[v] + i] = in[i][v];
2661 }
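For reference, the scalar loop below (a sketch, not part of the header) spells out what the intrinsics in the function above compute, including the add_into distinction:

#include <deal.II/base/vectorization.h>

void transpose_and_store_reference(const bool                                add_into,
                                   const unsigned int                        n_entries,
                                   const dealii::VectorizedArray<double, 4> *in,
                                   const unsigned int *                      offsets,
                                   double *                                  out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < 4; ++v)
      if (add_into)
        out[offsets[v] + i] += in[i][v]; // accumulate into existing data
      else
        out[offsets[v] + i] = in[i][v]; // plain overwrite
}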
2662 
2663 
2664 
2668 template <>
2669 inline DEAL_II_ALWAYS_INLINE void
2670 vectorized_transpose_and_store(const bool add_into,
2671  const unsigned int n_entries,
2672  const VectorizedArray<double, 4> *in,
2673  std::array<double *, 4> & out)
2674 {
2675  // see the comments in the vectorized_transpose_and_store above
2676 
2677  const unsigned int n_chunks = n_entries / 4;
2678  double * out0 = out[0];
2679  double * out1 = out[1];
2680  double * out2 = out[2];
2681  double * out3 = out[3];
2682  for (unsigned int i = 0; i < n_chunks; ++i)
2683  {
2684  __m256d u0 = in[4 * i + 0].data;
2685  __m256d u1 = in[4 * i + 1].data;
2686  __m256d u2 = in[4 * i + 2].data;
2687  __m256d u3 = in[4 * i + 3].data;
2688  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2689  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2690  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2691  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2692  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2693  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2694  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2695  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2696 
2697  // Cannot use the same store instructions in both paths of the 'if'
2698  // because the compiler cannot know that there is no aliasing between
2699  // pointers
2700  if (add_into)
2701  {
2702  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2703  _mm256_storeu_pd(out0 + 4 * i, res0);
2704  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2705  _mm256_storeu_pd(out1 + 4 * i, res1);
2706  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2707  _mm256_storeu_pd(out2 + 4 * i, res2);
2708  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2709  _mm256_storeu_pd(out3 + 4 * i, res3);
2710  }
2711  else
2712  {
2713  _mm256_storeu_pd(out0 + 4 * i, res0);
2714  _mm256_storeu_pd(out1 + 4 * i, res1);
2715  _mm256_storeu_pd(out2 + 4 * i, res2);
2716  _mm256_storeu_pd(out3 + 4 * i, res3);
2717  }
2718  }
2719 
2720  // remainder loop of work that does not divide by 4
2721  if (add_into)
2722  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2723  for (unsigned int v = 0; v < 4; ++v)
2724  out[v][i] += in[i][v];
2725  else
2726  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2727  for (unsigned int v = 0; v < 4; ++v)
2728  out[v][i] = in[i][v];
2729 }
2730 
2731 
2732 
2736 template <>
2737 class VectorizedArray<float, 8>
2738  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
2739 {
2740 public:
2744  using value_type = float;
2745 
2750  VectorizedArray() = default;
2751 
2755  VectorizedArray(const float scalar)
2756  {
2757  this->operator=(scalar);
2758  }
2759 
2763  template <typename U>
2764  VectorizedArray(const std::initializer_list<U> &list)
2765  : VectorizedArrayBase<VectorizedArray<float, 8>, 8>(list)
2766  {}
2767 
2772  VectorizedArray &
2773  operator=(const float x)
2774  {
2775  data = _mm256_set1_ps(x);
2776  return *this;
2777  }
2778 
2783  float &
2784  operator[](const unsigned int comp)
2785  {
2786  AssertIndexRange(comp, 8);
2787  return *(reinterpret_cast<float *>(&data) + comp);
2788  }
2789 
2794  const float &
2795  operator[](const unsigned int comp) const
2796  {
2797  AssertIndexRange(comp, 8);
2798  return *(reinterpret_cast<const float *>(&data) + comp);
2799  }
2800 
2805  VectorizedArray &
2806  operator+=(const VectorizedArray &vec)
2807  {
2808  // if the compiler supports vector arithmetic, we can simply use +=
2809  // operator on the given data type. this allows the compiler to combine
2810  // additions with multiplication (fused multiply-add) if those
2811  // instructions are available. Otherwise, we need to use the built-in
2812  // intrinsic command for __m256d
2813 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2814  data += vec.data;
2815 # else
2816  data = _mm256_add_ps(data, vec.data);
2817 # endif
2818  return *this;
2819  }
2820 
2825  VectorizedArray &
2826  operator-=(const VectorizedArray &vec)
2827  {
2828 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2829  data -= vec.data;
2830 # else
2831  data = _mm256_sub_ps(data, vec.data);
2832 # endif
2833  return *this;
2834  }
2839  VectorizedArray &
2840  operator*=(const VectorizedArray &vec)
2841  {
2842 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2843  data *= vec.data;
2844 # else
2845  data = _mm256_mul_ps(data, vec.data);
2846 # endif
2847  return *this;
2848  }
2849 
2854  VectorizedArray &
2855  operator/=(const VectorizedArray &vec)
2856  {
2857 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2858  data /= vec.data;
2859 # else
2860  data = _mm256_div_ps(data, vec.data);
2861 # endif
2862  return *this;
2863  }
2864 
2871  void
2872  load(const float *ptr)
2873  {
2874  data = _mm256_loadu_ps(ptr);
2875  }
2876 
2884  void
2885  store(float *ptr) const
2886  {
2887  _mm256_storeu_ps(ptr, data);
2888  }
2889 
2895  void
2896  streaming_store(float *ptr) const
2897  {
2898  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2899  ExcMessage("Memory not aligned"));
2900  _mm256_stream_ps(ptr, data);
2901  }
2902 
2916  void
2917  gather(const float *base_ptr, const unsigned int *offsets)
2918  {
2919 # ifdef __AVX2__
2920  // unfortunately, there does not appear to be a 256 bit integer load, so
2921  // do it by some reinterpret casts here. this is allowed because the Intel
2922  // API allows aliasing between different vector types.
2923  const __m256 index_val =
2924  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2925  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2926 
2927  // work around a warning with gcc-12 about an uninitialized initial state
2928  // for gather by starting with a zero guess, even though all lanes will be
2929  // overwritten
2930  __m256 zero = _mm256_setzero_ps();
2931  __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);
2932 
2933  data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
2934 # else
2935  for (unsigned int i = 0; i < 8; ++i)
2936  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2937 # endif
2938  }
2939 
2953  void
2954  scatter(const unsigned int *offsets, float *base_ptr) const
2955  {
2956  // no scatter operation in AVX/AVX2
2957  for (unsigned int i = 0; i < 8; ++i)
2958  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2959  }
2960 
2966  __m256 data;
2967 
2968 private:
2973  DEAL_II_ALWAYS_INLINE
2974  VectorizedArray
2975  get_sqrt() const
2976  {
2977  VectorizedArray res;
2978  res.data = _mm256_sqrt_ps(data);
2979  return res;
2980  }
2981 
2986  DEAL_II_ALWAYS_INLINE
2987  VectorizedArray
2988  get_abs() const
2989  {
2990  // to compute the absolute value, perform bitwise andnot with -0. This
2991  // will leave all value and exponent bits unchanged but force the sign
2992  // value to +.
2993  __m256 mask = _mm256_set1_ps(-0.f);
2994  VectorizedArray res;
2995  res.data = _mm256_andnot_ps(mask, data);
2996  return res;
2997  }
2998 
3003  DEAL_II_ALWAYS_INLINE
3004  VectorizedArray
3005  get_max(const VectorizedArray &other) const
3006  {
3007  VectorizedArray res;
3008  res.data = _mm256_max_ps(data, other.data);
3009  return res;
3010  }
3011 
3016  DEAL_II_ALWAYS_INLINE
3017  VectorizedArray
3018  get_min(const VectorizedArray &other) const
3019  {
3020  VectorizedArray res;
3021  res.data = _mm256_min_ps(data, other.data);
3022  return res;
3023  }
3024 
3025  // Make a few functions friends.
3026  template <typename Number2, std::size_t width2>
3027  friend VectorizedArray<Number2, width2>
3028  std::sqrt(const VectorizedArray<Number2, width2> &);
3029  template <typename Number2, std::size_t width2>
3030  friend VectorizedArray<Number2, width2>
3031  std::abs(const VectorizedArray<Number2, width2> &);
3032  template <typename Number2, std::size_t width2>
3033  friend VectorizedArray<Number2, width2>
3034  std::max(const VectorizedArray<Number2, width2> &,
3035  const VectorizedArray<Number2, width2> &);
3036  template <typename Number2, std::size_t width2>
3037  friend VectorizedArray<Number2, width2>
3038  std::min(const VectorizedArray<Number2, width2> &,
3039  const VectorizedArray<Number2, width2> &);
3040 };
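The sign-mask trick used by get_abs() above can be written out for a single float as follows (an illustrative sketch, not library code): clearing the IEEE-754 sign bit is exactly what the andnot with -0.f does, lane by lane.

#include <cstdint>
#include <cstring>

inline float abs_via_sign_mask(const float x)
{
  std::uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits)); // reinterpret the float's bit pattern
  bits &= 0x7fffffffu;                  // clear the sign bit, keep exponent and mantissa
  float result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}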
3041 
3042 
3043 
3047 template <>
3048 inline DEAL_II_ALWAYS_INLINE void
3049 vectorized_load_and_transpose(const unsigned int n_entries,
3050  const float * in,
3051  const unsigned int * offsets,
3052  VectorizedArray<float, 8> *out)
3053 {
3054  const unsigned int n_chunks = n_entries / 4;
3055  for (unsigned int i = 0; i < n_chunks; ++i)
3056  {
3057  // To avoid warnings about uninitialized variables, need to initialize
3058  // one variable with zero before using it.
3059  __m256 t0, t1, t2, t3 = {};
3060  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3061  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3062  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3063  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3064  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3065  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3066  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3067  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3068 
3069  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3070  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3071  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3072  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3073  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3074  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3075  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3076  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3077  }
3078 
3079  // remainder loop of work that does not divide by 4
3080  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3081  out[i].gather(in + i, offsets);
3082 }
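The shuffles above implement the following scalar reference semantics (a sketch for illustration only); the remainder loop already does exactly this for the last few entries:

#include <deal.II/base/vectorization.h>

void load_and_transpose_reference(const unsigned int                 n_entries,
                                  const float *                      in,
                                  const unsigned int *               offsets,
                                  dealii::VectorizedArray<float, 8> *out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < 8; ++v)
      out[i][v] = in[offsets[v] + i];
}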
3083 
3084 
3085 
3089 template <>
3090 inline DEAL_II_ALWAYS_INLINE void
3091 vectorized_load_and_transpose(const unsigned int n_entries,
3092  const std::array<float *, 8> &in,
3093  VectorizedArray<float, 8> *out)
3094 {
3095  // see the comments in the vectorized_load_and_transpose above
3096 
3097  const unsigned int n_chunks = n_entries / 4;
3098  for (unsigned int i = 0; i < n_chunks; ++i)
3099  {
3100  __m256 t0, t1, t2, t3 = {};
3101  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3102  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3103  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3104  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3105  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3106  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3107  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3108  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3109 
3110  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3111  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3112  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3113  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3114  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3115  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3116  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3117  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3118  }
3119 
3120  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3121  gather(out[i], in, i);
3122 }
3123 
3124 
3125 
3129 template <>
3130 inline DEAL_II_ALWAYS_INLINE void
3131 vectorized_transpose_and_store(const bool add_into,
3132  const unsigned int n_entries,
3133  const VectorizedArray<float, 8> *in,
3134  const unsigned int * offsets,
3135  float * out)
3136 {
3137  const unsigned int n_chunks = n_entries / 4;
3138  for (unsigned int i = 0; i < n_chunks; ++i)
3139  {
3140  __m256 u0 = in[4 * i + 0].data;
3141  __m256 u1 = in[4 * i + 1].data;
3142  __m256 u2 = in[4 * i + 2].data;
3143  __m256 u3 = in[4 * i + 3].data;
3144  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3145  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3146  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3147  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3148  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3149  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3150  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3151  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3152  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3153  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3154  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3155  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3156  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3157  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3158  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3159  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3160 
3161  // Cannot use the same store instructions in both paths of the 'if'
3162  // because the compiler cannot know that there is no aliasing between
3163  // pointers
3164  if (add_into)
3165  {
3166  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3167  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3168  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3169  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3170  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3171  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3172  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3173  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3174  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3175  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3176  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3177  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3178  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3179  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3180  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3181  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3182  }
3183  else
3184  {
3185  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3186  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3187  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3188  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3189  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3190  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3191  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3192  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3193  }
3194  }
3195 
3196  // remainder loop of work that does not divide by 4
3197  if (add_into)
3198  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3199  for (unsigned int v = 0; v < 8; ++v)
3200  out[offsets[v] + i] += in[i][v];
3201  else
3202  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3203  for (unsigned int v = 0; v < 8; ++v)
3204  out[offsets[v] + i] = in[i][v];
3205 }
3206 
3207 
3208 
3212 template <>
3213 inline DEAL_II_ALWAYS_INLINE void
3214 vectorized_transpose_and_store(const bool add_into,
3215  const unsigned int n_entries,
3216  const VectorizedArray<float, 8> *in,
3217  std::array<float *, 8> & out)
3218 {
3219  // see the comments in the vectorized_transpose_and_store above
3220 
3221  const unsigned int n_chunks = n_entries / 4;
3222  for (unsigned int i = 0; i < n_chunks; ++i)
3223  {
3224  __m256 u0 = in[4 * i + 0].data;
3225  __m256 u1 = in[4 * i + 1].data;
3226  __m256 u2 = in[4 * i + 2].data;
3227  __m256 u3 = in[4 * i + 3].data;
3228  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3229  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3230  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3231  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3232  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3233  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3234  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3235  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3236  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3237  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3238  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3239  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3240  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3241  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3242  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3243  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3244 
3245  if (add_into)
3246  {
3247  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3248  _mm_storeu_ps(out[0] + 4 * i, res0);
3249  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3250  _mm_storeu_ps(out[1] + 4 * i, res1);
3251  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3252  _mm_storeu_ps(out[2] + 4 * i, res2);
3253  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3254  _mm_storeu_ps(out[3] + 4 * i, res3);
3255  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3256  _mm_storeu_ps(out[4] + 4 * i, res4);
3257  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3258  _mm_storeu_ps(out[5] + 4 * i, res5);
3259  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3260  _mm_storeu_ps(out[6] + 4 * i, res6);
3261  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3262  _mm_storeu_ps(out[7] + 4 * i, res7);
3263  }
3264  else
3265  {
3266  _mm_storeu_ps(out[0] + 4 * i, res0);
3267  _mm_storeu_ps(out[1] + 4 * i, res1);
3268  _mm_storeu_ps(out[2] + 4 * i, res2);
3269  _mm_storeu_ps(out[3] + 4 * i, res3);
3270  _mm_storeu_ps(out[4] + 4 * i, res4);
3271  _mm_storeu_ps(out[5] + 4 * i, res5);
3272  _mm_storeu_ps(out[6] + 4 * i, res6);
3273  _mm_storeu_ps(out[7] + 4 * i, res7);
3274  }
3275  }
3276 
3277  if (add_into)
3278  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3279  for (unsigned int v = 0; v < 8; ++v)
3280  out[v][i] += in[i][v];
3281  else
3282  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3283  for (unsigned int v = 0; v < 8; ++v)
3284  out[v][i] = in[i][v];
3285 }
3286 
3287 # endif
3288 
3289 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3290 
3294 template <>
3295 class VectorizedArray<double, 2>
3296  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
3297 {
3298 public:
3302  using value_type = double;
3303 
3308  VectorizedArray() = default;
3309 
3313  VectorizedArray(const double scalar)
3314  {
3315  this->operator=(scalar);
3316  }
3317 
3321  template <typename U>
3322  VectorizedArray(const std::initializer_list<U> &list)
3323  : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
3324  {}
3325 
3330  VectorizedArray &
3331  operator=(const double x)
3332  {
3333  data = _mm_set1_pd(x);
3334  return *this;
3335  }
3336 
3341  double &
3342  operator[](const unsigned int comp)
3343  {
3344  AssertIndexRange(comp, 2);
3345  return *(reinterpret_cast<double *>(&data) + comp);
3346  }
3347 
3352  const double &
3353  operator[](const unsigned int comp) const
3354  {
3355  AssertIndexRange(comp, 2);
3356  return *(reinterpret_cast<const double *>(&data) + comp);
3357  }
3358 
3363  VectorizedArray &
3364  operator+=(const VectorizedArray &vec)
3365  {
3366 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3367  data += vec.data;
3368 # else
3369  data = _mm_add_pd(data, vec.data);
3370 # endif
3371  return *this;
3372  }
3373 
3378  VectorizedArray &
3379  operator-=(const VectorizedArray &vec)
3380  {
3381 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3382  data -= vec.data;
3383 # else
3384  data = _mm_sub_pd(data, vec.data);
3385 # endif
3386  return *this;
3387  }
3388 
3393  VectorizedArray &
3394  operator*=(const VectorizedArray &vec)
3395  {
3396 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3397  data *= vec.data;
3398 # else
3399  data = _mm_mul_pd(data, vec.data);
3400 # endif
3401  return *this;
3402  }
3403 
3408  VectorizedArray &
3409  operator/=(const VectorizedArray &vec)
3410  {
3411 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3412  data /= vec.data;
3413 # else
3414  data = _mm_div_pd(data, vec.data);
3415 # endif
3416  return *this;
3417  }
3418 
3425  void
3426  load(const double *ptr)
3427  {
3428  data = _mm_loadu_pd(ptr);
3429  }
3430 
3432  void
3433  load(const float *ptr)
3434  {
3435  DEAL_II_OPENMP_SIMD_PRAGMA
3436  for (unsigned int i = 0; i < 2; ++i)
3437  data[i] = ptr[i];
3438  }
3439 
3447  void
3448  store(double *ptr) const
3449  {
3450  _mm_storeu_pd(ptr, data);
3451  }
3452 
3454  void
3455  store(float *ptr) const
3456  {
3457  DEAL_II_OPENMP_SIMD_PRAGMA
3458  for (unsigned int i = 0; i < 2; ++i)
3459  ptr[i] = data[i];
3460  }
3461 
3467  void
3468  streaming_store(double *ptr) const
3469  {
3470  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3471  ExcMessage("Memory not aligned"));
3472  _mm_stream_pd(ptr, data);
3473  }
3474 
3488  void
3489  gather(const double *base_ptr, const unsigned int *offsets)
3490  {
3491  for (unsigned int i = 0; i < 2; ++i)
3492  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3493  }
3494 
3508  void
3509  scatter(const unsigned int *offsets, double *base_ptr) const
3510  {
3511  for (unsigned int i = 0; i < 2; ++i)
3512  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3513  }
3514 
3520  __m128d data;
3521 
3522 private:
3527  DEAL_II_ALWAYS_INLINE
3528  VectorizedArray
3529  get_sqrt() const
3530  {
3531  VectorizedArray res;
3532  res.data = _mm_sqrt_pd(data);
3533  return res;
3534  }
3535 
3540  DEAL_II_ALWAYS_INLINE
3541  VectorizedArray
3542  get_abs() const
3543  {
3544  // to compute the absolute value, perform
3545  // bitwise andnot with -0. This will leave all
3546  // value and exponent bits unchanged but force
3547  // the sign value to +.
3548  __m128d mask = _mm_set1_pd(-0.);
3549  VectorizedArray res;
3550  res.data = _mm_andnot_pd(mask, data);
3551  return res;
3552  }
3553 
3558  DEAL_II_ALWAYS_INLINE
3559  VectorizedArray
3560  get_max(const VectorizedArray &other) const
3561  {
3562  VectorizedArray res;
3563  res.data = _mm_max_pd(data, other.data);
3564  return res;
3565  }
3566 
3571  DEAL_II_ALWAYS_INLINE
3572  VectorizedArray
3573  get_min(const VectorizedArray &other) const
3574  {
3575  VectorizedArray res;
3576  res.data = _mm_min_pd(data, other.data);
3577  return res;
3578  }
3579 
3580  // Make a few functions friends.
3581  template <typename Number2, std::size_t width2>
3582  friend VectorizedArray<Number2, width2>
3583  std::sqrt(const VectorizedArray<Number2, width2> &);
3584  template <typename Number2, std::size_t width2>
3585  friend VectorizedArray<Number2, width2>
3586  std::abs(const VectorizedArray<Number2, width2> &);
3587  template <typename Number2, std::size_t width2>
3588  friend VectorizedArray<Number2, width2>
3589  std::max(const VectorizedArray<Number2, width2> &,
3590  const VectorizedArray<Number2, width2> &);
3591  template <typename Number2, std::size_t width2>
3592  friend VectorizedArray<Number2, width2>
3593  std::min(const VectorizedArray<Number2, width2> &,
3594  const VectorizedArray<Number2, width2> &);
3595 };
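Because streaming_store() asserts 16-byte alignment for __m128d, the destination needs to be allocated accordingly. A sketch (not from the library; it assumes n is even so the allocation size is a multiple of the alignment):

#include <deal.II/base/vectorization.h>

#include <cstdlib>

void fill_with_value(const double value, const std::size_t n)
{
  double *dst =
    static_cast<double *>(std::aligned_alloc(16, n * sizeof(double)));
  dealii::VectorizedArray<double, 2> v = value; // broadcast via operator=
  for (std::size_t i = 0; i < n; i += 2)
    v.streaming_store(dst + i); // bypasses the cache via _mm_stream_pd
  std::free(dst);
}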
3596 
3597 
3598 
3602 template <>
3603 inline DEAL_II_ALWAYS_INLINE void
3604 vectorized_load_and_transpose(const unsigned int n_entries,
3605  const double * in,
3606  const unsigned int * offsets,
3607  VectorizedArray<double, 2> *out)
3608 {
3609  const unsigned int n_chunks = n_entries / 2;
3610  for (unsigned int i = 0; i < n_chunks; ++i)
3611  {
3612  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3613  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3614  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3615  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3616  }
3617 
3618  // remainder loop of work that does not divide by 2
3619  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3620  for (unsigned int v = 0; v < 2; ++v)
3621  out[i][v] = in[offsets[v] + i];
3622 }
3623 
3624 
3625 
3629 template <>
3630 inline DEAL_II_ALWAYS_INLINE void
3631 vectorized_load_and_transpose(const unsigned int n_entries,
3632  const std::array<double *, 2> &in,
3633  VectorizedArray<double, 2> *out)
3634 {
3635  // see the comments in the vectorized_load_and_transpose above
3636 
3637  const unsigned int n_chunks = n_entries / 2;
3638  for (unsigned int i = 0; i < n_chunks; ++i)
3639  {
3640  __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3641  __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3642  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3643  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3644  }
3645 
3646  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3647  for (unsigned int v = 0; v < 2; ++v)
3648  out[i][v] = in[v][i];
3649 }
3650 
3651 
3652 
3656 template <>
3657 inline DEAL_II_ALWAYS_INLINE void
3658 vectorized_transpose_and_store(const bool add_into,
3659  const unsigned int n_entries,
3660  const VectorizedArray<double, 2> *in,
3661  const unsigned int * offsets,
3662  double * out)
3663 {
3664  const unsigned int n_chunks = n_entries / 2;
3665  if (add_into)
3666  {
3667  for (unsigned int i = 0; i < n_chunks; ++i)
3668  {
3669  __m128d u0 = in[2 * i + 0].data;
3670  __m128d u1 = in[2 * i + 1].data;
3671  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3672  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3673  _mm_storeu_pd(out + 2 * i + offsets[0],
3674  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3675  res0));
3676  _mm_storeu_pd(out + 2 * i + offsets[1],
3677  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3678  res1));
3679  }
3680  // remainder loop of work that does not divide by 2
3681  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3682  for (unsigned int v = 0; v < 2; ++v)
3683  out[offsets[v] + i] += in[i][v];
3684  }
3685  else
3686  {
3687  for (unsigned int i = 0; i < n_chunks; ++i)
3688  {
3689  __m128d u0 = in[2 * i + 0].data;
3690  __m128d u1 = in[2 * i + 1].data;
3691  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3692  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3693  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3694  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3695  }
3696  // remainder loop of work that does not divide by 2
3697  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3698  for (unsigned int v = 0; v < 2; ++v)
3699  out[offsets[v] + i] = in[i][v];
3700  }
3701 }
3702 
3703 
3704 
3708 template <>
3709 inline DEAL_II_ALWAYS_INLINE void
3710 vectorized_transpose_and_store(const bool add_into,
3711  const unsigned int n_entries,
3712  const VectorizedArray<double, 2> *in,
3713  std::array<double *, 2> & out)
3714 {
3715  // see the comments in the vectorized_transpose_and_store above
3716 
3717  const unsigned int n_chunks = n_entries / 2;
3718  if (add_into)
3719  {
3720  for (unsigned int i = 0; i < n_chunks; ++i)
3721  {
3722  __m128d u0 = in[2 * i + 0].data;
3723  __m128d u1 = in[2 * i + 1].data;
3724  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3725  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3726  _mm_storeu_pd(out[0] + 2 * i,
3727  _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3728  _mm_storeu_pd(out[1] + 2 * i,
3729  _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3730  }
3731 
3732  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3733  for (unsigned int v = 0; v < 2; ++v)
3734  out[v][i] += in[i][v];
3735  }
3736  else
3737  {
3738  for (unsigned int i = 0; i < n_chunks; ++i)
3739  {
3740  __m128d u0 = in[2 * i + 0].data;
3741  __m128d u1 = in[2 * i + 1].data;
3742  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3743  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3744  _mm_storeu_pd(out[0] + 2 * i, res0);
3745  _mm_storeu_pd(out[1] + 2 * i, res1);
3746  }
3747 
3748  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3749  for (unsigned int v = 0; v < 2; ++v)
3750  out[v][i] = in[i][v];
3751  }
3752 }
3753 
3754 
3755 
3759 template <>
3760 class VectorizedArray<float, 4>
3761  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
3762 {
3763 public:
3767  using value_type = float;
3768 
3777  VectorizedArray() = default;
3778 
3782  VectorizedArray(const float scalar)
3783  {
3784  this->operator=(scalar);
3785  }
3786 
3790  template <typename U>
3791  VectorizedArray(const std::initializer_list<U> &list)
3792  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
3793  {}
3794 
3796  VectorizedArray &
3797  operator=(const float x)
3798  {
3799  data = _mm_set1_ps(x);
3800  return *this;
3801  }
3802 
3807  float &
3808  operator[](const unsigned int comp)
3809  {
3810  AssertIndexRange(comp, 4);
3811  return *(reinterpret_cast<float *>(&data) + comp);
3812  }
3813 
3818  const float &
3819  operator[](const unsigned int comp) const
3820  {
3821  AssertIndexRange(comp, 4);
3822  return *(reinterpret_cast<const float *>(&data) + comp);
3823  }
3824 
3829  VectorizedArray &
3830  operator+=(const VectorizedArray &vec)
3831  {
3832 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3833  data += vec.data;
3834 # else
3835  data = _mm_add_ps(data, vec.data);
3836 # endif
3837  return *this;
3838  }
3839 
3844  VectorizedArray &
3845  operator-=(const VectorizedArray &vec)
3846  {
3847 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3848  data -= vec.data;
3849 # else
3850  data = _mm_sub_ps(data, vec.data);
3851 # endif
3852  return *this;
3853  }
3854 
3859  VectorizedArray &
3860  operator*=(const VectorizedArray &vec)
3861  {
3862 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3863  data *= vec.data;
3864 # else
3865  data = _mm_mul_ps(data, vec.data);
3866 # endif
3867  return *this;
3868  }
3869 
3874  VectorizedArray &
3875  operator/=(const VectorizedArray &vec)
3876  {
3877 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3878  data /= vec.data;
3879 # else
3880  data = _mm_div_ps(data, vec.data);
3881 # endif
3882  return *this;
3883  }
3884 
3891  void
3892  load(const float *ptr)
3893  {
3894  data = _mm_loadu_ps(ptr);
3895  }
3896 
3904  void
3905  store(float *ptr) const
3906  {
3907  _mm_storeu_ps(ptr, data);
3908  }
3909 
3915  void
3916  streaming_store(float *ptr) const
3917  {
3918  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3919  ExcMessage("Memory not aligned"));
3920  _mm_stream_ps(ptr, data);
3921  }
3922 
3936  void
3937  gather(const float *base_ptr, const unsigned int *offsets)
3938  {
3939  for (unsigned int i = 0; i < 4; ++i)
3940  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3941  }
3942 
3956  void
3957  scatter(const unsigned int *offsets, float *base_ptr) const
3958  {
3959  for (unsigned int i = 0; i < 4; ++i)
3960  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3961  }
3962 
3968  __m128 data;
3969 
3970 private:
3975  DEAL_II_ALWAYS_INLINE
3976  VectorizedArray
3977  get_sqrt() const
3978  {
3979  VectorizedArray res;
3980  res.data = _mm_sqrt_ps(data);
3981  return res;
3982  }
3983 
3988  DEAL_II_ALWAYS_INLINE
3989  VectorizedArray
3990  get_abs() const
3991  {
3992  // to compute the absolute value, perform bitwise andnot with -0. This
3993  // will leave all value and exponent bits unchanged but force the sign
3994  // value to +.
3995  __m128 mask = _mm_set1_ps(-0.f);
3996  VectorizedArray res;
3997  res.data = _mm_andnot_ps(mask, data);
3998  return res;
3999  }
4000 
4005  DEAL_II_ALWAYS_INLINE
4006  VectorizedArray
4007  get_max(const VectorizedArray &other) const
4008  {
4009  VectorizedArray res;
4010  res.data = _mm_max_ps(data, other.data);
4011  return res;
4012  }
4013 
4018  DEAL_II_ALWAYS_INLINE
4019  VectorizedArray
4020  get_min(const VectorizedArray &other) const
4021  {
4022  VectorizedArray res;
4023  res.data = _mm_min_ps(data, other.data);
4024  return res;
4025  }
4026 
4027  // Make a few functions friends.
4028  template <typename Number2, std::size_t width2>
4029  friend VectorizedArray<Number2, width2>
4030  std::sqrt(const VectorizedArray<Number2, width2> &);
4031  template <typename Number2, std::size_t width2>
4032  friend VectorizedArray<Number2, width2>
4033  std::abs(const VectorizedArray<Number2, width2> &);
4034  template <typename Number2, std::size_t width2>
4035  friend VectorizedArray<Number2, width2>
4036  std::max(const VectorizedArray<Number2, width2> &,
4037  const VectorizedArray<Number2, width2> &);
4038  template <typename Number2, std::size_t width2>
4039  friend VectorizedArray<Number2, width2>
4040  std::min(const VectorizedArray<Number2, width2> &,
4041  const VectorizedArray<Number2, width2> &);
4042 };
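A short gather/scatter sketch for this specialization (hypothetical helper, not library code): read four non-contiguous floats, square them, and write them back to the same locations.

#include <deal.II/base/vectorization.h>

void square_selected(float *values, const unsigned int *indices) // 4 indices
{
  dealii::VectorizedArray<float, 4> v;
  v.gather(values, indices);  // v[i] = values[indices[i]]
  v *= v;
  v.scatter(indices, values); // values[indices[i]] = v[i]
}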
4043 
4044 
4045 
4049 template <>
4050 inline DEAL_II_ALWAYS_INLINE void
4051 vectorized_load_and_transpose(const unsigned int n_entries,
4052  const float * in,
4053  const unsigned int * offsets,
4054  VectorizedArray<float, 4> *out)
4055 {
4056  const unsigned int n_chunks = n_entries / 4;
4057  for (unsigned int i = 0; i < n_chunks; ++i)
4058  {
4059  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
4060  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
4061  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
4062  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
4063  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
4064  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
4065  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4066  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4067  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
4068  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
4069  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
4070  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
4071  }
4072 
4073  // remainder loop of work that does not divide by 4
4074  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4075  for (unsigned int v = 0; v < 4; ++v)
4076  out[i][v] = in[offsets[v] + i];
4077 }
4078 
4079 
4080 
4084 template <>
4085 inline DEAL_II_ALWAYS_INLINE void
4086 vectorized_load_and_transpose(const unsigned int n_entries,
4087  const std::array<float *, 4> &in,
4088  VectorizedArray<float, 4> *out)
4089 {
4090  // see the comments in the vectorized_load_and_transpose above
4091 
4092  const unsigned int n_chunks = n_entries / 4;
4093  for (unsigned int i = 0; i < n_chunks; ++i)
4094  {
4095  __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
4096  __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
4097  __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
4098  __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
4099  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
4100  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
4101  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4102  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4103  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
4104  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
4105  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
4106  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
4107  }
4108 
4109  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4110  for (unsigned int v = 0; v < 4; ++v)
4111  out[i][v] = in[v][i];
4112 }
4113 
4114 
4115 
4119 template <>
4120 inline DEAL_II_ALWAYS_INLINE void
4121 vectorized_transpose_and_store(const bool add_into,
4122  const unsigned int n_entries,
4123  const VectorizedArray<float, 4> *in,
4124  const unsigned int * offsets,
4125  float * out)
4126 {
4127  const unsigned int n_chunks = n_entries / 4;
4128  for (unsigned int i = 0; i < n_chunks; ++i)
4129  {
4130  __m128 u0 = in[4 * i + 0].data;
4131  __m128 u1 = in[4 * i + 1].data;
4132  __m128 u2 = in[4 * i + 2].data;
4133  __m128 u3 = in[4 * i + 3].data;
4134  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4135  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4136  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4137  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4138  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4139  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4140  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4141  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4142 
4143  // Cannot use the same store instructions in both paths of the 'if'
4144  // because the compiler cannot know that there is no aliasing between
4145  // pointers
4146  if (add_into)
4147  {
4148  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4149  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4150  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4151  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4152  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4153  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4154  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4155  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4156  }
4157  else
4158  {
4159  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4160  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4161  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4162  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4163  }
4164  }
4165 
4166  // remainder loop of work that does not divide by 4
4167  if (add_into)
4168  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4169  for (unsigned int v = 0; v < 4; ++v)
4170  out[offsets[v] + i] += in[i][v];
4171  else
4172  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4173  for (unsigned int v = 0; v < 4; ++v)
4174  out[offsets[v] + i] = in[i][v];
4175 }
4176 
4177 
4178 
4182 template <>
4183 inline DEAL_II_ALWAYS_INLINE void
4184 vectorized_transpose_and_store(const bool add_into,
4185  const unsigned int n_entries,
4186  const VectorizedArray<float, 4> *in,
4187  std::array<float *, 4> & out)
4188 {
4189  // see the comments in the vectorized_transpose_and_store above
4190 
4191  const unsigned int n_chunks = n_entries / 4;
4192  for (unsigned int i = 0; i < n_chunks; ++i)
4193  {
4194  __m128 u0 = in[4 * i + 0].data;
4195  __m128 u1 = in[4 * i + 1].data;
4196  __m128 u2 = in[4 * i + 2].data;
4197  __m128 u3 = in[4 * i + 3].data;
4198  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4199  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4200  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4201  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4202  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4203  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4204  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4205  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4206 
4207  if (add_into)
4208  {
4209  u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4210  _mm_storeu_ps(out[0] + 4 * i, u0);
4211  u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4212  _mm_storeu_ps(out[1] + 4 * i, u1);
4213  u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4214  _mm_storeu_ps(out[2] + 4 * i, u2);
4215  u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4216  _mm_storeu_ps(out[3] + 4 * i, u3);
4217  }
4218  else
4219  {
4220  _mm_storeu_ps(out[0] + 4 * i, u0);
4221  _mm_storeu_ps(out[1] + 4 * i, u1);
4222  _mm_storeu_ps(out[2] + 4 * i, u2);
4223  _mm_storeu_ps(out[3] + 4 * i, u3);
4224  }
4225  }
4226 
4227  if (add_into)
4228  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4229  for (unsigned int v = 0; v < 4; ++v)
4230  out[v][i] += in[i][v];
4231  else
4232  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4233  for (unsigned int v = 0; v < 4; ++v)
4234  out[v][i] = in[i][v];
4235 }
4236 
4237 
4238 
4239 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
4240 
4241 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4242  defined(__VSX__)
4243 
4244 template <>
4245 class VectorizedArray<double, 2>
4246  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
4247 {
4248 public:
4252  using value_type = double;
4253 
4258  VectorizedArray() = default;
4259 
4263  VectorizedArray(const double scalar)
4264  {
4265  this->operator=(scalar);
4266  }
4267 
4271  template <typename U>
4272  VectorizedArray(const std::initializer_list<U> &list)
4273  : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
4274  {}
4275 
4280  VectorizedArray &
4281  operator=(const double x)
4282  {
4283  data = vec_splats(x);
4284 
4285  // Some compilers believe that vec_splats sets 'x', but that's not true.
4286  // They then warn about setting a variable and not using it. Suppress the
4287  // warning by "using" the variable:
4288  (void)x;
4289  return *this;
4290  }
4291 
4296  double &
4297  operator[](const unsigned int comp)
4298  {
4299  AssertIndexRange(comp, 2);
4300  return *(reinterpret_cast<double *>(&data) + comp);
4301  }
4302 
4307  const double &
4308  operator[](const unsigned int comp) const
4309  {
4310  AssertIndexRange(comp, 2);
4311  return *(reinterpret_cast<const double *>(&data) + comp);
4312  }
4313 
4318  VectorizedArray &
4319  operator+=(const VectorizedArray &vec)
4320  {
4321  data = vec_add(data, vec.data);
4322  return *this;
4323  }
4324 
4329  VectorizedArray &
4330  operator-=(const VectorizedArray &vec)
4331  {
4332  data = vec_sub(data, vec.data);
4333  return *this;
4334  }
4335 
4340  VectorizedArray &
4341  operator*=(const VectorizedArray &vec)
4342  {
4343  data = vec_mul(data, vec.data);
4344  return *this;
4345  }
4346 
4351  VectorizedArray &
4352  operator/=(const VectorizedArray &vec)
4353  {
4354  data = vec_div(data, vec.data);
4355  return *this;
4356  }
4357 
4363  void
4364  load(const double *ptr)
4365  {
4366  data = vec_vsx_ld(0, ptr);
4367  }
4368 
4374  void
4375  store(double *ptr) const
4376  {
4377  vec_vsx_st(data, 0, ptr);
4378  }
4379 
4384  void
4385  streaming_store(double *ptr) const
4386  {
4387  store(ptr);
4388  }
4389 
4394  void
4395  gather(const double *base_ptr, const unsigned int *offsets)
4396  {
4397  for (unsigned int i = 0; i < 2; ++i)
4398  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4399  }
4400 
4405  void
4406  scatter(const unsigned int *offsets, double *base_ptr) const
4407  {
4408  for (unsigned int i = 0; i < 2; ++i)
4409  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4410  }
4411 
4417  __vector double data;
4418 
4419 private:
4424  DEAL_II_ALWAYS_INLINE
4425  VectorizedArray
4426  get_sqrt() const
4427  {
4428  VectorizedArray res;
4429  res.data = vec_sqrt(data);
4430  return res;
4431  }
4432 
4437  DEAL_II_ALWAYS_INLINE
4438  VectorizedArray
4439  get_abs() const
4440  {
4441  VectorizedArray res;
4442  res.data = vec_abs(data);
4443  return res;
4444  }
4445 
4450  DEAL_II_ALWAYS_INLINE
4451  VectorizedArray
4452  get_max(const VectorizedArray &other) const
4453  {
4454  VectorizedArray res;
4455  res.data = vec_max(data, other.data);
4456  return res;
4457  }
4458 
4463  DEAL_II_ALWAYS_INLINE
4464  VectorizedArray
4465  get_min(const VectorizedArray &other) const
4466  {
4467  VectorizedArray res;
4468  res.data = vec_min(data, other.data);
4469  return res;
4470  }
4471 
4472  // Make a few functions friends.
4473  template <typename Number2, std::size_t width2>
4474  friend VectorizedArray<Number2, width2>
4475  std::sqrt(const VectorizedArray<Number2, width2> &);
4476  template <typename Number2, std::size_t width2>
4477  friend VectorizedArray<Number2, width2>
4478  std::abs(const VectorizedArray<Number2, width2> &);
4479  template <typename Number2, std::size_t width2>
4480  friend VectorizedArray<Number2, width2>
4481  std::max(const VectorizedArray<Number2, width2> &,
4482  const VectorizedArray<Number2, width2> &);
4483  template <typename Number2, std::size_t width2>
4484  friend VectorizedArray<Number2, width2>
4485  std::min(const VectorizedArray<Number2, width2> &,
4486  const VectorizedArray<Number2, width2> &);
4487 };
4488 
4489 
4490 
4491 template <>
4492 class VectorizedArray<float, 4>
4493  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
4494 {
4495 public:
4499  using value_type = float;
4500 
4505  VectorizedArray() = default;
4506 
4510  VectorizedArray(const float scalar)
4511  {
4512  this->operator=(scalar);
4513  }
4514 
4518  template <typename U>
4519  VectorizedArray(const std::initializer_list<U> &list)
4520  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
4521  {}
4522 
4527  VectorizedArray &
4528  operator=(const float x)
4529  {
4530  data = vec_splats(x);
4531 
4532  // Some compilers believe that vec_splats sets 'x', but that's not true.
4533  // They then warn about setting a variable and not using it. Suppress the
4534  // warning by "using" the variable:
4535  (void)x;
4536  return *this;
4537  }
4538 
4543  float &
4544  operator[](const unsigned int comp)
4545  {
4546  AssertIndexRange(comp, 4);
4547  return *(reinterpret_cast<float *>(&data) + comp);
4548  }
4549 
4554  const float &
4555  operator[](const unsigned int comp) const
4556  {
4557  AssertIndexRange(comp, 4);
4558  return *(reinterpret_cast<const float *>(&data) + comp);
4559  }
4560 
4565  VectorizedArray &
4566  operator+=(const VectorizedArray &vec)
4567  {
4568  data = vec_add(data, vec.data);
4569  return *this;
4570  }
4571 
4576  VectorizedArray &
4577  operator-=(const VectorizedArray &vec)
4578  {
4579  data = vec_sub(data, vec.data);
4580  return *this;
4581  }
4582 
4587  VectorizedArray &
4588  operator*=(const VectorizedArray &vec)
4589  {
4590  data = vec_mul(data, vec.data);
4591  return *this;
4592  }
4593 
4598  VectorizedArray &
4599  operator/=(const VectorizedArray &vec)
4600  {
4601  data = vec_div(data, vec.data);
4602  return *this;
4603  }
4604 
4610  void
4611  load(const float *ptr)
4612  {
4613  data = vec_vsx_ld(0, ptr);
4614  }
4615 
4621  void
4622  store(float *ptr) const
4623  {
4624  vec_vsx_st(data, 0, ptr);
4625  }
4626 
4631  void
4632  streaming_store(float *ptr) const
4633  {
4634  store(ptr);
4635  }
4636 
4641  void
4642  gather(const float *base_ptr, const unsigned int *offsets)
4643  {
4644  for (unsigned int i = 0; i < 4; ++i)
4645  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4646  }
4647 
4652  void
4653  scatter(const unsigned int *offsets, float *base_ptr) const
4654  {
4655  for (unsigned int i = 0; i < 4; ++i)
4656  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4657  }
4658 
4664  __vector float data;
4665 
4666 private:
4671  DEAL_II_ALWAYS_INLINE
4672  VectorizedArray
4673  get_sqrt() const
4674  {
4675  VectorizedArray res;
4676  res.data = vec_sqrt(data);
4677  return res;
4678  }
4679 
4684  DEAL_II_ALWAYS_INLINE
4685  VectorizedArray
4686  get_abs() const
4687  {
4688  VectorizedArray res;
4689  res.data = vec_abs(data);
4690  return res;
4691  }
4692 
4697  DEAL_II_ALWAYS_INLINE
4698  VectorizedArray
4699  get_max(const VectorizedArray &other) const
4700  {
4701  VectorizedArray res;
4702  res.data = vec_max(data, other.data);
4703  return res;
4704  }
4705 
4710  DEAL_II_ALWAYS_INLINE
4711  VectorizedArray
4712  get_min(const VectorizedArray &other) const
4713  {
4714  VectorizedArray res;
4715  res.data = vec_min(data, other.data);
4716  return res;
4717  }
4718 
4719  // Make a few functions friends.
4720  template <typename Number2, std::size_t width2>
4721  friend VectorizedArray<Number2, width2>
4722  std::sqrt(const VectorizedArray<Number2, width2> &);
4723  template <typename Number2, std::size_t width2>
4724  friend VectorizedArray<Number2, width2>
4725  std::abs(const VectorizedArray<Number2, width2> &);
4726  template <typename Number2, std::size_t width2>
4727  friend VectorizedArray<Number2, width2>
4728  std::max(const VectorizedArray<Number2, width2> &,
4729  const VectorizedArray<Number2, width2> &);
4730  template <typename Number2, std::size_t width2>
4731  friend VectorizedArray<Number2, width2>
4732  std::min(const VectorizedArray<Number2, width2> &,
4733  const VectorizedArray<Number2, width2> &);
4734 };
4735 
4736 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) &&
4737  // defined(__VSX__)
4738 
4739 
4740 #endif // DOXYGEN
4741 
4752 template <typename Number, std::size_t width>
4753 inline DEAL_II_ALWAYS_INLINE bool
4754 operator==(const VectorizedArray<Number, width> &lhs,
4755  const VectorizedArray<Number, width> &rhs)
4756 {
4757  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4758  if (lhs[i] != rhs[i])
4759  return false;
4760 
4761  return true;
4762 }
4763 
4764 
4770 template <typename Number, std::size_t width>
4771 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4772 operator+(const VectorizedArray<Number, width> &u,
4773  const VectorizedArray<Number, width> &v)
4774 {
4775  VectorizedArray<Number, width> tmp = u;
4776  return tmp += v;
4777 }
4778 
4784 template <typename Number, std::size_t width>
4785 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4786 operator-(const VectorizedArray<Number, width> &u,
4787  const VectorizedArray<Number, width> &v)
4788 {
4789  VectorizedArray<Number, width> tmp = u;
4790  return tmp -= v;
4791 }
4792 
4798 template <typename Number, std::size_t width>
4799 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4800 operator*(const VectorizedArray<Number, width> &u,
4801  const VectorizedArray<Number, width> &v)
4802 {
4803  VectorizedArray<Number, width> tmp = u;
4804  return tmp *= v;
4805 }
4806 
4812 template <typename Number, std::size_t width>
4813 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4814 operator/(const VectorizedArray<Number, width> &u,
4815  const VectorizedArray<Number, width> &v)
4816 {
4817  VectorizedArray<Number, width> tmp = u;
4818  return tmp /= v;
4819 }
4820 
4827 template <typename Number, std::size_t width>
4828 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4829 operator+(const Number &u, const VectorizedArray<Number, width> &v)
4830 {
4831  VectorizedArray<Number, width> tmp = u;
4832  return tmp += v;
4833 }
4834 
4843 template <std::size_t width>
4844 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4845 operator+(const double u, const VectorizedArray<float, width> &v)
4846 {
4847  VectorizedArray<float, width> tmp = static_cast<float>(u);
4848  return tmp += v;
4849 }
4850 
4857 template <typename Number, std::size_t width>
4858 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4859 operator+(const VectorizedArray<Number, width> &v, const Number &u)
4860 {
4861  return u + v;
4862 }
4863 
4872 template <std::size_t width>
4873 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4874 operator+(const VectorizedArray<float, width> &v, const double u)
4875 {
4876  return u + v;
4877 }
4878 
4885 template <typename Number, std::size_t width>
4886 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4887 operator-(const Number &u, const VectorizedArray<Number, width> &v)
4888 {
4889  VectorizedArray<Number, width> tmp = u;
4890  return tmp -= v;
4891 }
4892 
4901 template <std::size_t width>
4902 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4903 operator-(const double u, const VectorizedArray<float, width> &v)
4904 {
4905  VectorizedArray<float, width> tmp = static_cast<float>(u);
4906  return tmp -= v;
4907 }
4908 
4915 template <typename Number, std::size_t width>
4916 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4917 operator-(const VectorizedArray<Number, width> &v, const Number &u)
4918 {
4919  VectorizedArray<Number, width> tmp = u;
4920  return v - tmp;
4921 }
4922 
4931 template <std::size_t width>
4932 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4933 operator-(const VectorizedArray<float, width> &v, const double u)
4934 {
4935  VectorizedArray<float, width> tmp = static_cast<float>(u);
4936  return v - tmp;
4937 }
4938 
4945 template <typename Number, std::size_t width>
4946 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4947 operator*(const Number &u, const VectorizedArray<Number, width> &v)
4948 {
4949  VectorizedArray<Number, width> tmp = u;
4950  return tmp *= v;
4951 }
4952 
4961 template <std::size_t width>
4962 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4963 operator*(const double u, const VectorizedArray<float, width> &v)
4964 {
4965  VectorizedArray<float, width> tmp = static_cast<float>(u);
4966  return tmp *= v;
4967 }
4968 
4975 template <typename Number, std::size_t width>
4976 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4977 operator*(const VectorizedArray<Number, width> &v, const Number &u)
4978 {
4979  return u * v;
4980 }
4981 
4990 template <std::size_t width>
4991 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4992 operator*(const VectorizedArray<float, width> &v, const double u)
4993 {
4994  return u * v;
4995 }
4996 
5003 template <typename Number, std::size_t width>
5004 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5005 operator/(const Number &u, const VectorizedArray<Number, width> &v)
5006 {
5007  VectorizedArray<Number, width> tmp = u;
5008  return tmp /= v;
5009 }
5010 
5019 template <std::size_t width>
5020 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5021 operator/(const double u, const VectorizedArray<float, width> &v)
5022 {
5023  VectorizedArray<float, width> tmp = static_cast<float>(u);
5024  return tmp /= v;
5025 }
5026 
5033 template <typename Number, std::size_t width>
5034 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5035 operator/(const VectorizedArray<Number, width> &v, const Number &u)
5036 {
5037  VectorizedArray<Number, width> tmp = u;
5038  return v / tmp;
5039 }
5040 
5049 template <std::size_t width>
5050 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5051 operator/(const VectorizedArray<float, width> &v, const double u)
5052 {
5053  VectorizedArray<float, width> tmp = static_cast<float>(u);
5054  return v / tmp;
5055 }
5056 
5062 template <typename Number, std::size_t width>
5063 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5064 operator+(const VectorizedArray<Number, width> &u)
5065 {
5066  return u;
5067 }
5068 
5074 template <typename Number, std::size_t width>
5075 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5076 operator-(const VectorizedArray<Number, width> &u)
5077 {
5078  // to get a negative sign, subtract the input from zero (could also
5079  // multiply by -1, but this one is slightly simpler)
5080  return VectorizedArray<Number, width>() - u;
5081 }
5082 
5088 template <typename Number, std::size_t width>
5089 inline std::ostream &
5090 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
5091 {
5092  constexpr unsigned int n = VectorizedArray<Number, width>::size();
5093  for (unsigned int i = 0; i < n - 1; ++i)
5094  out << p[i] << ' ';
5095  out << p[n - 1];
5096 
5097  return out;
5098 }
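For instance (a sketch assuming a build where VectorizedArray<double, 2> is backed by SSE2 or an equivalent instruction set), the following prints "1 2":

#include <deal.II/base/vectorization.h>

#include <iostream>

void print_example()
{
  dealii::VectorizedArray<double, 2> v = 1.0; // both lanes set to 1.0
  v[1] = 2.0;
  std::cout << v << std::endl;
}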
5099 
5114 enum class SIMDComparison : int
5115 {
5116 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5117  equal = _CMP_EQ_OQ,
5118  not_equal = _CMP_NEQ_OQ,
5119  less_than = _CMP_LT_OQ,
5120  less_than_or_equal = _CMP_LE_OQ,
5121  greater_than = _CMP_GT_OQ,
5122  greater_than_or_equal = _CMP_GE_OQ
5123 #else
5124  equal,
5125  not_equal,
5126  less_than,
5127  less_than_or_equal,
5128  greater_than,
5129  greater_than_or_equal
5130 #endif
5131 };
5132 
5133 
5197 template <SIMDComparison predicate, typename Number>
5198 DEAL_II_ALWAYS_INLINE inline Number
5199 compare_and_apply_mask(const Number &left,
5200  const Number &right,
5201  const Number &true_value,
5202  const Number &false_value)
5203 {
5204  bool mask;
5205  switch (predicate)
5206  {
5207  case SIMDComparison::equal:
5208  mask = (left == right);
5209  break;
5210  case SIMDComparison::not_equal:
5211  mask = (left != right);
5212  break;
5213  case SIMDComparison::less_than:
5214  mask = (left < right);
5215  break;
5216  case SIMDComparison::less_than_or_equal:
5217  mask = (left <= right);
5218  break;
5219  case SIMDComparison::greater_than:
5220  mask = (left > right);
5221  break;
5222  case SIMDComparison::greater_than_or_equal:
5223  mask = (left >= right);
5224  break;
5225  }
5226 
5227  return mask ? true_value : false_value;
5228 }
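A usage sketch (the function positive_part is hypothetical): because the same expression compiles for plain numbers and, via the overloads below, for VectorizedArray, branch-free clamping can be written once and reused in vectorized code.

#include <deal.II/base/vectorization.h>

template <typename Number>
Number positive_part(const Number &x)
{
  // lane-wise: x > 0 ? x : 0
  return dealii::compare_and_apply_mask<dealii::SIMDComparison::greater_than>(
    x, Number(0.), x, Number(0.));
}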
5229 
5230 
5235 template <SIMDComparison predicate, typename Number>
5236 DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
5237 compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
5238  const VectorizedArray<Number, 1> &right,
5239  const VectorizedArray<Number, 1> &true_value,
5240  const VectorizedArray<Number, 1> &false_value)
5241 {
5242  VectorizedArray<Number, 1> result;
5243  result.data = compare_and_apply_mask<predicate, Number>(left.data,
5244  right.data,
5245  true_value.data,
5246  false_value.data);
5247  return result;
5248 }
5249 
5252 #ifndef DOXYGEN
5253 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5254 
5255 template <SIMDComparison predicate>
5256 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
5257 compare_and_apply_mask(const VectorizedArray<float, 16> &left,
5258  const VectorizedArray<float, 16> &right,
5259  const VectorizedArray<float, 16> &true_values,
5260  const VectorizedArray<float, 16> &false_values)
5261 {
5262  const __mmask16 mask =
5263  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5264  VectorizedArray<float, 16> result;
5265  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5266  return result;
5267 }
5268 
5269 
5270 
5271 template <SIMDComparison predicate>
5272 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
5273 compare_and_apply_mask(const VectorizedArray<double, 8> &left,
5274  const VectorizedArray<double, 8> &right,
5275  const VectorizedArray<double, 8> &true_values,
5276  const VectorizedArray<double, 8> &false_values)
5277 {
5278  const __mmask16 mask =
5279  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5280  VectorizedArray<double, 8> result;
5281  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5282  return result;
5283 }
5284 
5285 # endif
5286 
5287 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5288 
5289 template <SIMDComparison predicate>
5290 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
5291 compare_and_apply_mask(const VectorizedArray<float, 8> &left,
5292  const VectorizedArray<float, 8> &right,
5293  const VectorizedArray<float, 8> &true_values,
5294  const VectorizedArray<float, 8> &false_values)
5295 {
5296  const auto mask =
5297  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5298 
5299  VectorizedArray<float, 8> result;
5300  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
5301  return result;
5302 }
5303 
5304 
5305 template <SIMDComparison predicate>
5306 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
5307 compare_and_apply_mask(const VectorizedArray<double, 4> &left,
5308  const VectorizedArray<double, 4> &right,
5309  const VectorizedArray<double, 4> &true_values,
5310  const VectorizedArray<double, 4> &false_values)
5311 {
5312  const auto mask =
5313  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5314 
5315  VectorizedArray<double, 4> result;
5316  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
5317  return result;
5318 }
5319 
5320 # endif
5321 
5322 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5323 
5324 template <SIMDComparison predicate>
5325 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
5326 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
5327  const VectorizedArray<float, 4> &right,
5328  const VectorizedArray<float, 4> &true_values,
5329  const VectorizedArray<float, 4> &false_values)
5330 {
5331  __m128 mask;
5332  switch (predicate)
5333  {
5334  case SIMDComparison::equal:
5335  mask = _mm_cmpeq_ps(left.data, right.data);
5336  break;
5337  case SIMDComparison::not_equal:
5338  mask = _mm_cmpneq_ps(left.data, right.data);
5339  break;
5340  case SIMDComparison::less_than:
5341  mask = _mm_cmplt_ps(left.data, right.data);
5342  break;
5343  case SIMDComparison::less_than_or_equal:
5344  mask = _mm_cmple_ps(left.data, right.data);
5345  break;
5346  case SIMDComparison::greater_than:
5347  mask = _mm_cmpgt_ps(left.data, right.data);
5348  break;
5349  case SIMDComparison::greater_than_or_equal:
5350  mask = _mm_cmpge_ps(left.data, right.data);
5351  break;
5352  }
5353 
5354  VectorizedArray<float, 4> result;
5355  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5356  _mm_andnot_ps(mask, false_values.data));
5357 
5358  return result;
5359 }
5360 
5361 
5362 template <SIMDComparison predicate>
5363 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
5364 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
5365  const VectorizedArray<double, 2> &right,
5366  const VectorizedArray<double, 2> &true_values,
5367  const VectorizedArray<double, 2> &false_values)
5368 {
5369  __m128d mask;
5370  switch (predicate)
5371  {
5372  case SIMDComparison::equal:
5373  mask = _mm_cmpeq_pd(left.data, right.data);
5374  break;
5375  case SIMDComparison::not_equal:
5376  mask = _mm_cmpneq_pd(left.data, right.data);
5377  break;
5378  case SIMDComparison::less_than:
5379  mask = _mm_cmplt_pd(left.data, right.data);
5380  break;
5381  case SIMDComparison::less_than_or_equal:
5382  mask = _mm_cmple_pd(left.data, right.data);
5383  break;
5384  case SIMDComparison::greater_than:
5385  mask = _mm_cmpgt_pd(left.data, right.data);
5386  break;
5387  case SIMDComparison::greater_than_or_equal:
5388  mask = _mm_cmpge_pd(left.data, right.data);
5389  break;
5390  }
5391 
5392  VectorizedArray<double, 2> result;
5393  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5394  _mm_andnot_pd(mask, false_values.data));
5395 
5396  return result;
5397 }
5398 
5399 # endif
5400 #endif // DOXYGEN
5401 
5402 
5403 namespace internal
5404 {
5405  template <typename T>
5406  struct VectorizedArrayTrait
5407  {
5408  using value_type = T;
5409  static constexpr std::size_t width = 1;
5410 
5411  static T &
5412  get(T &value, unsigned int c)
5413  {
5414  AssertDimension(c, 0);
5415  (void)c;
5416 
5417  return value;
5418  }
5419 
5420  static const T &
5421  get(const T &value, unsigned int c)
5422  {
5423  AssertDimension(c, 0);
5424  (void)c;
5425 
5426  return value;
5427  }
5428  };
5429 
5430  template <typename T, std::size_t width_>
5431  struct VectorizedArrayTrait<VectorizedArray<T, width_>>
5432  {
5433  using value_type = T;
5434  static constexpr std::size_t width = width_;
5435 
5436  static T &
5437  get(VectorizedArray<T, width_> &values, unsigned int c)
5438  {
5439  AssertIndexRange(c, width_);
5440 
5441  return values[c];
5442  }
5443 
5444  static const T &
5445  get(const VectorizedArray<T, width_> &values, unsigned int c)
5446  {
5447  AssertIndexRange(c, width_);
5448 
5449  return values[c];
5450  }
5451  };
5452 } // namespace internal
5453 
5454 
5455 DEAL_II_NAMESPACE_CLOSE
5456 
5463 namespace std
5464 {
5472  template <typename Number, std::size_t width>
5473  inline ::VectorizedArray<Number, width>
5474  sin(const ::VectorizedArray<Number, width> &x)
5475  {
5476  // put values in an array and later read in that array with an unaligned
5477  // read. This should save some instructions as compared to directly
5478  // setting the individual elements and also circumvents a compiler
5479  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
5480  // from April 2014, topic "matrix_free/step-48 Test").
5481  Number values[::dealii::VectorizedArray<Number, width>::size()];
5482  for (unsigned int i = 0; i < ::dealii::VectorizedArray<Number, width>::size();
5483  ++i)
5484  values[i] = std::sin(x[i]);
5485  ::dealii::VectorizedArray<Number, width> out;
5486  out.load(&values[0]);
5487  return out;
5488  }
5489 
5490 
5491 
5499  template <typename Number, std::size_t width>
5500  inline ::dealii::VectorizedArray<Number, width>
5501  cos(const ::dealii::VectorizedArray<Number, width> &x)
5502  {
5503  Number values[::dealii::VectorizedArray<Number, width>::size()];
5504  for (unsigned int i = 0; i < ::dealii::VectorizedArray<Number, width>::size();
5505  ++i)
5506  values[i] = std::cos(x[i]);
5507  ::dealii::VectorizedArray<Number, width> out;
5508  out.load(&values[0]);
5509  return out;
5510  }
5511 
5512 
5513 
5521  template <typename Number, std::size_t width>
5522  inline ::dealii::VectorizedArray<Number, width>
5523  tan(const ::dealii::VectorizedArray<Number, width> &x)
5524  {
5525  Number values[::dealii::VectorizedArray<Number, width>::size()];
5526  for (unsigned int i = 0; i < ::dealii::VectorizedArray<Number, width>::size();
5527  ++i)
5528  values[i] = std::tan(x[i]);
5529  ::dealii::VectorizedArray<Number, width> out;
5530  out.load(&values[0]);
5531  return out;
5532  }
5533 
5534 
5535 
5543  template <typename Number, std::size_t width>
5544  inline ::dealii::VectorizedArray<Number, width>
5545  exp(const ::dealii::VectorizedArray<Number, width> &x)
5546  {
5547  Number values[::dealii::VectorizedArray<Number, width>::size()];
5548  for (unsigned int i = 0; i < ::dealii::VectorizedArray<Number, width>::size();
5549  ++i)
5550  values[i] = std::exp(x[i]);
5551  ::dealii::VectorizedArray<Number, width> out;
5552  out.load(&values[0]);
5553  return out;
5554  }
5555 
5556 
5557 
5565  template <typename Number, std::size_t width>
5566  inline ::dealii::VectorizedArray<Number, width>
5567  log(const ::dealii::VectorizedArray<Number, width> &x)
5568  {
5569  Number values[::dealii::VectorizedArray<Number, width>::size()];
5570  for (unsigned int i = 0; i < ::dealii::VectorizedArray<Number, width>::size();
5571  ++i)
5572  values[i] = std::log(x[i]);
5573  ::dealii::VectorizedArray<Number, width> out;
5574  out.load(&values[0]);
5575  return out;
5576  }
5577 
5578 
5579 
5587  template <typename Number, std::size_t width>
5588  inline ::dealii::VectorizedArray<Number, width>
5589  sqrt(const ::dealii::VectorizedArray<Number, width> &x)
5590  {
5591  return x.get_sqrt();
5592  }
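// Implementation note plus a small usage sketch (not part of vectorization.h
// itself; lane_wise_norm is a made-up name). In contrast to sin(), cos(),
// tan(), exp() and log() above, which fall back to a scalar loop, sqrt()
// forwards to VectorizedArray::get_sqrt(), which the width-specific
// specializations typically implement with a single hardware intrinsic
// (e.g. _mm_sqrt_pd for the SSE2 double variant). abs(), max() and min()
// further down follow the same pattern via get_abs(), get_max() and get_min().
//
//   #include <deal.II/base/vectorization.h>
//   #include <cmath>
//
//   dealii::VectorizedArray<double>
//   lane_wise_norm(const dealii::VectorizedArray<double> &a,
//                  const dealii::VectorizedArray<double> &b)
//   {
//     // std::sqrt picks up the overload above, i.e. get_sqrt() underneath.
//     return std::sqrt(a * a + b * b);
//   }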
5593 
5594 
5595 
5603  template <typename Number, std::size_t width>
5604  inline ::dealii::VectorizedArray<Number, width>
5605  pow(const ::dealii::VectorizedArray<Number, width> &x, const Number p)
5606  {
5607  Number values[::dealii::VectorizedArray<Number, width>::size()];
5608  for (unsigned int i = 0; i < ::dealii::VectorizedArray<Number, width>::size();
5609  ++i)
5610  values[i] = std::pow(x[i], p);
5611  ::dealii::VectorizedArray<Number, width> out;
5612  out.load(&values[0]);
5613  return out;
5614  }
5615 
5616 
5617 
5626  template <typename Number, std::size_t width>
5627  inline ::dealii::VectorizedArray<Number, width>
5628  pow(const ::dealii::VectorizedArray<Number, width> &x,
5629  const ::dealii::VectorizedArray<Number, width> &p)
5630  {
5631  Number values[::dealii::VectorizedArray<Number, width>::size()];
5632  for (unsigned int i = 0; i < ::dealii::VectorizedArray<Number, width>::size();
5633  ++i)
5634  values[i] = std::pow(x[i], p[i]);
5635  ::dealii::VectorizedArray<Number, width> out;
5636  out.load(&values[0]);
5637  return out;
5638  }
5639 
5640 
5641 
5649  template <typename Number, std::size_t width>
5650  inline ::dealii::VectorizedArray<Number, width>
5651  abs(const ::dealii::VectorizedArray<Number, width> &x)
5652  {
5653  return x.get_abs();
5654  }
5655 
5656 
5657 
5665  template <typename Number, std::size_t width>
5666  inline ::dealii::VectorizedArray<Number, width>
5667  max(const ::dealii::VectorizedArray<Number, width> &x,
5668  const ::dealii::VectorizedArray<Number, width> &y)
5669  {
5670  return x.get_max(y);
5671  }
5672 
5673 
5674 
5682  template <typename Number, std::size_t width>
5683  inline ::dealii::VectorizedArray<Number, width>
5684  min(const ::dealii::VectorizedArray<Number, width> &x,
5685  const ::dealii::VectorizedArray<Number, width> &y)
5686  {
5687  return x.get_min(y);
5688  }
5689 
5690 
5691 
5695  template <class T>
5696  struct iterator_traits<dealii::VectorizedArrayIterator<T>>
5697  {
5698  using iterator_category = random_access_iterator_tag;
5699  using value_type = typename T::value_type;
5700  using difference_type = std::ptrdiff_t;
5701  };
5702 
5703 } // namespace std
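// Illustrative usage sketch for the std overloads and the iterator support
// (not part of vectorization.h itself; vectorized_array_demo is a made-up
// name). The math overloads apply the named function lane by lane, and
// begin()/end() together with the iterator_traits specialization above let
// the lanes be traversed like an ordinary range.
//
//   #include <deal.II/base/vectorization.h>
//   #include <algorithm>
//   #include <cmath>
//
//   void vectorized_array_demo()
//   {
//     dealii::VectorizedArray<double> x(0.5); // broadcast 0.5 to all lanes
//     const auto s = std::sin(x);             // every lane holds sin(0.5)
//
//     double largest = s[0];
//     for (const double lane : s)             // range-based for over the lanes
//       largest = std::max(largest, lane);
//     (void)largest;
//   }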
5704 
5705 #endif