Reference documentation for deal.II version Git c633c6cdfb 2022-01-24 16:30:41 -0500
\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
vectorization.h
Go to the documentation of this file.
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
24 
25 #include <array>
26 #include <cmath>
27 
28 // Note:
29 // The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30 // according to the following scheme (on x86-based architectures)
31 // #ifdef __AVX512F__
32 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33 // #elif defined (__AVX__)
34 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35 // #elif defined (__SSE2__)
36 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37 // #else
38 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39 // #endif
40 // In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
41 // test, 'check_01_cpu_features.cmake', ensures that these feature are not only
42 // present in the compilation unit but also working properly.
43 
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45 
46 // These error messages try to detect the case that deal.II was compiled with
47 // a wider instruction set extension as the current compilation unit, for
48 // example because deal.II was compiled with AVX, but a user project does not
49 // add -march=native or similar flags, making it fall to SSE2. This leads to
50 // very strange errors as the size of data structures differs between the
51 // compiled deal.II code sitting in libdeal_II.so and the user code if not
52 // detected.
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54 # error \
55  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56 # endif
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58 # error \
59  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60 # endif
61 
62 # ifdef _MSC_VER
63 # include <intrin.h>
64 # elif defined(__ALTIVEC__)
65 # include <altivec.h>
66 
67 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68 // them before they make trouble
69 # undef vector
70 # undef pixel
71 # undef bool
72 # else
73 # include <x86intrin.h>
74 # endif
75 
76 #endif
77 
78 
80 
81 
82 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
83 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
84 
85 template <typename Number, std::size_t width>
86 struct EnableIfScalar<VectorizedArray<Number, width>>
87 {
89 };
90 
91 
92 
96 template <typename T>
98 {
99 public:
106  VectorizedArrayIterator(T &data, const std::size_t lane)
107  : data(&data)
108  , lane(lane)
109  {}
110 
114  bool
116  {
117  Assert(this->data == other.data,
118  ExcMessage(
119  "You are trying to compare iterators into different arrays."));
120  return this->lane == other.lane;
121  }
122 
126  bool
128  {
129  Assert(this->data == other.data,
130  ExcMessage(
131  "You are trying to compare iterators into different arrays."));
132  return this->lane != other.lane;
133  }
134 
139  operator=(const VectorizedArrayIterator<T> &other) = default;
140 
145  const typename T::value_type &
146  operator*() const
147  {
148  AssertIndexRange(lane, T::size());
149  return (*data)[lane];
150  }
151 
152 
157  template <typename U = T>
158  typename std::enable_if<!std::is_same<U, const U>::value,
159  typename T::value_type>::type &
161  {
162  AssertIndexRange(lane, T::size());
163  return (*data)[lane];
164  }
165 
173  {
174  AssertIndexRange(lane + 1, T::size() + 1);
175  lane++;
176  return *this;
177  }
178 
184  operator+=(const std::size_t offset)
185  {
186  AssertIndexRange(lane + offset, T::size() + 1);
187  lane += offset;
188  return *this;
189  }
190 
198  {
199  Assert(
200  lane > 0,
201  ExcMessage(
202  "You can't decrement an iterator that is already at the beginning of the range."));
203  --lane;
204  return *this;
205  }
206 
211  operator+(const std::size_t &offset) const
212  {
213  AssertIndexRange(lane + offset, T::size() + 1);
214  return VectorizedArrayIterator<T>(*data, lane + offset);
215  }
216 
220  std::ptrdiff_t
222  {
223  return static_cast<std::ptrdiff_t>(lane) -
224  static_cast<ptrdiff_t>(other.lane);
225  }
226 
227 private:
231  T *data;
232 
236  std::size_t lane;
237 };
238 
239 
240 
250 template <typename T, std::size_t width>
252 {
253 public:
257  VectorizedArrayBase() = default;
258 
262  template <typename U>
263  VectorizedArrayBase(const std::initializer_list<U> &list)
264  {
265  auto i0 = this->begin();
266  auto i1 = list.begin();
267 
268  for (; i1 != list.end(); ++i0, ++i1)
269  {
270  Assert(
271  i0 != this->end(),
272  ExcMessage(
273  "Initializer list exceeds size of this VectorizedArray object."));
274 
275  *i0 = *i1;
276  }
277 
278  for (; i0 != this->end(); ++i0)
279  {
280  *i0 = 0.0;
281  }
282  }
283 
287  static constexpr std::size_t
289  {
290  return width;
291  }
292 
298  {
299  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
300  }
301 
307  begin() const
308  {
309  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
310  }
311 
316  end()
317  {
318  return VectorizedArrayIterator<T>(static_cast<T &>(*this), width);
319  }
320 
326  end() const
327  {
328  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
329  width);
330  }
331 };
332 
333 
334 
419 template <typename Number, std::size_t width>
421  : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
422 {
423 public:
427  using value_type = Number;
428 
429  static_assert(width == 1,
430  "You specified an illegal width that is not supported.");
431 
436  VectorizedArray() = default;
437 
441  VectorizedArray(const Number scalar)
442  {
443  this->operator=(scalar);
444  }
445 
449  template <typename U>
450  VectorizedArray(const std::initializer_list<U> &list)
451  : VectorizedArrayBase<VectorizedArray<Number, width>, 1>(list)
452  {}
453 
459  operator=(const Number scalar)
460  {
461  data = scalar;
462  return *this;
463  }
464 
470  Number &
471  operator[](const unsigned int comp)
472  {
473  (void)comp;
474  AssertIndexRange(comp, 1);
475  return data;
476  }
477 
483  const Number &
484  operator[](const unsigned int comp) const
485  {
486  (void)comp;
487  AssertIndexRange(comp, 1);
488  return data;
489  }
490 
497  {
498  data += vec.data;
499  return *this;
500  }
501 
508  {
509  data -= vec.data;
510  return *this;
511  }
512 
519  {
520  data *= vec.data;
521  return *this;
522  }
523 
530  {
531  data /= vec.data;
532  return *this;
533  }
534 
542  void
543  load(const Number *ptr)
544  {
545  data = *ptr;
546  }
547 
555  void
556  store(Number *ptr) const
557  {
558  *ptr = data;
559  }
560 
608  void
609  streaming_store(Number *ptr) const
610  {
611  *ptr = data;
612  }
613 
627  void
628  gather(const Number *base_ptr, const unsigned int *offsets)
629  {
630  data = base_ptr[offsets[0]];
631  }
632 
646  void
647  scatter(const unsigned int *offsets, Number *base_ptr) const
648  {
649  base_ptr[offsets[0]] = data;
650  }
651 
657  Number data;
658 
659 private:
666  get_sqrt() const
667  {
668  VectorizedArray res;
669  res.data = std::sqrt(data);
670  return res;
671  }
672 
679  get_abs() const
680  {
681  VectorizedArray res;
682  res.data = std::fabs(data);
683  return res;
684  }
685 
692  get_max(const VectorizedArray &other) const
693  {
694  VectorizedArray res;
695  res.data = std::max(data, other.data);
696  return res;
697  }
698 
705  get_min(const VectorizedArray &other) const
706  {
707  VectorizedArray res;
708  res.data = std::min(data, other.data);
709  return res;
710  }
711 
712  // Make a few functions friends.
713  template <typename Number2, std::size_t width2>
715  std::sqrt(const VectorizedArray<Number2, width2> &);
716  template <typename Number2, std::size_t width2>
718  std::abs(const VectorizedArray<Number2, width2> &);
719  template <typename Number2, std::size_t width2>
721  std::max(const VectorizedArray<Number2, width2> &,
723  template <typename Number2, std::size_t width2>
725  std::min(const VectorizedArray<Number2, width2> &,
727 };
728 
729 
730 
735 
736 
743 template <typename Number,
744  std::size_t width =
747  make_vectorized_array(const Number &u)
748 {
750  return result;
751 }
752 
753 
754 
761 template <typename VectorizedArrayType>
763 make_vectorized_array(const typename VectorizedArrayType::value_type &u)
764 {
765  static_assert(
766  std::is_same<VectorizedArrayType,
767  VectorizedArray<typename VectorizedArrayType::value_type,
768  VectorizedArrayType::size()>>::value,
769  "VectorizedArrayType is not a VectorizedArray.");
770 
771  VectorizedArrayType result = u;
772  return result;
773 }
774 
775 
776 
788 template <typename Number, std::size_t width>
789 inline DEAL_II_ALWAYS_INLINE void
791  const std::array<Number *, width> &ptrs,
792  const unsigned int offset)
793 {
794  for (unsigned int v = 0; v < width; ++v)
795  out.data[v] = ptrs[v][offset];
796 }
797 
798 
799 
825 template <typename Number, std::size_t width>
826 inline DEAL_II_ALWAYS_INLINE void
827 vectorized_load_and_transpose(const unsigned int n_entries,
828  const Number * in,
829  const unsigned int * offsets,
831 {
832  for (unsigned int i = 0; i < n_entries; ++i)
833  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
834  out[i][v] = in[offsets[v] + i];
835 }
836 
837 
849 template <typename Number, std::size_t width>
850 inline DEAL_II_ALWAYS_INLINE void
851 vectorized_load_and_transpose(const unsigned int n_entries,
852  const std::array<Number *, width> &in,
854 {
855  for (unsigned int i = 0; i < n_entries; ++i)
856  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
857  out[i][v] = in[v][i];
858 }
859 
860 
861 
900 template <typename Number, std::size_t width>
901 inline DEAL_II_ALWAYS_INLINE void
902 vectorized_transpose_and_store(const bool add_into,
903  const unsigned int n_entries,
905  const unsigned int * offsets,
906  Number * out)
907 {
908  if (add_into)
909  for (unsigned int i = 0; i < n_entries; ++i)
910  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
911  out[offsets[v] + i] += in[i][v];
912  else
913  for (unsigned int i = 0; i < n_entries; ++i)
914  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
915  out[offsets[v] + i] = in[i][v];
916 }
917 
918 
930 template <typename Number, std::size_t width>
931 inline DEAL_II_ALWAYS_INLINE void
932 vectorized_transpose_and_store(const bool add_into,
933  const unsigned int n_entries,
935  std::array<Number *, width> & out)
936 {
937  if (add_into)
938  for (unsigned int i = 0; i < n_entries; ++i)
939  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
940  out[v][i] += in[i][v];
941  else
942  for (unsigned int i = 0; i < n_entries; ++i)
943  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
944  out[v][i] = in[i][v];
945 }
946 
947 
949 
950 #ifndef DOXYGEN
951 
952 // for safety, also check that __AVX512F__ is defined in case the user manually
953 // set some conflicting compile flags which prevent compilation
954 
955 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
956 
960 template <>
961 class VectorizedArray<double, 8>
962  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
963 {
964 public:
968  using value_type = double;
969 
974  VectorizedArray() = default;
975 
979  VectorizedArray(const double scalar)
980  {
981  this->operator=(scalar);
982  }
983 
987  template <typename U>
988  VectorizedArray(const std::initializer_list<U> &list)
990  {}
991 
997  operator=(const double x)
998  {
999  data = _mm512_set1_pd(x);
1000  return *this;
1001  }
1002 
1007  double &
1008  operator[](const unsigned int comp)
1009  {
1010  AssertIndexRange(comp, 8);
1011  return *(reinterpret_cast<double *>(&data) + comp);
1012  }
1013 
1018  const double &
1019  operator[](const unsigned int comp) const
1020  {
1021  AssertIndexRange(comp, 8);
1022  return *(reinterpret_cast<const double *>(&data) + comp);
1023  }
1024 
1029  VectorizedArray &
1030  operator+=(const VectorizedArray &vec)
1031  {
1032  // if the compiler supports vector arithmetic, we can simply use +=
1033  // operator on the given data type. this allows the compiler to combine
1034  // additions with multiplication (fused multiply-add) if those
1035  // instructions are available. Otherwise, we need to use the built-in
1036  // intrinsic command for __m512d
1037 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1038  data += vec.data;
1039 # else
1040  data = _mm512_add_pd(data, vec.data);
1041 # endif
1042  return *this;
1043  }
1044 
1049  VectorizedArray &
1050  operator-=(const VectorizedArray &vec)
1051  {
1052 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1053  data -= vec.data;
1054 # else
1055  data = _mm512_sub_pd(data, vec.data);
1056 # endif
1057  return *this;
1058  }
1063  VectorizedArray &
1064  operator*=(const VectorizedArray &vec)
1065  {
1066 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1067  data *= vec.data;
1068 # else
1069  data = _mm512_mul_pd(data, vec.data);
1070 # endif
1071  return *this;
1072  }
1073 
1078  VectorizedArray &
1079  operator/=(const VectorizedArray &vec)
1080  {
1081 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1082  data /= vec.data;
1083 # else
1084  data = _mm512_div_pd(data, vec.data);
1085 # endif
1086  return *this;
1087  }
1088 
1095  void
1096  load(const double *ptr)
1097  {
1098  data = _mm512_loadu_pd(ptr);
1099  }
1100 
1108  void
1109  store(double *ptr) const
1110  {
1111  _mm512_storeu_pd(ptr, data);
1112  }
1113 
1118  void
1119  streaming_store(double *ptr) const
1120  {
1121  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1122  ExcMessage("Memory not aligned"));
1123  _mm512_stream_pd(ptr, data);
1124  }
1125 
1139  void
1140  gather(const double *base_ptr, const unsigned int *offsets)
1141  {
1142  // unfortunately, there does not appear to be a 256 bit integer load, so
1143  // do it by some reinterpret casts here. this is allowed because the Intel
1144  // API allows aliasing between different vector types.
1145  const __m256 index_val =
1146  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1147  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1148  data = _mm512_i32gather_pd(index, base_ptr, 8);
1149  }
1150 
1164  void
1165  scatter(const unsigned int *offsets, double *base_ptr) const
1166  {
1167  for (unsigned int i = 0; i < 8; ++i)
1168  for (unsigned int j = i + 1; j < 8; ++j)
1169  Assert(offsets[i] != offsets[j],
1170  ExcMessage("Result of scatter undefined if two offset elements"
1171  " point to the same position"));
1172 
1173  // unfortunately, there does not appear to be a 256 bit integer load, so
1174  // do it by some reinterpret casts here. this is allowed because the Intel
1175  // API allows aliasing between different vector types.
1176  const __m256 index_val =
1177  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1178  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1179  _mm512_i32scatter_pd(base_ptr, index, data, 8);
1180  }
1181 
1187  __m512d data;
1188 
1189 private:
1196  get_sqrt() const
1197  {
1198  VectorizedArray res;
1199  res.data = _mm512_sqrt_pd(data);
1200  return res;
1201  }
1202 
1209  get_abs() const
1210  {
1211  // to compute the absolute value, perform bitwise andnot with -0. This
1212  // will leave all value and exponent bits unchanged but force the sign
1213  // value to +. Since there is no andnot for AVX512, we interpret the data
1214  // as 64 bit integers and do the andnot on those types (note that andnot
1215  // is a bitwise operation so the data type does not matter)
1216  __m512d mask = _mm512_set1_pd(-0.);
1217  VectorizedArray res;
1218  res.data = reinterpret_cast<__m512d>(
1219  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1220  reinterpret_cast<__m512i>(data)));
1221  return res;
1222  }
1223 
1230  get_max(const VectorizedArray &other) const
1231  {
1232  VectorizedArray res;
1233  res.data = _mm512_max_pd(data, other.data);
1234  return res;
1235  }
1236 
1243  get_min(const VectorizedArray &other) const
1244  {
1245  VectorizedArray res;
1246  res.data = _mm512_min_pd(data, other.data);
1247  return res;
1248  }
1249 
1250  // Make a few functions friends.
1251  template <typename Number2, std::size_t width2>
1253  std::sqrt(const VectorizedArray<Number2, width2> &);
1254  template <typename Number2, std::size_t width2>
1256  std::abs(const VectorizedArray<Number2, width2> &);
1257  template <typename Number2, std::size_t width2>
1259  std::max(const VectorizedArray<Number2, width2> &,
1261  template <typename Number2, std::size_t width2>
1263  std::min(const VectorizedArray<Number2, width2> &,
1265 };
1266 
1267 
1268 
1272 template <>
1273 inline DEAL_II_ALWAYS_INLINE void
1274 vectorized_load_and_transpose(const unsigned int n_entries,
1275  const double * in,
1276  const unsigned int * offsets,
1278 {
1279  // do not do full transpose because the code is long and will most
1280  // likely not pay off because many processors have two load units
1281  // (for the top 8 instructions) but only 1 permute unit (for the 8
1282  // shuffle/unpack instructions). rather start the transposition on the
1283  // vectorized array of half the size with 256 bits
1284  const unsigned int n_chunks = n_entries / 4;
1285  for (unsigned int i = 0; i < n_chunks; ++i)
1286  {
1287  __m512d t0, t1, t2, t3 = {};
1288 
1289  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1290  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1291  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1292  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1293  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1294  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1295  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1296  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1297 
1298  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1299  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1300  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1301  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1302  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1303  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1304  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1305  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1306  }
1307  // remainder loop of work that does not divide by 4
1308  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1309  out[i].gather(in + i, offsets);
1310 }
1311 
1312 
1313 
1317 template <>
1318 inline DEAL_II_ALWAYS_INLINE void
1319 vectorized_load_and_transpose(const unsigned int n_entries,
1320  const std::array<double *, 8> &in,
1322 {
1323  const unsigned int n_chunks = n_entries / 4;
1324  for (unsigned int i = 0; i < n_chunks; ++i)
1325  {
1326  __m512d t0, t1, t2, t3 = {};
1327 
1328  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1329  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1330  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1331  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1332  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1333  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1334  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1335  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1336 
1337  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1338  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1339  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1340  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1341  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1342  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1343  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1344  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1345  }
1346 
1347  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1348  gather(out[i], in, i);
1349 }
1350 
1351 
1352 
1356 template <>
1357 inline DEAL_II_ALWAYS_INLINE void
1358 vectorized_transpose_and_store(const bool add_into,
1359  const unsigned int n_entries,
1360  const VectorizedArray<double, 8> *in,
1361  const unsigned int * offsets,
1362  double * out)
1363 {
1364  // as for the load, we split the store operations into 256 bit units to
1365  // better balance between code size, shuffle instructions, and stores
1366  const unsigned int n_chunks = n_entries / 4;
1367  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1368  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1369  for (unsigned int i = 0; i < n_chunks; ++i)
1370  {
1371  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1372  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1373  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1374  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1375  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1376  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1377  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1378  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1379  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1380  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1381  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1382  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1383  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1384  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1385  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1386  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1387 
1388  // Cannot use the same store instructions in both paths of the 'if'
1389  // because the compiler cannot know that there is no aliasing
1390  // between pointers
1391  if (add_into)
1392  {
1393  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1394  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1395  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1396  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1397  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1398  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1399  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1400  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1401  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1402  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1403  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1404  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1405  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1406  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1407  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1408  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1409  }
1410  else
1411  {
1412  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1413  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1414  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1415  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1416  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1417  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1418  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1419  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1420  }
1421  }
1422 
1423  // remainder loop of work that does not divide by 4
1424  if (add_into)
1425  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1426  for (unsigned int v = 0; v < 8; ++v)
1427  out[offsets[v] + i] += in[i][v];
1428  else
1429  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1430  for (unsigned int v = 0; v < 8; ++v)
1431  out[offsets[v] + i] = in[i][v];
1432 }
1433 
1434 
1435 
1439 template <>
1440 inline DEAL_II_ALWAYS_INLINE void
1441 vectorized_transpose_and_store(const bool add_into,
1442  const unsigned int n_entries,
1443  const VectorizedArray<double, 8> *in,
1444  std::array<double *, 8> & out)
1445 {
1446  // see the comments in the vectorized_transpose_and_store above
1447 
1448  const unsigned int n_chunks = n_entries / 4;
1449  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1450  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1451  for (unsigned int i = 0; i < n_chunks; ++i)
1452  {
1453  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1454  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1455  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1456  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1457  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1458  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1459  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1460  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1461  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1462  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1463  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1464  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1465  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1466  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1467  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1468  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1469 
1470  if (add_into)
1471  {
1472  res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1473  _mm256_storeu_pd(out[0] + 4 * i, res0);
1474  res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1475  _mm256_storeu_pd(out[1] + 4 * i, res1);
1476  res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1477  _mm256_storeu_pd(out[2] + 4 * i, res2);
1478  res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1479  _mm256_storeu_pd(out[3] + 4 * i, res3);
1480  res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1481  _mm256_storeu_pd(out[4] + 4 * i, res4);
1482  res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1483  _mm256_storeu_pd(out[5] + 4 * i, res5);
1484  res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1485  _mm256_storeu_pd(out[6] + 4 * i, res6);
1486  res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1487  _mm256_storeu_pd(out[7] + 4 * i, res7);
1488  }
1489  else
1490  {
1491  _mm256_storeu_pd(out[0] + 4 * i, res0);
1492  _mm256_storeu_pd(out[1] + 4 * i, res1);
1493  _mm256_storeu_pd(out[2] + 4 * i, res2);
1494  _mm256_storeu_pd(out[3] + 4 * i, res3);
1495  _mm256_storeu_pd(out[4] + 4 * i, res4);
1496  _mm256_storeu_pd(out[5] + 4 * i, res5);
1497  _mm256_storeu_pd(out[6] + 4 * i, res6);
1498  _mm256_storeu_pd(out[7] + 4 * i, res7);
1499  }
1500  }
1501 
1502  if (add_into)
1503  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1504  for (unsigned int v = 0; v < 8; ++v)
1505  out[v][i] += in[i][v];
1506  else
1507  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1508  for (unsigned int v = 0; v < 8; ++v)
1509  out[v][i] = in[i][v];
1510 }
1511 
1512 
1513 
1517 template <>
1518 class VectorizedArray<float, 16>
1519  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
1520 {
1521 public:
1525  using value_type = float;
1526 
1531  VectorizedArray() = default;
1532 
1536  VectorizedArray(const float scalar)
1537  {
1538  this->operator=(scalar);
1539  }
1540 
1544  template <typename U>
1545  VectorizedArray(const std::initializer_list<U> &list)
1547  {}
1548 
1553  VectorizedArray &
1554  operator=(const float x)
1555  {
1556  data = _mm512_set1_ps(x);
1557  return *this;
1558  }
1559 
1564  float &
1565  operator[](const unsigned int comp)
1566  {
1567  AssertIndexRange(comp, 16);
1568  return *(reinterpret_cast<float *>(&data) + comp);
1569  }
1570 
1575  const float &
1576  operator[](const unsigned int comp) const
1577  {
1578  AssertIndexRange(comp, 16);
1579  return *(reinterpret_cast<const float *>(&data) + comp);
1580  }
1581 
1586  VectorizedArray &
1587  operator+=(const VectorizedArray &vec)
1588  {
1589  // if the compiler supports vector arithmetic, we can simply use +=
1590  // operator on the given data type. this allows the compiler to combine
1591  // additions with multiplication (fused multiply-add) if those
1592  // instructions are available. Otherwise, we need to use the built-in
1593  // intrinsic command for __m512d
1594 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1595  data += vec.data;
1596 # else
1597  data = _mm512_add_ps(data, vec.data);
1598 # endif
1599  return *this;
1600  }
1601 
1606  VectorizedArray &
1607  operator-=(const VectorizedArray &vec)
1608  {
1609 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1610  data -= vec.data;
1611 # else
1612  data = _mm512_sub_ps(data, vec.data);
1613 # endif
1614  return *this;
1615  }
1620  VectorizedArray &
1621  operator*=(const VectorizedArray &vec)
1622  {
1623 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1624  data *= vec.data;
1625 # else
1626  data = _mm512_mul_ps(data, vec.data);
1627 # endif
1628  return *this;
1629  }
1630 
1635  VectorizedArray &
1636  operator/=(const VectorizedArray &vec)
1637  {
1638 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1639  data /= vec.data;
1640 # else
1641  data = _mm512_div_ps(data, vec.data);
1642 # endif
1643  return *this;
1644  }
1645 
1652  void
1653  load(const float *ptr)
1654  {
1655  data = _mm512_loadu_ps(ptr);
1656  }
1657 
1665  void
1666  store(float *ptr) const
1667  {
1668  _mm512_storeu_ps(ptr, data);
1669  }
1670 
1675  void
1676  streaming_store(float *ptr) const
1677  {
1678  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1679  ExcMessage("Memory not aligned"));
1680  _mm512_stream_ps(ptr, data);
1681  }
1682 
1696  void
1697  gather(const float *base_ptr, const unsigned int *offsets)
1698  {
1699  // unfortunately, there does not appear to be a 512 bit integer load, so
1700  // do it by some reinterpret casts here. this is allowed because the Intel
1701  // API allows aliasing between different vector types.
1702  const __m512 index_val =
1703  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1704  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1705  data = _mm512_i32gather_ps(index, base_ptr, 4);
1706  }
1707 
1721  void
1722  scatter(const unsigned int *offsets, float *base_ptr) const
1723  {
1724  for (unsigned int i = 0; i < 16; ++i)
1725  for (unsigned int j = i + 1; j < 16; ++j)
1726  Assert(offsets[i] != offsets[j],
1727  ExcMessage("Result of scatter undefined if two offset elements"
1728  " point to the same position"));
1729 
1730  // unfortunately, there does not appear to be a 512 bit integer load, so
1731  // do it by some reinterpret casts here. this is allowed because the Intel
1732  // API allows aliasing between different vector types.
1733  const __m512 index_val =
1734  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1735  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1736  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1737  }
1738 
1744  __m512 data;
1745 
1746 private:
1753  get_sqrt() const
1754  {
1755  VectorizedArray res;
1756  res.data = _mm512_sqrt_ps(data);
1757  return res;
1758  }
1759 
1766  get_abs() const
1767  {
1768  // to compute the absolute value, perform bitwise andnot with -0. This
1769  // will leave all value and exponent bits unchanged but force the sign
1770  // value to +. Since there is no andnot for AVX512, we interpret the data
1771  // as 32 bit integers and do the andnot on those types (note that andnot
1772  // is a bitwise operation so the data type does not matter)
1773  __m512 mask = _mm512_set1_ps(-0.f);
1774  VectorizedArray res;
1775  res.data = reinterpret_cast<__m512>(
1776  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1777  reinterpret_cast<__m512i>(data)));
1778  return res;
1779  }
1780 
1787  get_max(const VectorizedArray &other) const
1788  {
1789  VectorizedArray res;
1790  res.data = _mm512_max_ps(data, other.data);
1791  return res;
1792  }
1793 
1800  get_min(const VectorizedArray &other) const
1801  {
1802  VectorizedArray res;
1803  res.data = _mm512_min_ps(data, other.data);
1804  return res;
1805  }
1806 
1807  // Make a few functions friends.
1808  template <typename Number2, std::size_t width2>
1810  std::sqrt(const VectorizedArray<Number2, width2> &);
1811  template <typename Number2, std::size_t width2>
1813  std::abs(const VectorizedArray<Number2, width2> &);
1814  template <typename Number2, std::size_t width2>
1816  std::max(const VectorizedArray<Number2, width2> &,
1818  template <typename Number2, std::size_t width2>
1820  std::min(const VectorizedArray<Number2, width2> &,
1822 };
1823 
1824 
1825 
1829 template <>
1830 inline DEAL_II_ALWAYS_INLINE void
1831 vectorized_load_and_transpose(const unsigned int n_entries,
1832  const float * in,
1833  const unsigned int * offsets,
1835 {
1836  // Similar to the double case, we perform the work on smaller entities. In
1837  // this case, we start from 128 bit arrays and insert them into a full 512
1838  // bit index. This reduces the code size and register pressure because we do
1839  // shuffles on 4 numbers rather than 16.
1840  const unsigned int n_chunks = n_entries / 4;
1841 
1842  // To avoid warnings about uninitialized variables, need to initialize one
1843  // variable to a pre-exisiting value in out, which will never get used in
1844  // the end. Keep the initialization outside the loop because of a bug in
1845  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
1846  // case t3 is initialized to zero (inside/outside of loop), see
1847  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1848  __m512 t0, t1, t2, t3;
1849  if (n_chunks > 0)
1850  t3 = out[0].data;
1851  for (unsigned int i = 0; i < n_chunks; ++i)
1852  {
1853  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1854  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1855  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1856  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1857  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1858  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1859  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1860  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1861  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1862  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1863  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1864  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1865  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1866  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1867  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1868  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1869 
1870  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1871  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1872  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1873  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1874 
1875  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1876  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1877  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1878  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1879  }
1880 
1881  // remainder loop of work that does not divide by 4
1882  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1883  out[i].gather(in + i, offsets);
1884 }
1885 
1886 
1887 
1891 template <>
1892 inline DEAL_II_ALWAYS_INLINE void
1893 vectorized_load_and_transpose(const unsigned int n_entries,
1894  const std::array<float *, 16> &in,
1896 {
1897  // see the comments in the vectorized_load_and_transpose above
1898 
1899  const unsigned int n_chunks = n_entries / 4;
1900 
1901  __m512 t0, t1, t2, t3;
1902  if (n_chunks > 0)
1903  t3 = out[0].data;
1904  for (unsigned int i = 0; i < n_chunks; ++i)
1905  {
1906  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1907  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1908  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1909  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1910  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1911  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1912  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1913  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1914  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1915  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1916  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1917  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1918  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1919  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1920  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1921  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1922 
1923  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1924  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1925  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1926  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1927 
1928  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1929  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1930  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1931  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1932  }
1933 
1934  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1935  gather(out[i], in, i);
1936 }
1937 
1938 
1939 
1943 template <>
1944 inline DEAL_II_ALWAYS_INLINE void
1945 vectorized_transpose_and_store(const bool add_into,
1946  const unsigned int n_entries,
1947  const VectorizedArray<float, 16> *in,
1948  const unsigned int * offsets,
1949  float * out)
1950 {
1951  const unsigned int n_chunks = n_entries / 4;
1952  for (unsigned int i = 0; i < n_chunks; ++i)
1953  {
1954  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1955  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1956  __m512 t2 =
1957  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1958  __m512 t3 =
1959  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1960  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1961  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1962  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1963  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1964 
1965  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1966  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1967  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1968  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1969  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1970  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1971  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1972  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1973  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1974  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1975  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1976  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1977  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1978  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1979  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1980  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1981 
1982  // Cannot use the same store instructions in both paths of the 'if'
1983  // because the compiler cannot know that there is no aliasing between
1984  // pointers
1985  if (add_into)
1986  {
1987  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
1988  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1989  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
1990  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1991  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
1992  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1993  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
1994  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1995  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
1996  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1997  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
1998  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1999  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2000  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2001  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2002  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2003  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
2004  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2005  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
2006  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2007  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
2008  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2009  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
2010  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2011  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
2012  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2013  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
2014  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2015  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
2016  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2017  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
2018  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2019  }
2020  else
2021  {
2022  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2023  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2024  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2025  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2026  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2027  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2028  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2029  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2030  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2031  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2032  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2033  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2034  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2035  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2036  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2037  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2038  }
2039  }
2040 
2041  // remainder loop of work that does not divide by 4
2042  if (add_into)
2043  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2044  for (unsigned int v = 0; v < 16; ++v)
2045  out[offsets[v] + i] += in[i][v];
2046  else
2047  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2048  for (unsigned int v = 0; v < 16; ++v)
2049  out[offsets[v] + i] = in[i][v];
2050 }
2051 
2052 
2053 
2057 template <>
2058 inline DEAL_II_ALWAYS_INLINE void
2059 vectorized_transpose_and_store(const bool add_into,
2060  const unsigned int n_entries,
2061  const VectorizedArray<float, 16> *in,
2062  std::array<float *, 16> & out)
2063 {
2064  // see the comments in the vectorized_transpose_and_store above
2065 
2066  const unsigned int n_chunks = n_entries / 4;
2067  for (unsigned int i = 0; i < n_chunks; ++i)
2068  {
2069  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2070  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2071  __m512 t2 =
2072  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2073  __m512 t3 =
2074  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2075  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2076  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2077  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2078  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2079 
2080  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2081  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2082  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2083  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2084  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2085  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2086  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2087  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2088  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2089  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2090  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2091  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2092  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2093  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2094  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2095  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2096 
2097  if (add_into)
2098  {
2099  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2100  _mm_storeu_ps(out[0] + 4 * i, res0);
2101  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2102  _mm_storeu_ps(out[1] + 4 * i, res1);
2103  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2104  _mm_storeu_ps(out[2] + 4 * i, res2);
2105  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2106  _mm_storeu_ps(out[3] + 4 * i, res3);
2107  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2108  _mm_storeu_ps(out[4] + 4 * i, res4);
2109  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2110  _mm_storeu_ps(out[5] + 4 * i, res5);
2111  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2112  _mm_storeu_ps(out[6] + 4 * i, res6);
2113  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2114  _mm_storeu_ps(out[7] + 4 * i, res7);
2115  res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2116  _mm_storeu_ps(out[8] + 4 * i, res8);
2117  res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2118  _mm_storeu_ps(out[9] + 4 * i, res9);
2119  res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2120  _mm_storeu_ps(out[10] + 4 * i, res10);
2121  res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2122  _mm_storeu_ps(out[11] + 4 * i, res11);
2123  res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2124  _mm_storeu_ps(out[12] + 4 * i, res12);
2125  res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2126  _mm_storeu_ps(out[13] + 4 * i, res13);
2127  res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2128  _mm_storeu_ps(out[14] + 4 * i, res14);
2129  res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2130  _mm_storeu_ps(out[15] + 4 * i, res15);
2131  }
2132  else
2133  {
2134  _mm_storeu_ps(out[0] + 4 * i, res0);
2135  _mm_storeu_ps(out[1] + 4 * i, res1);
2136  _mm_storeu_ps(out[2] + 4 * i, res2);
2137  _mm_storeu_ps(out[3] + 4 * i, res3);
2138  _mm_storeu_ps(out[4] + 4 * i, res4);
2139  _mm_storeu_ps(out[5] + 4 * i, res5);
2140  _mm_storeu_ps(out[6] + 4 * i, res6);
2141  _mm_storeu_ps(out[7] + 4 * i, res7);
2142  _mm_storeu_ps(out[8] + 4 * i, res8);
2143  _mm_storeu_ps(out[9] + 4 * i, res9);
2144  _mm_storeu_ps(out[10] + 4 * i, res10);
2145  _mm_storeu_ps(out[11] + 4 * i, res11);
2146  _mm_storeu_ps(out[12] + 4 * i, res12);
2147  _mm_storeu_ps(out[13] + 4 * i, res13);
2148  _mm_storeu_ps(out[14] + 4 * i, res14);
2149  _mm_storeu_ps(out[15] + 4 * i, res15);
2150  }
2151  }
2152 
2153  if (add_into)
2154  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2155  for (unsigned int v = 0; v < 16; ++v)
2156  out[v][i] += in[i][v];
2157  else
2158  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2159  for (unsigned int v = 0; v < 16; ++v)
2160  out[v][i] = in[i][v];
2161 }
2162 
2163 # endif
2164 
2165 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2166 
2170 template <>
2171 class VectorizedArray<double, 4>
2172  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2173 {
2174 public:
2178  using value_type = double;
2179 
2184  VectorizedArray() = default;
2185 
2189  VectorizedArray(const double scalar)
2190  {
2191  this->operator=(scalar);
2192  }
2193 
2197  template <typename U>
2198  VectorizedArray(const std::initializer_list<U> &list)
2200  {}
2201 
2206  VectorizedArray &
2207  operator=(const double x)
2208  {
2209  data = _mm256_set1_pd(x);
2210  return *this;
2211  }
2212 
2217  double &
2218  operator[](const unsigned int comp)
2219  {
2220  AssertIndexRange(comp, 4);
2221  return *(reinterpret_cast<double *>(&data) + comp);
2222  }
2223 
2228  const double &
2229  operator[](const unsigned int comp) const
2230  {
2231  AssertIndexRange(comp, 4);
2232  return *(reinterpret_cast<const double *>(&data) + comp);
2233  }
2234 
2239  VectorizedArray &
2240  operator+=(const VectorizedArray &vec)
2241  {
2242  // if the compiler supports vector arithmetic, we can simply use +=
2243  // operator on the given data type. this allows the compiler to combine
2244  // additions with multiplication (fused multiply-add) if those
2245  // instructions are available. Otherwise, we need to use the built-in
2246  // intrinsic command for __m256d
2247 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2248  data += vec.data;
2249 # else
2250  data = _mm256_add_pd(data, vec.data);
2251 # endif
2252  return *this;
2253  }
2254 
2259  VectorizedArray &
2260  operator-=(const VectorizedArray &vec)
2261  {
2262 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2263  data -= vec.data;
2264 # else
2265  data = _mm256_sub_pd(data, vec.data);
2266 # endif
2267  return *this;
2268  }
2273  VectorizedArray &
2274  operator*=(const VectorizedArray &vec)
2275  {
2276 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2277  data *= vec.data;
2278 # else
2279  data = _mm256_mul_pd(data, vec.data);
2280 # endif
2281  return *this;
2282  }
2283 
2288  VectorizedArray &
2289  operator/=(const VectorizedArray &vec)
2290  {
2291 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2292  data /= vec.data;
2293 # else
2294  data = _mm256_div_pd(data, vec.data);
2295 # endif
2296  return *this;
2297  }
2298 
2305  void
2306  load(const double *ptr)
2307  {
2308  data = _mm256_loadu_pd(ptr);
2309  }
2310 
2318  void
2319  store(double *ptr) const
2320  {
2321  _mm256_storeu_pd(ptr, data);
2322  }
2323 
2328  void
2329  streaming_store(double *ptr) const
2330  {
2331  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2332  ExcMessage("Memory not aligned"));
2333  _mm256_stream_pd(ptr, data);
2334  }
2335 
2349  void
2350  gather(const double *base_ptr, const unsigned int *offsets)
2351  {
2352 # ifdef __AVX2__
2353  // unfortunately, there does not appear to be a 128 bit integer load, so
2354  // do it by some reinterpret casts here. this is allowed because the Intel
2355  // API allows aliasing between different vector types.
2356  const __m128 index_val =
2357  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2358  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2359  data = _mm256_i32gather_pd(base_ptr, index, 8);
2360 # else
2361  for (unsigned int i = 0; i < 4; ++i)
2362  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2363 # endif
2364  }
2365 
2379  void
2380  scatter(const unsigned int *offsets, double *base_ptr) const
2381  {
2382  // no scatter operation in AVX/AVX2
2383  for (unsigned int i = 0; i < 4; ++i)
2384  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2385  }
2386 
2392  __m256d data;
2393 
2394 private:
2401  get_sqrt() const
2402  {
2403  VectorizedArray res;
2404  res.data = _mm256_sqrt_pd(data);
2405  return res;
2406  }
2407 
2414  get_abs() const
2415  {
2416  // to compute the absolute value, perform bitwise andnot with -0. This
2417  // will leave all value and exponent bits unchanged but force the sign
2418  // value to +.
2419  __m256d mask = _mm256_set1_pd(-0.);
2420  VectorizedArray res;
2421  res.data = _mm256_andnot_pd(mask, data);
2422  return res;
2423  }
2424 
2431  get_max(const VectorizedArray &other) const
2432  {
2433  VectorizedArray res;
2434  res.data = _mm256_max_pd(data, other.data);
2435  return res;
2436  }
2437 
2444  get_min(const VectorizedArray &other) const
2445  {
2446  VectorizedArray res;
2447  res.data = _mm256_min_pd(data, other.data);
2448  return res;
2449  }
2450 
2451  // Make a few functions friends.
2452  template <typename Number2, std::size_t width2>
2454  std::sqrt(const VectorizedArray<Number2, width2> &);
2455  template <typename Number2, std::size_t width2>
2457  std::abs(const VectorizedArray<Number2, width2> &);
2458  template <typename Number2, std::size_t width2>
2460  std::max(const VectorizedArray<Number2, width2> &,
2462  template <typename Number2, std::size_t width2>
2464  std::min(const VectorizedArray<Number2, width2> &,
2466 };
2467 
2468 
2469 
2473 template <>
2474 inline DEAL_II_ALWAYS_INLINE void
2475 vectorized_load_and_transpose(const unsigned int n_entries,
2476  const double * in,
2477  const unsigned int * offsets,
2479 {
2480  const unsigned int n_chunks = n_entries / 4;
2481  const double * in0 = in + offsets[0];
2482  const double * in1 = in + offsets[1];
2483  const double * in2 = in + offsets[2];
2484  const double * in3 = in + offsets[3];
2485 
2486  for (unsigned int i = 0; i < n_chunks; ++i)
2487  {
2488  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2489  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2490  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2491  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2492  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2493  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2494  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2495  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2496  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2497  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2498  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2499  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2500  }
2501 
2502  // remainder loop of work that does not divide by 4
2503  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2504  out[i].gather(in + i, offsets);
2505 }
2506 
2507 
2508 
2512 template <>
2513 inline DEAL_II_ALWAYS_INLINE void
2514 vectorized_load_and_transpose(const unsigned int n_entries,
2515  const std::array<double *, 4> &in,
2517 {
2518  // see the comments in the vectorized_load_and_transpose above
2519 
2520  const unsigned int n_chunks = n_entries / 4;
2521  const double * in0 = in[0];
2522  const double * in1 = in[1];
2523  const double * in2 = in[2];
2524  const double * in3 = in[3];
2525 
2526  for (unsigned int i = 0; i < n_chunks; ++i)
2527  {
2528  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2529  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2530  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2531  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2532  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2533  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2534  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2535  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2536  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2537  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2538  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2539  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2540  }
2541 
2542  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2543  gather(out[i], in, i);
2544 }
2545 
2546 
2547 
2551 template <>
2552 inline DEAL_II_ALWAYS_INLINE void
2553 vectorized_transpose_and_store(const bool add_into,
2554  const unsigned int n_entries,
2555  const VectorizedArray<double, 4> *in,
2556  const unsigned int * offsets,
2557  double * out)
2558 {
2559  const unsigned int n_chunks = n_entries / 4;
2560  double * out0 = out + offsets[0];
2561  double * out1 = out + offsets[1];
2562  double * out2 = out + offsets[2];
2563  double * out3 = out + offsets[3];
2564  for (unsigned int i = 0; i < n_chunks; ++i)
2565  {
2566  __m256d u0 = in[4 * i + 0].data;
2567  __m256d u1 = in[4 * i + 1].data;
2568  __m256d u2 = in[4 * i + 2].data;
2569  __m256d u3 = in[4 * i + 3].data;
2570  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2571  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2572  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2573  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2574  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2575  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2576  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2577  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2578 
2579  // Cannot use the same store instructions in both paths of the 'if'
2580  // because the compiler cannot know that there is no aliasing between
2581  // pointers
2582  if (add_into)
2583  {
2584  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2585  _mm256_storeu_pd(out0 + 4 * i, res0);
2586  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2587  _mm256_storeu_pd(out1 + 4 * i, res1);
2588  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2589  _mm256_storeu_pd(out2 + 4 * i, res2);
2590  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2591  _mm256_storeu_pd(out3 + 4 * i, res3);
2592  }
2593  else
2594  {
2595  _mm256_storeu_pd(out0 + 4 * i, res0);
2596  _mm256_storeu_pd(out1 + 4 * i, res1);
2597  _mm256_storeu_pd(out2 + 4 * i, res2);
2598  _mm256_storeu_pd(out3 + 4 * i, res3);
2599  }
2600  }
2601 
2602  // remainder loop of work that does not divide by 4
2603  if (add_into)
2604  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2605  for (unsigned int v = 0; v < 4; ++v)
2606  out[offsets[v] + i] += in[i][v];
2607  else
2608  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2609  for (unsigned int v = 0; v < 4; ++v)
2610  out[offsets[v] + i] = in[i][v];
2611 }
2612 
2613 
2614 
2618 template <>
2619 inline DEAL_II_ALWAYS_INLINE void
2620 vectorized_transpose_and_store(const bool add_into,
2621  const unsigned int n_entries,
2622  const VectorizedArray<double, 4> *in,
2623  std::array<double *, 4> & out)
2624 {
2625  // see the comments in the vectorized_transpose_and_store above
2626 
2627  const unsigned int n_chunks = n_entries / 4;
2628  double * out0 = out[0];
2629  double * out1 = out[1];
2630  double * out2 = out[2];
2631  double * out3 = out[3];
2632  for (unsigned int i = 0; i < n_chunks; ++i)
2633  {
2634  __m256d u0 = in[4 * i + 0].data;
2635  __m256d u1 = in[4 * i + 1].data;
2636  __m256d u2 = in[4 * i + 2].data;
2637  __m256d u3 = in[4 * i + 3].data;
2638  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2639  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2640  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2641  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2642  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2643  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2644  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2645  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2646 
2647  // Cannot use the same store instructions in both paths of the 'if'
2648  // because the compiler cannot know that there is no aliasing between
2649  // pointers
2650  if (add_into)
2651  {
2652  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2653  _mm256_storeu_pd(out0 + 4 * i, res0);
2654  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2655  _mm256_storeu_pd(out1 + 4 * i, res1);
2656  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2657  _mm256_storeu_pd(out2 + 4 * i, res2);
2658  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2659  _mm256_storeu_pd(out3 + 4 * i, res3);
2660  }
2661  else
2662  {
2663  _mm256_storeu_pd(out0 + 4 * i, res0);
2664  _mm256_storeu_pd(out1 + 4 * i, res1);
2665  _mm256_storeu_pd(out2 + 4 * i, res2);
2666  _mm256_storeu_pd(out3 + 4 * i, res3);
2667  }
2668  }
2669 
2670  // remainder loop of work that does not divide by 4
2671  if (add_into)
2672  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2673  for (unsigned int v = 0; v < 4; ++v)
2674  out[v][i] += in[i][v];
2675  else
2676  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2677  for (unsigned int v = 0; v < 4; ++v)
2678  out[v][i] = in[i][v];
2679 }
2680 
2681 
2682 
2686 template <>
2687 class VectorizedArray<float, 8>
2688  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
2689 {
2690 public:
2694  using value_type = float;
2695 
2700  VectorizedArray() = default;
2701 
2705  VectorizedArray(const float scalar)
2706  {
2707  this->operator=(scalar);
2708  }
2709 
2713  template <typename U>
2714  VectorizedArray(const std::initializer_list<U> &list)
2716  {}
2717 
2722  VectorizedArray &
2723  operator=(const float x)
2724  {
2725  data = _mm256_set1_ps(x);
2726  return *this;
2727  }
2728 
2733  float &
2734  operator[](const unsigned int comp)
2735  {
2736  AssertIndexRange(comp, 8);
2737  return *(reinterpret_cast<float *>(&data) + comp);
2738  }
2739 
2744  const float &
2745  operator[](const unsigned int comp) const
2746  {
2747  AssertIndexRange(comp, 8);
2748  return *(reinterpret_cast<const float *>(&data) + comp);
2749  }
2750 
2755  VectorizedArray &
2756  operator+=(const VectorizedArray &vec)
2757  {
2758  // if the compiler supports vector arithmetic, we can simply use +=
2759  // operator on the given data type. this allows the compiler to combine
2760  // additions with multiplication (fused multiply-add) if those
2761  // instructions are available. Otherwise, we need to use the built-in
2762  // intrinsic command for __m256d
2763 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2764  data += vec.data;
2765 # else
2766  data = _mm256_add_ps(data, vec.data);
2767 # endif
2768  return *this;
2769  }
2770 
2775  VectorizedArray &
2776  operator-=(const VectorizedArray &vec)
2777  {
2778 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2779  data -= vec.data;
2780 # else
2781  data = _mm256_sub_ps(data, vec.data);
2782 # endif
2783  return *this;
2784  }
2789  VectorizedArray &
2790  operator*=(const VectorizedArray &vec)
2791  {
2792 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2793  data *= vec.data;
2794 # else
2795  data = _mm256_mul_ps(data, vec.data);
2796 # endif
2797  return *this;
2798  }
2799 
2804  VectorizedArray &
2805  operator/=(const VectorizedArray &vec)
2806  {
2807 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2808  data /= vec.data;
2809 # else
2810  data = _mm256_div_ps(data, vec.data);
2811 # endif
2812  return *this;
2813  }
2814 
2821  void
2822  load(const float *ptr)
2823  {
2824  data = _mm256_loadu_ps(ptr);
2825  }
2826 
2834  void
2835  store(float *ptr) const
2836  {
2837  _mm256_storeu_ps(ptr, data);
2838  }
2839 
2844  void
2845  streaming_store(float *ptr) const
2846  {
2847  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2848  ExcMessage("Memory not aligned"));
2849  _mm256_stream_ps(ptr, data);
2850  }
2851 
2865  void
2866  gather(const float *base_ptr, const unsigned int *offsets)
2867  {
2868 # ifdef __AVX2__
2869  // unfortunately, there does not appear to be a 256 bit integer load, so
2870  // do it by some reinterpret casts here. this is allowed because the Intel
2871  // API allows aliasing between different vector types.
2872  const __m256 index_val =
2873  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2874  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2875  data = _mm256_i32gather_ps(base_ptr, index, 4);
2876 # else
2877  for (unsigned int i = 0; i < 8; ++i)
2878  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2879 # endif
2880  }
2881 
2895  void
2896  scatter(const unsigned int *offsets, float *base_ptr) const
2897  {
2898  // no scatter operation in AVX/AVX2
2899  for (unsigned int i = 0; i < 8; ++i)
2900  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2901  }
2902 
2908  __m256 data;
2909 
2910 private:
2917  get_sqrt() const
2918  {
2919  VectorizedArray res;
2920  res.data = _mm256_sqrt_ps(data);
2921  return res;
2922  }
2923 
2930  get_abs() const
2931  {
2932  // to compute the absolute value, perform bitwise andnot with -0. This
2933  // will leave all value and exponent bits unchanged but force the sign
2934  // value to +.
2935  __m256 mask = _mm256_set1_ps(-0.f);
2936  VectorizedArray res;
2937  res.data = _mm256_andnot_ps(mask, data);
2938  return res;
2939  }
2940 
2947  get_max(const VectorizedArray &other) const
2948  {
2949  VectorizedArray res;
2950  res.data = _mm256_max_ps(data, other.data);
2951  return res;
2952  }
2953 
2960  get_min(const VectorizedArray &other) const
2961  {
2962  VectorizedArray res;
2963  res.data = _mm256_min_ps(data, other.data);
2964  return res;
2965  }
2966 
2967  // Make a few functions friends.
2968  template <typename Number2, std::size_t width2>
2970  std::sqrt(const VectorizedArray<Number2, width2> &);
2971  template <typename Number2, std::size_t width2>
2973  std::abs(const VectorizedArray<Number2, width2> &);
2974  template <typename Number2, std::size_t width2>
2976  std::max(const VectorizedArray<Number2, width2> &,
2978  template <typename Number2, std::size_t width2>
2980  std::min(const VectorizedArray<Number2, width2> &,
2982 };
2983 
2984 
2985 
2989 template <>
2990 inline DEAL_II_ALWAYS_INLINE void
2991 vectorized_load_and_transpose(const unsigned int n_entries,
2992  const float * in,
2993  const unsigned int * offsets,
2995 {
2996  const unsigned int n_chunks = n_entries / 4;
2997  for (unsigned int i = 0; i < n_chunks; ++i)
2998  {
2999  // To avoid warnings about uninitialized variables, need to initialize
3000  // one variable with zero before using it.
3001  __m256 t0, t1, t2, t3 = {};
3002  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3003  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3004  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3005  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3006  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3007  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3008  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3009  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3010 
3011  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3012  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3013  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3014  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3015  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3016  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3017  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3018  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3019  }
3020 
3021  // remainder loop of work that does not divide by 4
3022  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3023  out[i].gather(in + i, offsets);
3024 }
3025 
3026 
3027 
3031 template <>
3032 inline DEAL_II_ALWAYS_INLINE void
3033 vectorized_load_and_transpose(const unsigned int n_entries,
3034  const std::array<float *, 8> &in,
3036 {
3037  // see the comments in the vectorized_load_and_transpose above
3038 
3039  const unsigned int n_chunks = n_entries / 4;
3040  for (unsigned int i = 0; i < n_chunks; ++i)
3041  {
3042  __m256 t0, t1, t2, t3 = {};
3043  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3044  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3045  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3046  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3047  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3048  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3049  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3050  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3051 
3052  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3053  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3054  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3055  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3056  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3057  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3058  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3059  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3060  }
3061 
3062  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3063  gather(out[i], in, i);
3064 }
3065 
3066 
3067 
3071 template <>
3072 inline DEAL_II_ALWAYS_INLINE void
3073 vectorized_transpose_and_store(const bool add_into,
3074  const unsigned int n_entries,
3075  const VectorizedArray<float, 8> *in,
3076  const unsigned int * offsets,
3077  float * out)
3078 {
3079  const unsigned int n_chunks = n_entries / 4;
3080  for (unsigned int i = 0; i < n_chunks; ++i)
3081  {
3082  __m256 u0 = in[4 * i + 0].data;
3083  __m256 u1 = in[4 * i + 1].data;
3084  __m256 u2 = in[4 * i + 2].data;
3085  __m256 u3 = in[4 * i + 3].data;
3086  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3087  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3088  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3089  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3090  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3091  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3092  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3093  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3094  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3095  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3096  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3097  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3098  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3099  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3100  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3101  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3102 
3103  // Cannot use the same store instructions in both paths of the 'if'
3104  // because the compiler cannot know that there is no aliasing between
3105  // pointers
3106  if (add_into)
3107  {
3108  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3109  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3110  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3111  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3112  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3113  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3114  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3115  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3116  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3117  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3118  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3119  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3120  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3121  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3122  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3123  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3124  }
3125  else
3126  {
3127  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3128  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3129  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3130  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3131  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3132  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3133  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3134  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3135  }
3136  }
3137 
3138  // remainder loop of work that does not divide by 4
3139  if (add_into)
3140  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3141  for (unsigned int v = 0; v < 8; ++v)
3142  out[offsets[v] + i] += in[i][v];
3143  else
3144  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3145  for (unsigned int v = 0; v < 8; ++v)
3146  out[offsets[v] + i] = in[i][v];
3147 }
3148 
3149 
3150 
3154 template <>
3155 inline DEAL_II_ALWAYS_INLINE void
3156 vectorized_transpose_and_store(const bool add_into,
3157  const unsigned int n_entries,
3158  const VectorizedArray<float, 8> *in,
3159  std::array<float *, 8> & out)
3160 {
3161  // see the comments in the vectorized_transpose_and_store above
3162 
3163  const unsigned int n_chunks = n_entries / 4;
3164  for (unsigned int i = 0; i < n_chunks; ++i)
3165  {
3166  __m256 u0 = in[4 * i + 0].data;
3167  __m256 u1 = in[4 * i + 1].data;
3168  __m256 u2 = in[4 * i + 2].data;
3169  __m256 u3 = in[4 * i + 3].data;
3170  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3171  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3172  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3173  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3174  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3175  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3176  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3177  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3178  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3179  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3180  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3181  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3182  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3183  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3184  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3185  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3186 
3187  if (add_into)
3188  {
3189  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3190  _mm_storeu_ps(out[0] + 4 * i, res0);
3191  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3192  _mm_storeu_ps(out[1] + 4 * i, res1);
3193  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3194  _mm_storeu_ps(out[2] + 4 * i, res2);
3195  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3196  _mm_storeu_ps(out[3] + 4 * i, res3);
3197  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3198  _mm_storeu_ps(out[4] + 4 * i, res4);
3199  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3200  _mm_storeu_ps(out[5] + 4 * i, res5);
3201  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3202  _mm_storeu_ps(out[6] + 4 * i, res6);
3203  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3204  _mm_storeu_ps(out[7] + 4 * i, res7);
3205  }
3206  else
3207  {
3208  _mm_storeu_ps(out[0] + 4 * i, res0);
3209  _mm_storeu_ps(out[1] + 4 * i, res1);
3210  _mm_storeu_ps(out[2] + 4 * i, res2);
3211  _mm_storeu_ps(out[3] + 4 * i, res3);
3212  _mm_storeu_ps(out[4] + 4 * i, res4);
3213  _mm_storeu_ps(out[5] + 4 * i, res5);
3214  _mm_storeu_ps(out[6] + 4 * i, res6);
3215  _mm_storeu_ps(out[7] + 4 * i, res7);
3216  }
3217  }
3218 
3219  if (add_into)
3220  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3221  for (unsigned int v = 0; v < 8; ++v)
3222  out[v][i] += in[i][v];
3223  else
3224  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3225  for (unsigned int v = 0; v < 8; ++v)
3226  out[v][i] = in[i][v];
3227 }
3228 
3229 # endif
3230 
3231 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3232 
3236 template <>
3237 class VectorizedArray<double, 2>
3238  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
3239 {
3240 public:
3244  using value_type = double;
3245 
3250  VectorizedArray() = default;
3251 
3255  VectorizedArray(const double scalar)
3256  {
3257  this->operator=(scalar);
3258  }
3259 
3263  template <typename U>
3264  VectorizedArray(const std::initializer_list<U> &list)
3266  {}
3267 
3272  VectorizedArray &
3273  operator=(const double x)
3274  {
3275  data = _mm_set1_pd(x);
3276  return *this;
3277  }
3278 
3283  double &
3284  operator[](const unsigned int comp)
3285  {
3286  AssertIndexRange(comp, 2);
3287  return *(reinterpret_cast<double *>(&data) + comp);
3288  }
3289 
3294  const double &
3295  operator[](const unsigned int comp) const
3296  {
3297  AssertIndexRange(comp, 2);
3298  return *(reinterpret_cast<const double *>(&data) + comp);
3299  }
3300 
3305  VectorizedArray &
3306  operator+=(const VectorizedArray &vec)
3307  {
3308 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3309  data += vec.data;
3310 # else
3311  data = _mm_add_pd(data, vec.data);
3312 # endif
3313  return *this;
3314  }
3315 
3320  VectorizedArray &
3321  operator-=(const VectorizedArray &vec)
3322  {
3323 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3324  data -= vec.data;
3325 # else
3326  data = _mm_sub_pd(data, vec.data);
3327 # endif
3328  return *this;
3329  }
3330 
3335  VectorizedArray &
3336  operator*=(const VectorizedArray &vec)
3337  {
3338 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3339  data *= vec.data;
3340 # else
3341  data = _mm_mul_pd(data, vec.data);
3342 # endif
3343  return *this;
3344  }
3345 
3350  VectorizedArray &
3351  operator/=(const VectorizedArray &vec)
3352  {
3353 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3354  data /= vec.data;
3355 # else
3356  data = _mm_div_pd(data, vec.data);
3357 # endif
3358  return *this;
3359  }
3360 
3367  void
3368  load(const double *ptr)
3369  {
3370  data = _mm_loadu_pd(ptr);
3371  }
3372 
3380  void
3381  store(double *ptr) const
3382  {
3383  _mm_storeu_pd(ptr, data);
3384  }
3385 
3390  void
3391  streaming_store(double *ptr) const
3392  {
3393  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3394  ExcMessage("Memory not aligned"));
3395  _mm_stream_pd(ptr, data);
3396  }
3397 
3411  void
3412  gather(const double *base_ptr, const unsigned int *offsets)
3413  {
3414  for (unsigned int i = 0; i < 2; ++i)
3415  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3416  }
3417 
3431  void
3432  scatter(const unsigned int *offsets, double *base_ptr) const
3433  {
3434  for (unsigned int i = 0; i < 2; ++i)
3435  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3436  }
3437 
3443  __m128d data;
3444 
3445 private:
3452  get_sqrt() const
3453  {
3454  VectorizedArray res;
3455  res.data = _mm_sqrt_pd(data);
3456  return res;
3457  }
3458 
3465  get_abs() const
3466  {
3467  // to compute the absolute value, perform
3468  // bitwise andnot with -0. This will leave all
3469  // value and exponent bits unchanged but force
3470  // the sign value to +.
3471  __m128d mask = _mm_set1_pd(-0.);
3472  VectorizedArray res;
3473  res.data = _mm_andnot_pd(mask, data);
3474  return res;
3475  }
3476 
3483  get_max(const VectorizedArray &other) const
3484  {
3485  VectorizedArray res;
3486  res.data = _mm_max_pd(data, other.data);
3487  return res;
3488  }
3489 
3496  get_min(const VectorizedArray &other) const
3497  {
3498  VectorizedArray res;
3499  res.data = _mm_min_pd(data, other.data);
3500  return res;
3501  }
3502 
3503  // Make a few functions friends.
3504  template <typename Number2, std::size_t width2>
3506  std::sqrt(const VectorizedArray<Number2, width2> &);
3507  template <typename Number2, std::size_t width2>
3509  std::abs(const VectorizedArray<Number2, width2> &);
3510  template <typename Number2, std::size_t width2>
3512  std::max(const VectorizedArray<Number2, width2> &,
3514  template <typename Number2, std::size_t width2>
3516  std::min(const VectorizedArray<Number2, width2> &,
3518 };
3519 
3520 
3521 
3525 template <>
3526 inline DEAL_II_ALWAYS_INLINE void
3527 vectorized_load_and_transpose(const unsigned int n_entries,
3528  const double * in,
3529  const unsigned int * offsets,
3531 {
3532  const unsigned int n_chunks = n_entries / 2;
3533  for (unsigned int i = 0; i < n_chunks; ++i)
3534  {
3535  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3536  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3537  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3538  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3539  }
3540 
3541  // remainder loop of work that does not divide by 2
3542  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3543  for (unsigned int v = 0; v < 2; ++v)
3544  out[i][v] = in[offsets[v] + i];
3545 }
3546 
3547 
3548 
3552 template <>
3553 inline DEAL_II_ALWAYS_INLINE void
3554 vectorized_load_and_transpose(const unsigned int n_entries,
3555  const std::array<double *, 2> &in,
3557 {
3558  // see the comments in the vectorized_load_and_transpose above
3559 
3560  const unsigned int n_chunks = n_entries / 2;
3561  for (unsigned int i = 0; i < n_chunks; ++i)
3562  {
3563  __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3564  __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3565  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3566  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3567  }
3568 
3569  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3570  for (unsigned int v = 0; v < 2; ++v)
3571  out[i][v] = in[v][i];
3572 }
3573 
3574 
3575 
3579 template <>
3580 inline DEAL_II_ALWAYS_INLINE void
3581 vectorized_transpose_and_store(const bool add_into,
3582  const unsigned int n_entries,
3583  const VectorizedArray<double, 2> *in,
3584  const unsigned int * offsets,
3585  double * out)
3586 {
3587  const unsigned int n_chunks = n_entries / 2;
3588  if (add_into)
3589  {
3590  for (unsigned int i = 0; i < n_chunks; ++i)
3591  {
3592  __m128d u0 = in[2 * i + 0].data;
3593  __m128d u1 = in[2 * i + 1].data;
3594  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3595  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3596  _mm_storeu_pd(out + 2 * i + offsets[0],
3597  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3598  res0));
3599  _mm_storeu_pd(out + 2 * i + offsets[1],
3600  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3601  res1));
3602  }
3603  // remainder loop of work that does not divide by 2
3604  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3605  for (unsigned int v = 0; v < 2; ++v)
3606  out[offsets[v] + i] += in[i][v];
3607  }
3608  else
3609  {
3610  for (unsigned int i = 0; i < n_chunks; ++i)
3611  {
3612  __m128d u0 = in[2 * i + 0].data;
3613  __m128d u1 = in[2 * i + 1].data;
3614  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3615  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3616  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3617  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3618  }
3619  // remainder loop of work that does not divide by 2
3620  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3621  for (unsigned int v = 0; v < 2; ++v)
3622  out[offsets[v] + i] = in[i][v];
3623  }
3624 }
3625 
3626 
3627 
3631 template <>
3632 inline DEAL_II_ALWAYS_INLINE void
3633 vectorized_transpose_and_store(const bool add_into,
3634  const unsigned int n_entries,
3635  const VectorizedArray<double, 2> *in,
3636  std::array<double *, 2> & out)
3637 {
3638  // see the comments in the vectorized_transpose_and_store above
3639 
3640  const unsigned int n_chunks = n_entries / 2;
3641  if (add_into)
3642  {
3643  for (unsigned int i = 0; i < n_chunks; ++i)
3644  {
3645  __m128d u0 = in[2 * i + 0].data;
3646  __m128d u1 = in[2 * i + 1].data;
3647  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3648  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3649  _mm_storeu_pd(out[0] + 2 * i,
3650  _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3651  _mm_storeu_pd(out[1] + 2 * i,
3652  _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3653  }
3654 
3655  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3656  for (unsigned int v = 0; v < 2; ++v)
3657  out[v][i] += in[i][v];
3658  }
3659  else
3660  {
3661  for (unsigned int i = 0; i < n_chunks; ++i)
3662  {
3663  __m128d u0 = in[2 * i + 0].data;
3664  __m128d u1 = in[2 * i + 1].data;
3665  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3666  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3667  _mm_storeu_pd(out[0] + 2 * i, res0);
3668  _mm_storeu_pd(out[1] + 2 * i, res1);
3669  }
3670 
3671  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3672  for (unsigned int v = 0; v < 2; ++v)
3673  out[v][i] = in[i][v];
3674  }
3675 }
3676 
3677 
3678 
3682 template <>
3683 class VectorizedArray<float, 4>
3684  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
3685 {
3686 public:
3690  using value_type = float;
3691 
3700  VectorizedArray() = default;
3701 
3705  VectorizedArray(const float scalar)
3706  {
3707  this->operator=(scalar);
3708  }
3709 
3713  template <typename U>
3714  VectorizedArray(const std::initializer_list<U> &list)
3716  {}
3717 
3719  VectorizedArray &
3720  operator=(const float x)
3721  {
3722  data = _mm_set1_ps(x);
3723  return *this;
3724  }
3725 
3730  float &
3731  operator[](const unsigned int comp)
3732  {
3733  AssertIndexRange(comp, 4);
3734  return *(reinterpret_cast<float *>(&data) + comp);
3735  }
3736 
3741  const float &
3742  operator[](const unsigned int comp) const
3743  {
3744  AssertIndexRange(comp, 4);
3745  return *(reinterpret_cast<const float *>(&data) + comp);
3746  }
3747 
3752  VectorizedArray &
3753  operator+=(const VectorizedArray &vec)
3754  {
3755 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3756  data += vec.data;
3757 # else
3758  data = _mm_add_ps(data, vec.data);
3759 # endif
3760  return *this;
3761  }
3762 
3767  VectorizedArray &
3768  operator-=(const VectorizedArray &vec)
3769  {
3770 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3771  data -= vec.data;
3772 # else
3773  data = _mm_sub_ps(data, vec.data);
3774 # endif
3775  return *this;
3776  }
3777 
3782  VectorizedArray &
3783  operator*=(const VectorizedArray &vec)
3784  {
3785 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3786  data *= vec.data;
3787 # else
3788  data = _mm_mul_ps(data, vec.data);
3789 # endif
3790  return *this;
3791  }
3792 
3797  VectorizedArray &
3798  operator/=(const VectorizedArray &vec)
3799  {
3800 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3801  data /= vec.data;
3802 # else
3803  data = _mm_div_ps(data, vec.data);
3804 # endif
3805  return *this;
3806  }
3807 
3814  void
3815  load(const float *ptr)
3816  {
3817  data = _mm_loadu_ps(ptr);
3818  }
3819 
3827  void
3828  store(float *ptr) const
3829  {
3830  _mm_storeu_ps(ptr, data);
3831  }
3832 
3837  void
3838  streaming_store(float *ptr) const
3839  {
3840  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3841  ExcMessage("Memory not aligned"));
3842  _mm_stream_ps(ptr, data);
3843  }
3844 
3858  void
3859  gather(const float *base_ptr, const unsigned int *offsets)
3860  {
3861  for (unsigned int i = 0; i < 4; ++i)
3862  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3863  }
3864 
3878  void
3879  scatter(const unsigned int *offsets, float *base_ptr) const
3880  {
3881  for (unsigned int i = 0; i < 4; ++i)
3882  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3883  }
3884 
3890  __m128 data;
3891 
3892 private:
3899  get_sqrt() const
3900  {
3901  VectorizedArray res;
3902  res.data = _mm_sqrt_ps(data);
3903  return res;
3904  }
3905 
3912  get_abs() const
3913  {
3914  // to compute the absolute value, perform bitwise andnot with -0. This
3915  // will leave all value and exponent bits unchanged but force the sign
3916  // value to +.
3917  __m128 mask = _mm_set1_ps(-0.f);
3918  VectorizedArray res;
3919  res.data = _mm_andnot_ps(mask, data);
3920  return res;
3921  }
3922 
3929  get_max(const VectorizedArray &other) const
3930  {
3931  VectorizedArray res;
3932  res.data = _mm_max_ps(data, other.data);
3933  return res;
3934  }
3935 
3942  get_min(const VectorizedArray &other) const
3943  {
3944  VectorizedArray res;
3945  res.data = _mm_min_ps(data, other.data);
3946  return res;
3947  }
3948 
3949  // Make a few functions friends.
3950  template <typename Number2, std::size_t width2>
3952  std::sqrt(const VectorizedArray<Number2, width2> &);
3953  template <typename Number2, std::size_t width2>
3955  std::abs(const VectorizedArray<Number2, width2> &);
3956  template <typename Number2, std::size_t width2>
3958  std::max(const VectorizedArray<Number2, width2> &,
3960  template <typename Number2, std::size_t width2>
3962  std::min(const VectorizedArray<Number2, width2> &,
3964 };
3965 
3966 
3967 
3971 template <>
3972 inline DEAL_II_ALWAYS_INLINE void
3973 vectorized_load_and_transpose(const unsigned int n_entries,
3974  const float * in,
3975  const unsigned int * offsets,
3977 {
3978  const unsigned int n_chunks = n_entries / 4;
3979  for (unsigned int i = 0; i < n_chunks; ++i)
3980  {
3981  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
3982  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
3983  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
3984  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
3985  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3986  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3987  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3988  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3989  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3990  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3991  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3992  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3993  }
3994 
3995  // remainder loop of work that does not divide by 4
3996  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3997  for (unsigned int v = 0; v < 4; ++v)
3998  out[i][v] = in[offsets[v] + i];
3999 }
4000 
4001 
4002 
4006 template <>
4007 inline DEAL_II_ALWAYS_INLINE void
4008 vectorized_load_and_transpose(const unsigned int n_entries,
4009  const std::array<float *, 4> &in,
4011 {
4012  // see the comments in the vectorized_load_and_transpose above
4013 
4014  const unsigned int n_chunks = n_entries / 4;
4015  for (unsigned int i = 0; i < n_chunks; ++i)
4016  {
4017  __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
4018  __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
4019  __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
4020  __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
4021  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
4022  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
4023  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4024  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4025  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
4026  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
4027  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
4028  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
4029  }
4030 
4031  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4032  for (unsigned int v = 0; v < 4; ++v)
4033  out[i][v] = in[v][i];
4034 }
4035 
4036 
4037 
4041 template <>
4042 inline DEAL_II_ALWAYS_INLINE void
4043 vectorized_transpose_and_store(const bool add_into,
4044  const unsigned int n_entries,
4045  const VectorizedArray<float, 4> *in,
4046  const unsigned int * offsets,
4047  float * out)
4048 {
4049  const unsigned int n_chunks = n_entries / 4;
4050  for (unsigned int i = 0; i < n_chunks; ++i)
4051  {
4052  __m128 u0 = in[4 * i + 0].data;
4053  __m128 u1 = in[4 * i + 1].data;
4054  __m128 u2 = in[4 * i + 2].data;
4055  __m128 u3 = in[4 * i + 3].data;
4056  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4057  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4058  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4059  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4060  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4061  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4062  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4063  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4064 
4065  // Cannot use the same store instructions in both paths of the 'if'
4066  // because the compiler cannot know that there is no aliasing between
4067  // pointers
4068  if (add_into)
4069  {
4070  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4071  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4072  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4073  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4074  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4075  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4076  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4077  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4078  }
4079  else
4080  {
4081  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4082  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4083  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4084  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4085  }
4086  }
4087 
4088  // remainder loop of work that does not divide by 4
4089  if (add_into)
4090  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4091  for (unsigned int v = 0; v < 4; ++v)
4092  out[offsets[v] + i] += in[i][v];
4093  else
4094  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4095  for (unsigned int v = 0; v < 4; ++v)
4096  out[offsets[v] + i] = in[i][v];
4097 }
4098 
4099 
4100 
4104 template <>
4105 inline DEAL_II_ALWAYS_INLINE void
4106 vectorized_transpose_and_store(const bool add_into,
4107  const unsigned int n_entries,
4108  const VectorizedArray<float, 4> *in,
4109  std::array<float *, 4> & out)
4110 {
4111  // see the comments in the vectorized_transpose_and_store above
4112 
4113  const unsigned int n_chunks = n_entries / 4;
4114  for (unsigned int i = 0; i < n_chunks; ++i)
4115  {
4116  __m128 u0 = in[4 * i + 0].data;
4117  __m128 u1 = in[4 * i + 1].data;
4118  __m128 u2 = in[4 * i + 2].data;
4119  __m128 u3 = in[4 * i + 3].data;
4120  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4121  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4122  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4123  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4124  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4125  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4126  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4127  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4128 
4129  if (add_into)
4130  {
4131  u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4132  _mm_storeu_ps(out[0] + 4 * i, u0);
4133  u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4134  _mm_storeu_ps(out[1] + 4 * i, u1);
4135  u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4136  _mm_storeu_ps(out[2] + 4 * i, u2);
4137  u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4138  _mm_storeu_ps(out[3] + 4 * i, u3);
4139  }
4140  else
4141  {
4142  _mm_storeu_ps(out[0] + 4 * i, u0);
4143  _mm_storeu_ps(out[1] + 4 * i, u1);
4144  _mm_storeu_ps(out[2] + 4 * i, u2);
4145  _mm_storeu_ps(out[3] + 4 * i, u3);
4146  }
4147  }
4148 
4149  if (add_into)
4150  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4151  for (unsigned int v = 0; v < 4; ++v)
4152  out[v][i] += in[i][v];
4153  else
4154  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4155  for (unsigned int v = 0; v < 4; ++v)
4156  out[v][i] = in[i][v];
4157 }
4158 
4159 
4160 
4161 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0 && defined(__SSE2__)
4162 
4163 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4164  defined(__VSX__)
4165 
4166 template <>
4167 class VectorizedArray<double, 2>
4168  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
4169 {
4170 public:
4174  using value_type = double;
4175 
4180  VectorizedArray() = default;
4181 
4185  VectorizedArray(const double scalar)
4186  {
4187  this->operator=(scalar);
4188  }
4189 
4193  template <typename U>
4194  VectorizedArray(const std::initializer_list<U> &list)
4196  {}
4197 
4202  VectorizedArray &
4203  operator=(const double x)
4204  {
4205  data = vec_splats(x);
4206 
4207  // Some compilers believe that vec_splats sets 'x', but that's not true.
4208  // They then warn about setting a variable and not using it. Suppress the
4209  // warning by "using" the variable:
4210  (void)x;
4211  return *this;
4212  }
4213 
4218  double &
4219  operator[](const unsigned int comp)
4220  {
4221  AssertIndexRange(comp, 2);
4222  return *(reinterpret_cast<double *>(&data) + comp);
4223  }
4224 
4229  const double &
4230  operator[](const unsigned int comp) const
4231  {
4232  AssertIndexRange(comp, 2);
4233  return *(reinterpret_cast<const double *>(&data) + comp);
4234  }
4235 
4240  VectorizedArray &
4241  operator+=(const VectorizedArray &vec)
4242  {
4243  data = vec_add(data, vec.data);
4244  return *this;
4245  }
4246 
4251  VectorizedArray &
4252  operator-=(const VectorizedArray &vec)
4253  {
4254  data = vec_sub(data, vec.data);
4255  return *this;
4256  }
4257 
4262  VectorizedArray &
4263  operator*=(const VectorizedArray &vec)
4264  {
4265  data = vec_mul(data, vec.data);
4266  return *this;
4267  }
4268 
4273  VectorizedArray &
4274  operator/=(const VectorizedArray &vec)
4275  {
4276  data = vec_div(data, vec.data);
4277  return *this;
4278  }
4279 
4285  void
4286  load(const double *ptr)
4287  {
4288  data = vec_vsx_ld(0, ptr);
4289  }
4290 
4296  void
4297  store(double *ptr) const
4298  {
4299  vec_vsx_st(data, 0, ptr);
4300  }
4301 
4305  void
4306  streaming_store(double *ptr) const
4307  {
4308  store(ptr);
4309  }
4310 
4314  void
4315  gather(const double *base_ptr, const unsigned int *offsets)
4316  {
4317  for (unsigned int i = 0; i < 2; ++i)
4318  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4319  }
4320 
4324  void
4325  scatter(const unsigned int *offsets, double *base_ptr) const
4326  {
4327  for (unsigned int i = 0; i < 2; ++i)
4328  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4329  }
4330 
4336  __vector double data;
4337 
4338 private:
4345  get_sqrt() const
4346  {
4347  VectorizedArray res;
4348  res.data = vec_sqrt(data);
4349  return res;
4350  }
4351 
4358  get_abs() const
4359  {
4360  VectorizedArray res;
4361  res.data = vec_abs(data);
4362  return res;
4363  }
4364 
4371  get_max(const VectorizedArray &other) const
4372  {
4373  VectorizedArray res;
4374  res.data = vec_max(data, other.data);
4375  return res;
4376  }
4377 
4384  get_min(const VectorizedArray &other) const
4385  {
4386  VectorizedArray res;
4387  res.data = vec_min(data, other.data);
4388  return res;
4389  }
4390 
4391  // Make a few functions friends.
4392  template <typename Number2, std::size_t width2>
4394  std::sqrt(const VectorizedArray<Number2, width2> &);
4395  template <typename Number2, std::size_t width2>
4397  std::abs(const VectorizedArray<Number2, width2> &);
4398  template <typename Number2, std::size_t width2>
4400  std::max(const VectorizedArray<Number2, width2> &,
4402  template <typename Number2, std::size_t width2>
4404  std::min(const VectorizedArray<Number2, width2> &,
4406 };
4407 
4408 
4409 
4410 template <>
4411 class VectorizedArray<float, 4>
4412  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
4413 {
4414 public:
4418  using value_type = float;
4419 
4424  VectorizedArray() = default;
4425 
4429  VectorizedArray(const float scalar)
4430  {
4431  this->operator=(scalar);
4432  }
4433 
4437  template <typename U>
4438  VectorizedArray(const std::initializer_list<U> &list)
4440  {}
4441 
4446  VectorizedArray &
4447  operator=(const float x)
4448  {
4449  data = vec_splats(x);
4450 
4451  // Some compilers believe that vec_splats sets 'x', but that's not true.
4452  // They then warn about setting a variable and not using it. Suppress the
4453  // warning by "using" the variable:
4454  (void)x;
4455  return *this;
4456  }
4457 
4462  float &
4463  operator[](const unsigned int comp)
4464  {
4465  AssertIndexRange(comp, 4);
4466  return *(reinterpret_cast<float *>(&data) + comp);
4467  }
4468 
4473  const float &
4474  operator[](const unsigned int comp) const
4475  {
4476  AssertIndexRange(comp, 4);
4477  return *(reinterpret_cast<const float *>(&data) + comp);
4478  }
4479 
4484  VectorizedArray &
4485  operator+=(const VectorizedArray &vec)
4486  {
4487  data = vec_add(data, vec.data);
4488  return *this;
4489  }
4490 
4495  VectorizedArray &
4496  operator-=(const VectorizedArray &vec)
4497  {
4498  data = vec_sub(data, vec.data);
4499  return *this;
4500  }
4501 
4506  VectorizedArray &
4507  operator*=(const VectorizedArray &vec)
4508  {
4509  data = vec_mul(data, vec.data);
4510  return *this;
4511  }
4512 
4517  VectorizedArray &
4518  operator/=(const VectorizedArray &vec)
4519  {
4520  data = vec_div(data, vec.data);
4521  return *this;
4522  }
4523 
4529  void
4530  load(const float *ptr)
4531  {
4532  data = vec_vsx_ld(0, ptr);
4533  }
4534 
4540  void
4541  store(float *ptr) const
4542  {
4543  vec_vsx_st(data, 0, ptr);
4544  }
4545 
4549  void
4550  streaming_store(float *ptr) const
4551  {
4552  store(ptr);
4553  }
4554 
4558  void
4559  gather(const float *base_ptr, const unsigned int *offsets)
4560  {
4561  for (unsigned int i = 0; i < 4; ++i)
4562  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4563  }
4564 
4568  void
4569  scatter(const unsigned int *offsets, float *base_ptr) const
4570  {
4571  for (unsigned int i = 0; i < 4; ++i)
4572  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4573  }
4574 
4580  __vector float data;
4581 
4582 private:
4589  get_sqrt() const
4590  {
4591  VectorizedArray res;
4592  res.data = vec_sqrt(data);
4593  return res;
4594  }
4595 
4602  get_abs() const
4603  {
4604  VectorizedArray res;
4605  res.data = vec_abs(data);
4606  return res;
4607  }
4608 
4615  get_max(const VectorizedArray &other) const
4616  {
4617  VectorizedArray res;
4618  res.data = vec_max(data, other.data);
4619  return res;
4620  }
4621 
4628  get_min(const VectorizedArray &other) const
4629  {
4630  VectorizedArray res;
4631  res.data = vec_min(data, other.data);
4632  return res;
4633  }
4634 
4635  // Make a few functions friends.
4636  template <typename Number2, std::size_t width2>
4638  std::sqrt(const VectorizedArray<Number2, width2> &);
4639  template <typename Number2, std::size_t width2>
4641  std::abs(const VectorizedArray<Number2, width2> &);
4642  template <typename Number2, std::size_t width2>
4644  std::max(const VectorizedArray<Number2, width2> &,
4646  template <typename Number2, std::size_t width2>
4648  std::min(const VectorizedArray<Number2, width2> &,
4650 };
4651 
4652 # endif // if DEAL_II_VECTORIZATION_LEVEL >=1 && defined(__ALTIVEC__) &&
4653  // defined(__VSX__)
4654 
4655 
4656 #endif // DOXYGEN
4657 
4662 
4668 template <typename Number, std::size_t width>
4669 inline DEAL_II_ALWAYS_INLINE bool
4671  const VectorizedArray<Number, width> &rhs)
4672 {
4673  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4674  if (lhs[i] != rhs[i])
4675  return false;
4676 
4677  return true;
4678 }
4679 
4680 
4686 template <typename Number, std::size_t width>
4690 {
4692  return tmp += v;
4693 }
4694 
4700 template <typename Number, std::size_t width>
4704 {
4706  return tmp -= v;
4707 }
4708 
4714 template <typename Number, std::size_t width>
4718 {
4720  return tmp *= v;
4721 }
4722 
4728 template <typename Number, std::size_t width>
4732 {
4734  return tmp /= v;
4735 }
4736 
4743 template <typename Number, std::size_t width>
4745 operator+(const Number &u, const VectorizedArray<Number, width> &v)
4746 {
4748  return tmp += v;
4749 }
4750 
4759 template <std::size_t width>
4761 operator+(const double u, const VectorizedArray<float, width> &v)
4762 {
4764  return tmp += v;
4765 }
4766 
4773 template <typename Number, std::size_t width>
4775 operator+(const VectorizedArray<Number, width> &v, const Number &u)
4776 {
4777  return u + v;
4778 }
4779 
4788 template <std::size_t width>
4790 operator+(const VectorizedArray<float, width> &v, const double u)
4791 {
4792  return u + v;
4793 }
4794 
4801 template <typename Number, std::size_t width>
4803 operator-(const Number &u, const VectorizedArray<Number, width> &v)
4804 {
4806  return tmp -= v;
4807 }
4808 
4817 template <std::size_t width>
4819 operator-(const double u, const VectorizedArray<float, width> &v)
4820 {
4821  VectorizedArray<float, width> tmp = static_cast<float>(u);
4822  return tmp -= v;
4823 }
4824 
4831 template <typename Number, std::size_t width>
4833 operator-(const VectorizedArray<Number, width> &v, const Number &u)
4834 {
4836  return v - tmp;
4837 }
4838 
4847 template <std::size_t width>
4849 operator-(const VectorizedArray<float, width> &v, const double u)
4850 {
4851  VectorizedArray<float, width> tmp = static_cast<float>(u);
4852  return v - tmp;
4853 }
4854 
4861 template <typename Number, std::size_t width>
4863 operator*(const Number &u, const VectorizedArray<Number, width> &v)
4864 {
4866  return tmp *= v;
4867 }
4868 
4877 template <std::size_t width>
4879 operator*(const double u, const VectorizedArray<float, width> &v)
4880 {
4881  VectorizedArray<float, width> tmp = static_cast<float>(u);
4882  return tmp *= v;
4883 }
4884 
4891 template <typename Number, std::size_t width>
4893 operator*(const VectorizedArray<Number, width> &v, const Number &u)
4894 {
4895  return u * v;
4896 }
4897 
4906 template <std::size_t width>
4908 operator*(const VectorizedArray<float, width> &v, const double u)
4909 {
4910  return u * v;
4911 }
4912 
4919 template <typename Number, std::size_t width>
4921 operator/(const Number &u, const VectorizedArray<Number, width> &v)
4922 {
4924  return tmp /= v;
4925 }
4926 
4935 template <std::size_t width>
4937 operator/(const double u, const VectorizedArray<float, width> &v)
4938 {
4939  VectorizedArray<float, width> tmp = static_cast<float>(u);
4940  return tmp /= v;
4941 }
4942 
4949 template <typename Number, std::size_t width>
4951 operator/(const VectorizedArray<Number, width> &v, const Number &u)
4952 {
4954  return v / tmp;
4955 }
4956 
4965 template <std::size_t width>
4967 operator/(const VectorizedArray<float, width> &v, const double u)
4968 {
4969  VectorizedArray<float, width> tmp = static_cast<float>(u);
4970  return v / tmp;
4971 }
4972 
4978 template <typename Number, std::size_t width>
4981 {
4982  return u;
4983 }
4984 
4990 template <typename Number, std::size_t width>
4993 {
4994  // to get a negative sign, subtract the input from zero (could also
4995  // multiply by -1, but this one is slightly simpler)
4996  return VectorizedArray<Number, width>() - u;
4997 }
4998 
5004 template <typename Number, std::size_t width>
5005 inline std::ostream &
5006 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
5007 {
5008  constexpr unsigned int n = VectorizedArray<Number, width>::size();
5009  for (unsigned int i = 0; i < n - 1; ++i)
5010  out << p[i] << ' ';
5011  out << p[n - 1];
5012 
5013  return out;
5014 }
5015 
5017 
5022 
5023 
5031 enum class SIMDComparison : int
5032 {
5033 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5034  equal = _CMP_EQ_OQ,
5035  not_equal = _CMP_NEQ_OQ,
5036  less_than = _CMP_LT_OQ,
5037  less_than_or_equal = _CMP_LE_OQ,
5038  greater_than = _CMP_GT_OQ,
5039  greater_than_or_equal = _CMP_GE_OQ
5040 #else
5041  equal,
5042  not_equal,
5043  less_than,
5045  greater_than,
5046  greater_than_or_equal
5047 #endif
5048 };
5049 
5050 
5114 template <SIMDComparison predicate, typename Number>
5115 DEAL_II_ALWAYS_INLINE inline Number
5116 compare_and_apply_mask(const Number &left,
5117  const Number &right,
5118  const Number &true_value,
5119  const Number &false_value)
5120 {
5121  bool mask;
5122  switch (predicate)
5123  {
5124  case SIMDComparison::equal:
5125  mask = (left == right);
5126  break;
5128  mask = (left != right);
5129  break;
5131  mask = (left < right);
5132  break;
5134  mask = (left <= right);
5135  break;
5137  mask = (left > right);
5138  break;
5140  mask = (left >= right);
5141  break;
5142  }
5143 
5144  return mask ? true_value : false_value;
5145 }
5146 
5147 
5152 template <SIMDComparison predicate, typename Number>
5155  const VectorizedArray<Number, 1> &right,
5156  const VectorizedArray<Number, 1> &true_value,
5157  const VectorizedArray<Number, 1> &false_value)
5158 {
5160  result.data = compare_and_apply_mask<predicate, Number>(left.data,
5161  right.data,
5162  true_value.data,
5163  false_value.data);
5164  return result;
5165 }
5166 
5168 
5169 #ifndef DOXYGEN
5170 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5171 
5172 template <SIMDComparison predicate>
5175  const VectorizedArray<float, 16> &right,
5176  const VectorizedArray<float, 16> &true_values,
5177  const VectorizedArray<float, 16> &false_values)
5178 {
5179  const __mmask16 mask =
5180  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5182  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5183  return result;
5184 }
5185 
5186 
5187 
5188 template <SIMDComparison predicate>
5191  const VectorizedArray<double, 8> &right,
5192  const VectorizedArray<double, 8> &true_values,
5193  const VectorizedArray<double, 8> &false_values)
5194 {
5195  const __mmask16 mask =
5196  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5198  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5199  return result;
5200 }
5201 
5202 # endif
5203 
5204 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5205 
5206 template <SIMDComparison predicate>
5209  const VectorizedArray<float, 8> &right,
5210  const VectorizedArray<float, 8> &true_values,
5211  const VectorizedArray<float, 8> &false_values)
5212 {
5213  const auto mask =
5214  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5215 
5217  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
5218  return result;
5219 }
5220 
5221 
5222 template <SIMDComparison predicate>
5225  const VectorizedArray<double, 4> &right,
5226  const VectorizedArray<double, 4> &true_values,
5227  const VectorizedArray<double, 4> &false_values)
5228 {
5229  const auto mask =
5230  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5231 
5233  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
5234  return result;
5235 }
5236 
5237 # endif
5238 
5239 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5240 
5241 template <SIMDComparison predicate>
5244  const VectorizedArray<float, 4> &right,
5245  const VectorizedArray<float, 4> &true_values,
5246  const VectorizedArray<float, 4> &false_values)
5247 {
5248  __m128 mask;
5249  switch (predicate)
5250  {
5251  case SIMDComparison::equal:
5252  mask = _mm_cmpeq_ps(left.data, right.data);
5253  break;
5255  mask = _mm_cmpneq_ps(left.data, right.data);
5256  break;
5258  mask = _mm_cmplt_ps(left.data, right.data);
5259  break;
5261  mask = _mm_cmple_ps(left.data, right.data);
5262  break;
5264  mask = _mm_cmpgt_ps(left.data, right.data);
5265  break;
5267  mask = _mm_cmpge_ps(left.data, right.data);
5268  break;
5269  }
5270 
5272  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5273  _mm_andnot_ps(mask, false_values.data));
5274 
5275  return result;
5276 }
5277 
5278 
5279 template <SIMDComparison predicate>
5282  const VectorizedArray<double, 2> &right,
5283  const VectorizedArray<double, 2> &true_values,
5284  const VectorizedArray<double, 2> &false_values)
5285 {
5286  __m128d mask;
5287  switch (predicate)
5288  {
5289  case SIMDComparison::equal:
5290  mask = _mm_cmpeq_pd(left.data, right.data);
5291  break;
5293  mask = _mm_cmpneq_pd(left.data, right.data);
5294  break;
5296  mask = _mm_cmplt_pd(left.data, right.data);
5297  break;
5299  mask = _mm_cmple_pd(left.data, right.data);
5300  break;
5302  mask = _mm_cmpgt_pd(left.data, right.data);
5303  break;
5305  mask = _mm_cmpge_pd(left.data, right.data);
5306  break;
5307  }
5308 
5310  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5311  _mm_andnot_pd(mask, false_values.data));
5312 
5313  return result;
5314 }
5315 
5316 # endif
5317 #endif // DOXYGEN
5318 
5319 
5320 namespace internal
5321 {
5322  template <typename T>
5324  {
5325  using value_type = T;
5326  };
5327 
5328  template <typename T, std::size_t width>
5330  {
5331  using value_type = T;
5332  };
5333 } // namespace internal
5334 
5335 
5337 
5344 namespace std
5345 {
5353  template <typename Number, std::size_t width>
5354  inline ::VectorizedArray<Number, width>
5355  sin(const ::VectorizedArray<Number, width> &x)
5356  {
5357  // put values in an array and later read in that array with an unaligned
5358  // read. This should save some instructions as compared to directly
5359  // setting the individual elements and also circumvents a compiler
5360  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
5361  // from April 2014, topic "matrix_free/step-48 Test").
5363  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5364  ++i)
5365  values[i] = std::sin(x[i]);
5367  out.load(&values[0]);
5368  return out;
5369  }
5370 
5371 
5372 
5380  template <typename Number, std::size_t width>
5381  inline ::VectorizedArray<Number, width>
5382  cos(const ::VectorizedArray<Number, width> &x)
5383  {
5385  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5386  ++i)
5387  values[i] = std::cos(x[i]);
5389  out.load(&values[0]);
5390  return out;
5391  }
5392 
5393 
5394 
5402  template <typename Number, std::size_t width>
5403  inline ::VectorizedArray<Number, width>
5404  tan(const ::VectorizedArray<Number, width> &x)
5405  {
5407  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5408  ++i)
5409  values[i] = std::tan(x[i]);
5411  out.load(&values[0]);
5412  return out;
5413  }
5414 
5415 
5416 
5424  template <typename Number, std::size_t width>
5425  inline ::VectorizedArray<Number, width>
5426  exp(const ::VectorizedArray<Number, width> &x)
5427  {
5429  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5430  ++i)
5431  values[i] = std::exp(x[i]);
5433  out.load(&values[0]);
5434  return out;
5435  }
5436 
5437 
5438 
5446  template <typename Number, std::size_t width>
5447  inline ::VectorizedArray<Number, width>
5448  log(const ::VectorizedArray<Number, width> &x)
5449  {
5451  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5452  ++i)
5453  values[i] = std::log(x[i]);
5455  out.load(&values[0]);
5456  return out;
5457  }
5458 
5459 
5460 
5468  template <typename Number, std::size_t width>
5469  inline ::VectorizedArray<Number, width>
5470  sqrt(const ::VectorizedArray<Number, width> &x)
5471  {
5472  return x.get_sqrt();
5473  }
5474 
5475 
5476 
5484  template <typename Number, std::size_t width>
5485  inline ::VectorizedArray<Number, width>
5486  pow(const ::VectorizedArray<Number, width> &x, const Number p)
5487  {
5489  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5490  ++i)
5491  values[i] = std::pow(x[i], p);
5493  out.load(&values[0]);
5494  return out;
5495  }
5496 
5497 
5498 
5507  template <typename Number, std::size_t width>
5508  inline ::VectorizedArray<Number, width>
5509  pow(const ::VectorizedArray<Number, width> &x,
5510  const ::VectorizedArray<Number, width> &p)
5511  {
5513  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5514  ++i)
5515  values[i] = std::pow(x[i], p[i]);
5517  out.load(&values[0]);
5518  return out;
5519  }
5520 
5521 
5522 
5530  template <typename Number, std::size_t width>
5531  inline ::VectorizedArray<Number, width>
5532  abs(const ::VectorizedArray<Number, width> &x)
5533  {
5534  return x.get_abs();
5535  }
5536 
5537 
5538 
5546  template <typename Number, std::size_t width>
5547  inline ::VectorizedArray<Number, width>
5548  max(const ::VectorizedArray<Number, width> &x,
5549  const ::VectorizedArray<Number, width> &y)
5550  {
5551  return x.get_max(y);
5552  }
5553 
5554 
5555 
5563  template <typename Number, std::size_t width>
5564  inline ::VectorizedArray<Number, width>
5565  min(const ::VectorizedArray<Number, width> &x,
5566  const ::VectorizedArray<Number, width> &y)
5567  {
5568  return x.get_min(y);
5569  }
5570 
5571 
5572 
5576  template <class T>
5577  struct iterator_traits<::VectorizedArrayIterator<T>>
5578  {
5579  using iterator_category = random_access_iterator_tag;
5580  using value_type = typename T::value_type;
5581  using difference_type = std::ptrdiff_t;
5582  };
5583 
5584 } // namespace std
5585 
5586 #endif
inline ::VectorizedArray< Number, width > cos(const ::VectorizedArray< Number, width > &x)
Number compare_and_apply_mask(const Number &left, const Number &right, const Number &true_value, const Number &false_value)
::VectorizedArray< Number, width > log(const ::VectorizedArray< Number, width > &)
inline ::VectorizedArray< Number, width > exp(const ::VectorizedArray< Number, width > &x)
Point< dim, typename ProductType< Number, typename EnableIfScalar< OtherNumber >::type >::type > operator*(const OtherNumber factor, const Point< dim, Number > &p)
Definition: point.h:657
::VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &, const Number p)
VectorizedArray< Number, width > operator/(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
void gather(VectorizedArray< Number, width > &out, const std::array< Number *, width > &ptrs, const unsigned int offset)
::VectorizedArray< Number, width > cos(const ::VectorizedArray< Number, width > &)
VectorizedArray & operator+=(const VectorizedArray &vec)
VectorizedArray get_sqrt() const
::VectorizedArray< Number, width > tan(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &)
#define AssertIndexRange(index, range)
Definition: exceptions.h:1720
VectorizedArrayIterator< T > begin()
void streaming_store(Number *ptr) const
const unsigned int v0
Definition: grid_tools.cc:963
STL namespace.
VectorizedArray & operator*=(const VectorizedArray &vec)
const unsigned int v1
Definition: grid_tools.cc:963
std::enable_if<!std::is_same< U, const U >::value, typename T::value_type >::type & operator*()
__global__ void vec_add(Number *val, const Number a, const size_type N)
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
void store(Number *ptr) const
VectorizedArray & operator-=(const VectorizedArray &vec)
VectorizedArrayIterator< T > operator+(const std::size_t &offset) const
VectorizedArrayIterator< T > & operator+=(const std::size_t offset)
void vectorized_load_and_transpose(const unsigned int n_entries, const Number *in, const unsigned int *offsets, VectorizedArray< Number, width > *out)
static ::ExceptionBase & ExcMessage(std::string arg1)
static const char T
void gather(const Number *base_ptr, const unsigned int *offsets)
VectorizedArrayIterator(T &data, const std::size_t lane)
inline ::VectorizedArray< Number, width > tan(const ::VectorizedArray< Number, width > &x)
#define Assert(cond, exc)
Definition: exceptions.h:1461
bool operator==(const VectorizedArrayIterator< T > &other) const
std::ptrdiff_t operator-(const VectorizedArrayIterator< T > &other) const
inline ::VectorizedArray< Number, width > log(const ::VectorizedArray< Number, width > &x)
::VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > exp(const ::VectorizedArray< Number, width > &)
#define DEAL_II_NAMESPACE_CLOSE
Definition: config.h:407
void load(const Number *ptr)
VectorType::value_type * end(VectorType &V)
#define DEAL_II_ALWAYS_INLINE
Definition: config.h:101
bool operator!=(const VectorizedArrayIterator< T > &other) const
VectorizedArray get_min(const VectorizedArray &other) const
static constexpr std::size_t size()
VectorizedArray(const std::initializer_list< U > &list)
Expression fabs(const Expression &x)
VectorizedArray & operator=(const Number scalar)
VectorizedArray get_max(const VectorizedArray &other) const
SIMDComparison
VectorizedArrayIterator< T > & operator--()
VectorizedArrayIterator< T > end()
inline ::VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &x, const ::VectorizedArray< Number, width > &p)
VectorizedArray & operator/=(const VectorizedArray &vec)
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
::VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &)
const T::value_type & operator*() const
Number & operator[](const unsigned int comp)
VectorizedArrayBase(const std::initializer_list< U > &list)
#define DEAL_II_NAMESPACE_OPEN
Definition: config.h:406
VectorType::value_type * begin(VectorType &V)
VectorizedArrayIterator< const T > begin() const
VectorizedArray(const Number scalar)
VectorizedArrayIterator< const T > end() const
bool operator==(const VectorizedArray< Number, width > &lhs, const VectorizedArray< Number, width > &rhs)
VectorizedArray< Number, width > make_vectorized_array(const Number &u)
VectorizedArrayIterator< T > & operator++()
void vectorized_transpose_and_store(const bool add_into, const unsigned int n_entries, const VectorizedArray< Number, width > *in, const unsigned int *offsets, Number *out)
const Number & operator[](const unsigned int comp) const
VectorizedArray get_abs() const
void scatter(const unsigned int *offsets, Number *base_ptr) const
inline ::VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &x)