Reference documentation for deal.II version 9.2.0
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
24 
25 #include <array>
26 #include <cmath>
27 
28 // Note:
29 // The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30 // according to the following scheme (on x86-based architectures)
31 // #ifdef __AVX512F__
32 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33 // #elif defined (__AVX__)
34 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35 // #elif defined (__SSE2__)
36 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37 // #else
38 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39 // #endif
40 // In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
41 // test, 'check_01_cpu_features.cmake', ensures that these features are not only
42 // present in the compilation unit but also working properly.
43 
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45 
46 // These error messages try to detect the case that deal.II was compiled with
47 // a wider instruction set extension than the current compilation unit, for
48 // example because deal.II was compiled with AVX but a user project does not
49 // add -march=native or similar flags, making it fall back to SSE2. If not
50 // detected, this leads to very strange errors, as the size of data structures
51 // differs between the compiled deal.II code sitting in libdeal_II.so and the
52 // user code.
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54 # error \
55  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56 # endif
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58 # error \
59  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60 # endif
61 
62 # if defined(_MSC_VER)
63 # include <intrin.h>
64 # elif defined(__ALTIVEC__)
65 # include <altivec.h>
66 
67 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68 // them before they make trouble
69 # undef vector
70 # undef pixel
71 # undef bool
72 # else
73 # include <x86intrin.h>
74 # endif
75 
76 #endif
77 
78 
79 DEAL_II_NAMESPACE_OPEN
80 
81 
82 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
83 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
84 
85 template <typename Number, std::size_t width>
86 struct EnableIfScalar<VectorizedArray<Number, width>>
87 {
88   using type = VectorizedArray<typename EnableIfScalar<Number>::type, width>;
89 };
90 
91 
92 
98 template <typename T>
99 class VectorizedArrayIterator
100 {
101 public:
108  VectorizedArrayIterator(T &data, const std::size_t lane)
109  : data(&data)
110  , lane(lane)
111  {}
112 
116  bool
117  operator==(const VectorizedArrayIterator<T> &other) const
118  {
119  Assert(this->data == other.data,
120  ExcMessage(
121  "You are trying to compare iterators into different arrays."));
122  return this->lane == other.lane;
123  }
124 
128  bool
129  operator!=(const VectorizedArrayIterator<T> &other) const
130  {
131  Assert(this->data == other.data,
132  ExcMessage(
133  "You are trying to compare iterators into different arrays."));
134  return this->lane != other.lane;
135  }
136 
140  VectorizedArrayIterator<T> &
141  operator=(const VectorizedArrayIterator<T> &other) = default;
142 
147  const typename T::value_type &operator*() const
148  {
149  AssertIndexRange(lane, T::size());
150  return (*data)[lane];
151  }
152 
153 
158  template <typename U = T>
159  typename std::enable_if<!std::is_same<U, const U>::value,
160                          typename T::value_type>::type &
161  operator*()
162  {
163  AssertIndexRange(lane, T::size());
164  return (*data)[lane];
165  }
166 
172  VectorizedArrayIterator<T> &
173  operator++()
174  {
175  AssertIndexRange(lane + 1, T::size() + 1);
176  lane++;
177  return *this;
178  }
179 
184  VectorizedArrayIterator<T> &
185  operator+=(const std::size_t offset)
186  {
187  AssertIndexRange(lane + offset, T::size() + 1);
188  lane += offset;
189  return *this;
190  }
191 
197  VectorizedArrayIterator<T> &
198  operator--()
199  {
200  Assert(
201  lane > 0,
202  ExcMessage(
203  "You can't decrement an iterator that is already at the beginning of the range."));
204  --lane;
205  return *this;
206  }
207 
211  VectorizedArrayIterator<T>
212  operator+(const std::size_t &offset) const
213  {
214  AssertIndexRange(lane + offset, T::size() + 1);
215  return VectorizedArrayIterator<T>(*data, lane + offset);
216  }
217 
221  std::ptrdiff_t
222  operator-(const VectorizedArrayIterator<T> &other) const
223  {
224  return static_cast<std::ptrdiff_t>(lane) -
225  static_cast<ptrdiff_t>(other.lane);
226  }
227 
228 private:
232  T *data;
233 
237  std::size_t lane;
238 };
239 
240 
241 
253 template <typename T, std::size_t width>
254 class VectorizedArrayBase
255 {
256 public:
260  static constexpr std::size_t
261  size()
262  {
263  return width;
264  }
265 
269  VectorizedArrayIterator<T>
270  begin()
271  {
272  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
273  }
274 
279  VectorizedArrayIterator<const T>
280  begin() const
281  {
282  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
283  }
284 
288  VectorizedArrayIterator<T>
289  end()
290  {
291  return VectorizedArrayIterator<T>(static_cast<T &>(*this), width);
292  }
293 
298  VectorizedArrayIterator<const T>
299  end() const
300  {
301  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
302  width);
303  }
304 };
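
// A minimal usage sketch (illustration only, not part of this file) of the
// begin()/end() iterators defined in VectorizedArrayBase above: they make a
// VectorizedArray usable in range-based for loops over its lanes. The width
// is whatever deal.II selected at configure time.
// @code
//   #include <deal.II/base/vectorization.h>
//
//   double sum_of_lanes(const dealii::VectorizedArray<double> &v)
//   {
//     double sum = 0.;
//     for (const double lane : v) // uses VectorizedArrayBase::begin()/end()
//       sum += lane;
//     return sum;
//   }
// @endcode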
305 
306 
307 
394 template <typename Number, std::size_t width>
395 class VectorizedArray
396  : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
397 {
398 public:
402  using value_type = Number;
403 
411  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 1;
412 
413  static_assert(width == 1,
414  "You specified an illegal width that is not supported.");
415 
420  VectorizedArray() = default;
421 
425  VectorizedArray(const Number scalar)
426  {
427  this->operator=(scalar);
428  }
429 
434  VectorizedArray &
435  operator=(const Number scalar)
436  {
437  data = scalar;
438  return *this;
439  }
440 
446  Number &operator[](const unsigned int comp)
447  {
448  (void)comp;
449  AssertIndexRange(comp, 1);
450  return data;
451  }
452 
458  const Number &operator[](const unsigned int comp) const
459  {
460  (void)comp;
461  AssertIndexRange(comp, 1);
462  return data;
463  }
464 
469  VectorizedArray &
470  operator+=(const VectorizedArray &vec)
471  {
472  data += vec.data;
473  return *this;
474  }
475 
480  VectorizedArray &
481  operator-=(const VectorizedArray &vec)
482  {
483  data -= vec.data;
484  return *this;
485  }
486 
491  VectorizedArray &
492  operator*=(const VectorizedArray &vec)
493  {
494  data *= vec.data;
495  return *this;
496  }
497 
502  VectorizedArray &
503  operator/=(const VectorizedArray &vec)
504  {
505  data /= vec.data;
506  return *this;
507  }
508 
516  void
517  load(const Number *ptr)
518  {
519  data = *ptr;
520  }
521 
529  void
530  store(Number *ptr) const
531  {
532  *ptr = data;
533  }
534 
582  void
583  streaming_store(Number *ptr) const
584  {
585  *ptr = data;
586  }
587 
601  void
602  gather(const Number *base_ptr, const unsigned int *offsets)
603  {
604  data = base_ptr[offsets[0]];
605  }
606 
620  void
621  scatter(const unsigned int *offsets, Number *base_ptr) const
622  {
623  base_ptr[offsets[0]] = data;
624  }
625 
631  Number data;
632 
633 private:
639  VectorizedArray
640  get_sqrt() const
641  {
642  VectorizedArray res;
643  res.data = std::sqrt(data);
644  return res;
645  }
646 
652  VectorizedArray
653  get_abs() const
654  {
655  VectorizedArray res;
656  res.data = std::fabs(data);
657  return res;
658  }
659 
665  VectorizedArray
666  get_max(const VectorizedArray &other) const
667  {
668  VectorizedArray res;
669  res.data = std::max(data, other.data);
670  return res;
671  }
672 
678  VectorizedArray
679  get_min(const VectorizedArray &other) const
680  {
681  VectorizedArray res;
682  res.data = std::min(data, other.data);
683  return res;
684  }
685 
686  // Make a few functions friends.
687  template <typename Number2, std::size_t width2>
688  friend VectorizedArray<Number2, width2>
689  std::sqrt(const VectorizedArray<Number2, width2> &);
690  template <typename Number2, std::size_t width2>
691  friend VectorizedArray<Number2, width2>
692  std::abs(const VectorizedArray<Number2, width2> &);
693  template <typename Number2, std::size_t width2>
694  friend VectorizedArray<Number2, width2>
695  std::max(const VectorizedArray<Number2, width2> &,
696           const VectorizedArray<Number2, width2> &);
697  template <typename Number2, std::size_t width2>
698  friend VectorizedArray<Number2, width2>
699  std::min(const VectorizedArray<Number2, width2> &,
700           const VectorizedArray<Number2, width2> &);
701 };
702 
703 
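
// A short sketch (illustration only) of the load()/store() interface on the
// generic, width-one fallback class above; the same calls work for the wider
// specializations further down in this file.
// @code
//   double buffer[1] = {3.14};
//   dealii::VectorizedArray<double, 1> w;
//   w.load(buffer);  // w.data == 3.14
//   w += w;          // element-wise, here a single lane
//   w.store(buffer); // buffer[0] == 6.28
// @endcode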
704 
705 // We need to have a separate declaration for static const members
706 template <typename Number, std::size_t width>
707 const unsigned int VectorizedArray<Number, width>::n_array_elements;
708 
709 
710 
715 
716 
723 template <typename Number,
724           std::size_t width =
725             internal::VectorizedArrayWidthSpecifier<Number>::max_width>
726 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
727  make_vectorized_array(const Number &u)
728 {
729  VectorizedArray<Number, width> result = u;
730  return result;
731 }
732 
733 
734 
741 template <typename VectorizedArrayType>
742 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
743  make_vectorized_array(const typename VectorizedArrayType::value_type &u)
744 {
745  static_assert(
746  std::is_same<VectorizedArrayType,
747  VectorizedArray<typename VectorizedArrayType::value_type,
748  VectorizedArrayType::size()>>::value,
749  "VectorizedArrayType is not a VectorizedArray.");
750 
751  VectorizedArrayType result = u;
752  return result;
753 }
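
// A brief sketch (illustration only) of how the two make_vectorized_array()
// overloads above are typically called; the deduced width depends on the
// instruction set deal.II was configured with.
// @code
//   // broadcast 1.5 to all lanes, width chosen automatically
//   const auto a = dealii::make_vectorized_array(1.5);
//
//   // request a specific VectorizedArray type explicitly
//   using VA   = dealii::VectorizedArray<double, 1>;
//   const VA b = dealii::make_vectorized_array<VA>(1.5);
// @endcode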
754 
755 
756 
768 template <typename Number, std::size_t width>
769 inline DEAL_II_ALWAYS_INLINE void
770 gather(VectorizedArray<Number, width> &out,
771        const std::array<Number *, width> &ptrs,
772  const unsigned int offset)
773 {
774  for (unsigned int v = 0; v < width; v++)
775  out.data[v] = ptrs[v][offset];
776 }
777 
778 
779 
805 template <typename Number, std::size_t width>
806 inline DEAL_II_ALWAYS_INLINE void
807 vectorized_load_and_transpose(const unsigned int n_entries,
808  const Number * in,
809  const unsigned int * offsets,
810  VectorizedArray<Number, width> *out)
811 {
812  for (unsigned int i = 0; i < n_entries; ++i)
813  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
814  out[i][v] = in[offsets[v] + i];
815 }
816 
817 
829 template <typename Number, std::size_t width>
830 inline DEAL_II_ALWAYS_INLINE void
831 vectorized_load_and_transpose(const unsigned int n_entries,
832  const std::array<Number *, width> &in,
833  VectorizedArray<Number, width> *out)
834 {
835  for (unsigned int i = 0; i < n_entries; ++i)
836  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
837  out[i][v] = in[v][i];
838 }
839 
840 
841 
880 template <typename Number, std::size_t width>
881 inline DEAL_II_ALWAYS_INLINE void
882 vectorized_transpose_and_store(const bool add_into,
883  const unsigned int n_entries,
884  const VectorizedArray<Number, width> *in,
885  const unsigned int * offsets,
886  Number * out)
887 {
888  if (add_into)
889  for (unsigned int i = 0; i < n_entries; ++i)
890  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
891  out[offsets[v] + i] += in[i][v];
892  else
893  for (unsigned int i = 0; i < n_entries; ++i)
894  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
895  out[offsets[v] + i] = in[i][v];
896 }
897 
898 
910 template <typename Number, std::size_t width>
911 inline DEAL_II_ALWAYS_INLINE void
912 vectorized_transpose_and_store(const bool add_into,
913  const unsigned int n_entries,
914  const VectorizedArray<Number, width> *in,
915  std::array<Number *, width> & out)
916 {
917  if (add_into)
918  for (unsigned int i = 0; i < n_entries; ++i)
919  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
920  out[v][i] += in[i][v];
921  else
922  for (unsigned int i = 0; i < n_entries; ++i)
923  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
924  out[v][i] = in[i][v];
925 }
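
// A small round-trip sketch (illustration only; the sizes and offsets are
// made up for the example, and dealii::AlignedVector is assumed to come from
// <deal.II/base/aligned_vector.h>) showing how the
// vectorized_load_and_transpose() / vectorized_transpose_and_store() pair
// above converts between a scalar layout and the lane-wise layout:
// @code
//   constexpr std::size_t width = dealii::VectorizedArray<double>::size();
//   const unsigned int    n_entries = 3;
//
//   std::vector<double>       scalar_data(width * n_entries, 1.0);
//   std::vector<unsigned int> offsets(width);
//   for (unsigned int v = 0; v < width; ++v)
//     offsets[v] = v * n_entries; // lane v reads its own contiguous block
//
//   dealii::AlignedVector<dealii::VectorizedArray<double>> packed(n_entries);
//   dealii::vectorized_load_and_transpose(n_entries,
//                                         scalar_data.data(),
//                                         offsets.data(),
//                                         packed.data());
//   // ... operate on 'packed' lane-wise ...
//   dealii::vectorized_transpose_and_store(false, // overwrite, do not add
//                                          n_entries,
//                                          packed.data(),
//                                          offsets.data(),
//                                          scalar_data.data());
// @endcode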
926 
927 
929 
930 #ifndef DOXYGEN
931 
932 // for safety, also check that __AVX512F__ is defined in case the user manually
933 // set some conflicting compile flags which prevent compilation
934 
935 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
936 
940 template <>
941 class VectorizedArray<double, 8>
942  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
943 {
944 public:
948  using value_type = double;
949 
955  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 8;
956 
961  VectorizedArray() = default;
962 
966  VectorizedArray(const double scalar)
967  {
968  this->operator=(scalar);
969  }
970 
975  VectorizedArray &
976  operator=(const double x)
977  {
978  data = _mm512_set1_pd(x);
979  return *this;
980  }
981 
986  double &operator[](const unsigned int comp)
987  {
988  AssertIndexRange(comp, 8);
989  return *(reinterpret_cast<double *>(&data) + comp);
990  }
991 
996  const double &operator[](const unsigned int comp) const
997  {
998  AssertIndexRange(comp, 8);
999  return *(reinterpret_cast<const double *>(&data) + comp);
1000  }
1001 
1006  VectorizedArray &
1007  operator+=(const VectorizedArray &vec)
1008  {
1009  // if the compiler supports vector arithmetic, we can simply use +=
1010  // operator on the given data type. this allows the compiler to combine
1011  // additions with multiplication (fused multiply-add) if those
1012  // instructions are available. Otherwise, we need to use the built-in
1013  // intrinsic command for __m512d
1014 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1015  data += vec.data;
1016 # else
1017  data = _mm512_add_pd(data, vec.data);
1018 # endif
1019  return *this;
1020  }
1021 
1026  VectorizedArray &
1027  operator-=(const VectorizedArray &vec)
1028  {
1029 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1030  data -= vec.data;
1031 # else
1032  data = _mm512_sub_pd(data, vec.data);
1033 # endif
1034  return *this;
1035  }
1040  VectorizedArray &
1041  operator*=(const VectorizedArray &vec)
1042  {
1043 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1044  data *= vec.data;
1045 # else
1046  data = _mm512_mul_pd(data, vec.data);
1047 # endif
1048  return *this;
1049  }
1050 
1055  VectorizedArray &
1056  operator/=(const VectorizedArray &vec)
1057  {
1058 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1059  data /= vec.data;
1060 # else
1061  data = _mm512_div_pd(data, vec.data);
1062 # endif
1063  return *this;
1064  }
1065 
1072  void
1073  load(const double *ptr)
1074  {
1075  data = _mm512_loadu_pd(ptr);
1076  }
1077 
1085  void
1086  store(double *ptr) const
1087  {
1088  _mm512_storeu_pd(ptr, data);
1089  }
1090 
1095  void
1096  streaming_store(double *ptr) const
1097  {
1098  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1099  ExcMessage("Memory not aligned"));
1100  _mm512_stream_pd(ptr, data);
1101  }
1102 
1116  void
1117  gather(const double *base_ptr, const unsigned int *offsets)
1118  {
1119  // unfortunately, there does not appear to be a 256 bit integer load, so
1120  // do it by some reinterpret casts here. this is allowed because the Intel
1121  // API allows aliasing between different vector types.
1122  const __m256 index_val =
1123  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1124  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1125  data = _mm512_i32gather_pd(index, base_ptr, 8);
1126  }
1127 
1141  void
1142  scatter(const unsigned int *offsets, double *base_ptr) const
1143  {
1144  for (unsigned int i = 0; i < 8; ++i)
1145  for (unsigned int j = i + 1; j < 8; ++j)
1146  Assert(offsets[i] != offsets[j],
1147  ExcMessage("Result of scatter undefined if two offset elements"
1148  " point to the same position"));
1149 
1150  // unfortunately, there does not appear to be a 256 bit integer load, so
1151  // do it by some reinterpret casts here. this is allowed because the Intel
1152  // API allows aliasing between different vector types.
1153  const __m256 index_val =
1154  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1155  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1156  _mm512_i32scatter_pd(base_ptr, index, data, 8);
1157  }
1158 
1164  __m512d data;
1165 
1166 private:
1172  VectorizedArray
1173  get_sqrt() const
1174  {
1175  VectorizedArray res;
1176  res.data = _mm512_sqrt_pd(data);
1177  return res;
1178  }
1179 
1185  VectorizedArray
1186  get_abs() const
1187  {
1188  // to compute the absolute value, perform bitwise andnot with -0. This
1189  // will leave all value and exponent bits unchanged but force the sign
1190  // value to +. Since there is no andnot for AVX512, we interpret the data
1191  // as 64 bit integers and do the andnot on those types (note that andnot
1192  // is a bitwise operation so the data type does not matter)
1193  __m512d mask = _mm512_set1_pd(-0.);
1194  VectorizedArray res;
1195  res.data = reinterpret_cast<__m512d>(
1196  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1197  reinterpret_cast<__m512i>(data)));
1198  return res;
1199  }
1200 
1206  VectorizedArray
1207  get_max(const VectorizedArray &other) const
1208  {
1209  VectorizedArray res;
1210  res.data = _mm512_max_pd(data, other.data);
1211  return res;
1212  }
1213 
1219  VectorizedArray
1220  get_min(const VectorizedArray &other) const
1221  {
1222  VectorizedArray res;
1223  res.data = _mm512_min_pd(data, other.data);
1224  return res;
1225  }
1226 
1227  // Make a few functions friends.
1228  template <typename Number2, std::size_t width2>
1229  friend VectorizedArray<Number2, width2>
1230  std::sqrt(const VectorizedArray<Number2, width2> &);
1231  template <typename Number2, std::size_t width2>
1232  friend VectorizedArray<Number2, width2>
1233  std::abs(const VectorizedArray<Number2, width2> &);
1234  template <typename Number2, std::size_t width2>
1235  friend VectorizedArray<Number2, width2>
1236  std::max(const VectorizedArray<Number2, width2> &,
1237           const VectorizedArray<Number2, width2> &);
1238  template <typename Number2, std::size_t width2>
1239  friend VectorizedArray<Number2, width2>
1240  std::min(const VectorizedArray<Number2, width2> &,
1241           const VectorizedArray<Number2, width2> &);
1242 };
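
// A gather/scatter sketch (illustration only; 'src' and 'dst' stand for
// sufficiently large double arrays and the index pattern is arbitrary).
// Shown here for the width-8 specialization above, but the same member
// interface exists for the other widths. Note the requirement that all
// offsets passed to scatter() be distinct.
// @code
//   dealii::VectorizedArray<double, 8> v;
//   const unsigned int offsets[8] = {0, 2, 4, 6, 8, 10, 12, 14};
//   v.gather(src, offsets);  // v[k] = src[offsets[k]]
//   v *= v;                  // square each lane
//   v.scatter(offsets, dst); // dst[offsets[k]] = v[k]
// @endcode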
1243 
1244 
1245 
1249 template <>
1250 inline DEAL_II_ALWAYS_INLINE void
1251 vectorized_load_and_transpose(const unsigned int n_entries,
1252  const double * in,
1253  const unsigned int * offsets,
1254  VectorizedArray<double, 8> *out)
1255 {
1256  // do not do full transpose because the code is long and will most
1257  // likely not pay off because many processors have two load units
1258  // (for the top 8 instructions) but only 1 permute unit (for the 8
1259  // shuffle/unpack instructions). rather start the transposition on the
1260  // vectorized array of half the size with 256 bits
1261  const unsigned int n_chunks = n_entries / 4;
1262  for (unsigned int i = 0; i < n_chunks; ++i)
1263  {
1264  __m512d t0, t1, t2, t3 = {};
1265 
1266  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1267  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1268  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1269  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1270  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1271  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1272  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1273  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1274 
1275  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1276  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1277  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1278  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1279  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1280  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1281  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1282  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1283  }
1284  // remainder loop of work that does not divide by 4
1285  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1286  out[i].gather(in + i, offsets);
1287 }
1288 
1289 
1290 
1294 template <>
1295 inline DEAL_II_ALWAYS_INLINE void
1296 vectorized_load_and_transpose(const unsigned int n_entries,
1297  const std::array<double *, 8> &in,
1298  VectorizedArray<double, 8> *out)
1299 {
1300  const unsigned int n_chunks = n_entries / 4;
1301  for (unsigned int i = 0; i < n_chunks; ++i)
1302  {
1303  __m512d t0, t1, t2, t3 = {};
1304 
1305  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1306  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1307  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1308  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1309  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1310  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1311  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1312  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1313 
1314  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1315  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1316  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1317  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1318  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1319  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1320  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1321  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1322  }
1323 
1324  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1325  gather(out[i], in, i);
1326 }
1327 
1328 
1329 
1333 template <>
1334 inline DEAL_II_ALWAYS_INLINE void
1335 vectorized_transpose_and_store(const bool add_into,
1336  const unsigned int n_entries,
1337  const VectorizedArray<double, 8> *in,
1338  const unsigned int * offsets,
1339  double * out)
1340 {
1341  // as for the load, we split the store operations into 256 bit units to
1342  // better balance between code size, shuffle instructions, and stores
1343  const unsigned int n_chunks = n_entries / 4;
1344  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1345  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1346  for (unsigned int i = 0; i < n_chunks; ++i)
1347  {
1348  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1349  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1350  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1351  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1352  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1353  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1354  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1355  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1356  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1357  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1358  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1359  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1360  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1361  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1362  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1363  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1364 
1365  // Cannot use the same store instructions in both paths of the 'if'
1366  // because the compiler cannot know that there is no aliasing
1367  // between pointers
1368  if (add_into)
1369  {
1370  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1371  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1372  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1373  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1374  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1375  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1376  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1377  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1378  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1379  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1380  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1381  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1382  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1383  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1384  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1385  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1386  }
1387  else
1388  {
1389  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1390  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1391  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1392  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1393  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1394  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1395  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1396  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1397  }
1398  }
1399 
1400  // remainder loop of work that does not divide by 4
1401  if (add_into)
1402  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1403  for (unsigned int v = 0; v < 8; ++v)
1404  out[offsets[v] + i] += in[i][v];
1405  else
1406  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1407  for (unsigned int v = 0; v < 8; ++v)
1408  out[offsets[v] + i] = in[i][v];
1409 }
1410 
1411 
1412 
1416 template <>
1417 inline DEAL_II_ALWAYS_INLINE void
1418 vectorized_transpose_and_store(const bool add_into,
1419  const unsigned int n_entries,
1420  const VectorizedArray<double, 8> *in,
1421  std::array<double *, 8> & out)
1422 {
1423  // see the comments in the vectorized_transpose_and_store above
1424 
1425  const unsigned int n_chunks = n_entries / 4;
1426  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1427  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1428  for (unsigned int i = 0; i < n_chunks; ++i)
1429  {
1430  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1431  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1432  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1433  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1434  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1435  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1436  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1437  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1438  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1439  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1440  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1441  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1442  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1443  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1444  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1445  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1446 
1447  if (add_into)
1448  {
1449  res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1450  _mm256_storeu_pd(out[0] + 4 * i, res0);
1451  res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1452  _mm256_storeu_pd(out[1] + 4 * i, res1);
1453  res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1454  _mm256_storeu_pd(out[2] + 4 * i, res2);
1455  res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1456  _mm256_storeu_pd(out[3] + 4 * i, res3);
1457  res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1458  _mm256_storeu_pd(out[4] + 4 * i, res4);
1459  res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1460  _mm256_storeu_pd(out[5] + 4 * i, res5);
1461  res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1462  _mm256_storeu_pd(out[6] + 4 * i, res6);
1463  res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1464  _mm256_storeu_pd(out[7] + 4 * i, res7);
1465  }
1466  else
1467  {
1468  _mm256_storeu_pd(out[0] + 4 * i, res0);
1469  _mm256_storeu_pd(out[1] + 4 * i, res1);
1470  _mm256_storeu_pd(out[2] + 4 * i, res2);
1471  _mm256_storeu_pd(out[3] + 4 * i, res3);
1472  _mm256_storeu_pd(out[4] + 4 * i, res4);
1473  _mm256_storeu_pd(out[5] + 4 * i, res5);
1474  _mm256_storeu_pd(out[6] + 4 * i, res6);
1475  _mm256_storeu_pd(out[7] + 4 * i, res7);
1476  }
1477  }
1478 
1479  if (add_into)
1480  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1481  for (unsigned int v = 0; v < 8; ++v)
1482  out[v][i] += in[i][v];
1483  else
1484  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1485  for (unsigned int v = 0; v < 8; ++v)
1486  out[v][i] = in[i][v];
1487 }
1488 
1489 
1490 
1494 template <>
1495 class VectorizedArray<float, 16>
1496  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
1497 {
1498 public:
1502  using value_type = float;
1503 
1509  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 16;
1510 
1515  VectorizedArray() = default;
1516 
1520  VectorizedArray(const float scalar)
1521  {
1522  this->operator=(scalar);
1523  }
1524 
1529  VectorizedArray &
1530  operator=(const float x)
1531  {
1532  data = _mm512_set1_ps(x);
1533  return *this;
1534  }
1535 
1540  float &operator[](const unsigned int comp)
1541  {
1542  AssertIndexRange(comp, 16);
1543  return *(reinterpret_cast<float *>(&data) + comp);
1544  }
1545 
1550  const float &operator[](const unsigned int comp) const
1551  {
1552  AssertIndexRange(comp, 16);
1553  return *(reinterpret_cast<const float *>(&data) + comp);
1554  }
1555 
1560  VectorizedArray &
1561  operator+=(const VectorizedArray &vec)
1562  {
1563  // if the compiler supports vector arithmetic, we can simply use +=
1564  // operator on the given data type. this allows the compiler to combine
1565  // additions with multiplication (fused multiply-add) if those
1566  // instructions are available. Otherwise, we need to use the built-in
1567  // intrinsic command for __m512
1568 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1569  data += vec.data;
1570 # else
1571  data = _mm512_add_ps(data, vec.data);
1572 # endif
1573  return *this;
1574  }
1575 
1580  VectorizedArray &
1581  operator-=(const VectorizedArray &vec)
1582  {
1583 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1584  data -= vec.data;
1585 # else
1586  data = _mm512_sub_ps(data, vec.data);
1587 # endif
1588  return *this;
1589  }
1594  VectorizedArray &
1595  operator*=(const VectorizedArray &vec)
1596  {
1597 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1598  data *= vec.data;
1599 # else
1600  data = _mm512_mul_ps(data, vec.data);
1601 # endif
1602  return *this;
1603  }
1604 
1609  VectorizedArray &
1610  operator/=(const VectorizedArray &vec)
1611  {
1612 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1613  data /= vec.data;
1614 # else
1615  data = _mm512_div_ps(data, vec.data);
1616 # endif
1617  return *this;
1618  }
1619 
1626  void
1627  load(const float *ptr)
1628  {
1629  data = _mm512_loadu_ps(ptr);
1630  }
1631 
1639  void
1640  store(float *ptr) const
1641  {
1642  _mm512_storeu_ps(ptr, data);
1643  }
1644 
1649  void
1650  streaming_store(float *ptr) const
1651  {
1652  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1653  ExcMessage("Memory not aligned"));
1654  _mm512_stream_ps(ptr, data);
1655  }
1656 
1670  void
1671  gather(const float *base_ptr, const unsigned int *offsets)
1672  {
1673  // unfortunately, there does not appear to be a 512 bit integer load, so
1674  // do it by some reinterpret casts here. this is allowed because the Intel
1675  // API allows aliasing between different vector types.
1676  const __m512 index_val =
1677  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1678  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1679  data = _mm512_i32gather_ps(index, base_ptr, 4);
1680  }
1681 
1695  void
1696  scatter(const unsigned int *offsets, float *base_ptr) const
1697  {
1698  for (unsigned int i = 0; i < 16; ++i)
1699  for (unsigned int j = i + 1; j < 16; ++j)
1700  Assert(offsets[i] != offsets[j],
1701  ExcMessage("Result of scatter undefined if two offset elements"
1702  " point to the same position"));
1703 
1704  // unfortunately, there does not appear to be a 512 bit integer load, so
1705  // do it by some reinterpret casts here. this is allowed because the Intel
1706  // API allows aliasing between different vector types.
1707  const __m512 index_val =
1708  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1709  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1710  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1711  }
1712 
1718  __m512 data;
1719 
1720 private:
1726  VectorizedArray
1727  get_sqrt() const
1728  {
1729  VectorizedArray res;
1730  res.data = _mm512_sqrt_ps(data);
1731  return res;
1732  }
1733 
1739  VectorizedArray
1740  get_abs() const
1741  {
1742  // to compute the absolute value, perform bitwise andnot with -0. This
1743  // will leave all value and exponent bits unchanged but force the sign
1744  // value to +. Since there is no andnot for AVX512, we interpret the data
1745  // as 32 bit integers and do the andnot on those types (note that andnot
1746  // is a bitwise operation so the data type does not matter)
1747  __m512 mask = _mm512_set1_ps(-0.f);
1748  VectorizedArray res;
1749  res.data = reinterpret_cast<__m512>(
1750  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1751  reinterpret_cast<__m512i>(data)));
1752  return res;
1753  }
1754 
1760  VectorizedArray
1761  get_max(const VectorizedArray &other) const
1762  {
1763  VectorizedArray res;
1764  res.data = _mm512_max_ps(data, other.data);
1765  return res;
1766  }
1767 
1773  VectorizedArray
1774  get_min(const VectorizedArray &other) const
1775  {
1776  VectorizedArray res;
1777  res.data = _mm512_min_ps(data, other.data);
1778  return res;
1779  }
1780 
1781  // Make a few functions friends.
1782  template <typename Number2, std::size_t width2>
1783  friend VectorizedArray<Number2, width2>
1784  std::sqrt(const VectorizedArray<Number2, width2> &);
1785  template <typename Number2, std::size_t width2>
1786  friend VectorizedArray<Number2, width2>
1787  std::abs(const VectorizedArray<Number2, width2> &);
1788  template <typename Number2, std::size_t width2>
1789  friend VectorizedArray<Number2, width2>
1790  std::max(const VectorizedArray<Number2, width2> &,
1791           const VectorizedArray<Number2, width2> &);
1792  template <typename Number2, std::size_t width2>
1793  friend VectorizedArray<Number2, width2>
1794  std::min(const VectorizedArray<Number2, width2> &,
1795           const VectorizedArray<Number2, width2> &);
1796 };
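
// The sign-mask trick used in get_abs() above, written out for a single
// float as a scalar sketch (illustration only): clearing the sign bit via
// ANDNOT with -0.f yields the absolute value without a branch, which is
// what the vector intrinsics do for all lanes at once.
// @code
//   #include <cstdint>
//   #include <cstring>
//
//   float abs_via_sign_mask(const float x)
//   {
//     std::uint32_t bits, mask;
//     const float   minus_zero = -0.f;
//     std::memcpy(&bits, &x, sizeof(bits));
//     std::memcpy(&mask, &minus_zero, sizeof(mask));
//     bits &= ~mask; // andnot: keep everything except the sign bit
//     float result;
//     std::memcpy(&result, &bits, sizeof(result));
//     return result;
//   }
// @endcode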
1797 
1798 
1799 
1803 template <>
1804 inline DEAL_II_ALWAYS_INLINE void
1805 vectorized_load_and_transpose(const unsigned int n_entries,
1806  const float * in,
1807  const unsigned int * offsets,
1808  VectorizedArray<float, 16> *out)
1809 {
1810  // Similar to the double case, we perform the work on smaller entities. In
1811  // this case, we start from 128 bit arrays and insert them into a full 512
1812  // bit index. This reduces the code size and register pressure because we do
1813  // shuffles on 4 numbers rather than 16.
1814  const unsigned int n_chunks = n_entries / 4;
1815 
1816  // To avoid warnings about uninitialized variables, need to initialize one
1817  // variable to a pre-existing value in out, which will never get used in
1818  // the end. Keep the initialization outside the loop because of a bug in
1819  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
1820  // case t3 is initialized to zero (inside/outside of loop), see
1821  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1822  __m512 t0, t1, t2, t3;
1823  if (n_chunks > 0)
1824  t3 = out[0].data;
1825  for (unsigned int i = 0; i < n_chunks; ++i)
1826  {
1827  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1828  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1829  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1830  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1831  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1832  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1833  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1834  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1835  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1836  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1837  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1838  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1839  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1840  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1841  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1842  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1843 
1844  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1845  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1846  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1847  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1848 
1849  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1850  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1851  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1852  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1853  }
1854 
1855  // remainder loop of work that does not divide by 4
1856  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1857  out[i].gather(in + i, offsets);
1858 }
1859 
1860 
1861 
1865 template <>
1866 inline DEAL_II_ALWAYS_INLINE void
1867 vectorized_load_and_transpose(const unsigned int n_entries,
1868  const std::array<float *, 16> &in,
1869  VectorizedArray<float, 16> *out)
1870 {
1871  // see the comments in the vectorized_load_and_transpose above
1872 
1873  const unsigned int n_chunks = n_entries / 4;
1874 
1875  __m512 t0, t1, t2, t3;
1876  if (n_chunks > 0)
1877  t3 = out[0].data;
1878  for (unsigned int i = 0; i < n_chunks; ++i)
1879  {
1880  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1881  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1882  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1883  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1884  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1885  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1886  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1887  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1888  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1889  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1890  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1891  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1892  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1893  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1894  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1895  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1896 
1897  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1898  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1899  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1900  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1901 
1902  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1903  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1904  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1905  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1906  }
1907 
1908  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1909  gather(out[i], in, i);
1910 }
1911 
1912 
1913 
1917 template <>
1918 inline DEAL_II_ALWAYS_INLINE void
1919 vectorized_transpose_and_store(const bool add_into,
1920  const unsigned int n_entries,
1921  const VectorizedArray<float, 16> *in,
1922  const unsigned int * offsets,
1923  float * out)
1924 {
1925  const unsigned int n_chunks = n_entries / 4;
1926  for (unsigned int i = 0; i < n_chunks; ++i)
1927  {
1928  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1929  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1930  __m512 t2 =
1931  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1932  __m512 t3 =
1933  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1934  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1935  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1936  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1937  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1938 
1939  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1940  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1941  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1942  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1943  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1944  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1945  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1946  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1947  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1948  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1949  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1950  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1951  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1952  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1953  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1954  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1955 
1956  // Cannot use the same store instructions in both paths of the 'if'
1957  // because the compiler cannot know that there is no aliasing between
1958  // pointers
1959  if (add_into)
1960  {
1961  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
1962  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1963  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
1964  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1965  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
1966  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1967  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
1968  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1969  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
1970  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1971  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
1972  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1973  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
1974  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1975  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
1976  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1977  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
1978  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1979  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
1980  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1981  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
1982  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1983  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
1984  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1985  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
1986  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1987  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
1988  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1989  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
1990  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1991  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
1992  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1993  }
1994  else
1995  {
1996  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1997  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1998  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1999  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2000  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2001  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2002  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2003  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2004  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2005  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2006  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2007  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2008  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2009  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2010  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2011  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2012  }
2013  }
2014 
2015  // remainder loop of work that does not divide by 4
2016  if (add_into)
2017  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2018  for (unsigned int v = 0; v < 16; ++v)
2019  out[offsets[v] + i] += in[i][v];
2020  else
2021  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2022  for (unsigned int v = 0; v < 16; ++v)
2023  out[offsets[v] + i] = in[i][v];
2024 }
2025 
2026 
2027 
2031 template <>
2032 inline DEAL_II_ALWAYS_INLINE void
2033 vectorized_transpose_and_store(const bool add_into,
2034  const unsigned int n_entries,
2035  const VectorizedArray<float, 16> *in,
2036  std::array<float *, 16> & out)
2037 {
2038  // see the comments in the vectorized_transpose_and_store above
2039 
2040  const unsigned int n_chunks = n_entries / 4;
2041  for (unsigned int i = 0; i < n_chunks; ++i)
2042  {
2043  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2044  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2045  __m512 t2 =
2046  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2047  __m512 t3 =
2048  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2049  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2050  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2051  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2052  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2053 
2054  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2055  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2056  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2057  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2058  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2059  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2060  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2061  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2062  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2063  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2064  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2065  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2066  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2067  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2068  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2069  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2070 
2071  if (add_into)
2072  {
2073  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2074  _mm_storeu_ps(out[0] + 4 * i, res0);
2075  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2076  _mm_storeu_ps(out[1] + 4 * i, res1);
2077  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2078  _mm_storeu_ps(out[2] + 4 * i, res2);
2079  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2080  _mm_storeu_ps(out[3] + 4 * i, res3);
2081  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2082  _mm_storeu_ps(out[4] + 4 * i, res4);
2083  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2084  _mm_storeu_ps(out[5] + 4 * i, res5);
2085  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2086  _mm_storeu_ps(out[6] + 4 * i, res6);
2087  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2088  _mm_storeu_ps(out[7] + 4 * i, res7);
2089  res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2090  _mm_storeu_ps(out[8] + 4 * i, res8);
2091  res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2092  _mm_storeu_ps(out[9] + 4 * i, res9);
2093  res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2094  _mm_storeu_ps(out[10] + 4 * i, res10);
2095  res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2096  _mm_storeu_ps(out[11] + 4 * i, res11);
2097  res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2098  _mm_storeu_ps(out[12] + 4 * i, res12);
2099  res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2100  _mm_storeu_ps(out[13] + 4 * i, res13);
2101  res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2102  _mm_storeu_ps(out[14] + 4 * i, res14);
2103  res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2104  _mm_storeu_ps(out[15] + 4 * i, res15);
2105  }
2106  else
2107  {
2108  _mm_storeu_ps(out[0] + 4 * i, res0);
2109  _mm_storeu_ps(out[1] + 4 * i, res1);
2110  _mm_storeu_ps(out[2] + 4 * i, res2);
2111  _mm_storeu_ps(out[3] + 4 * i, res3);
2112  _mm_storeu_ps(out[4] + 4 * i, res4);
2113  _mm_storeu_ps(out[5] + 4 * i, res5);
2114  _mm_storeu_ps(out[6] + 4 * i, res6);
2115  _mm_storeu_ps(out[7] + 4 * i, res7);
2116  _mm_storeu_ps(out[8] + 4 * i, res8);
2117  _mm_storeu_ps(out[9] + 4 * i, res9);
2118  _mm_storeu_ps(out[10] + 4 * i, res10);
2119  _mm_storeu_ps(out[11] + 4 * i, res11);
2120  _mm_storeu_ps(out[12] + 4 * i, res12);
2121  _mm_storeu_ps(out[13] + 4 * i, res13);
2122  _mm_storeu_ps(out[14] + 4 * i, res14);
2123  _mm_storeu_ps(out[15] + 4 * i, res15);
2124  }
2125  }
2126 
2127  if (add_into)
2128  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2129  for (unsigned int v = 0; v < 16; ++v)
2130  out[v][i] += in[i][v];
2131  else
2132  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2133  for (unsigned int v = 0; v < 16; ++v)
2134  out[v][i] = in[i][v];
2135 }
2136 
2137 # endif
2138 
2139 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2140 
2144 template <>
2145 class VectorizedArray<double, 4>
2146  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2147 {
2148 public:
2152  using value_type = double;
2153 
2159  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 4;
2160 
2165  VectorizedArray() = default;
2166 
2170  VectorizedArray(const double scalar)
2171  {
2172  this->operator=(scalar);
2173  }
2174 
2179  VectorizedArray &
2180  operator=(const double x)
2181  {
2182  data = _mm256_set1_pd(x);
2183  return *this;
2184  }
2185 
2190  double &operator[](const unsigned int comp)
2191  {
2192  AssertIndexRange(comp, 4);
2193  return *(reinterpret_cast<double *>(&data) + comp);
2194  }
2195 
2200  const double &operator[](const unsigned int comp) const
2201  {
2202  AssertIndexRange(comp, 4);
2203  return *(reinterpret_cast<const double *>(&data) + comp);
2204  }
2205 
2210  VectorizedArray &
2211  operator+=(const VectorizedArray &vec)
2212  {
2213  // if the compiler supports vector arithmetic, we can simply use +=
2214  // operator on the given data type. this allows the compiler to combine
2215  // additions with multiplication (fused multiply-add) if those
2216  // instructions are available. Otherwise, we need to use the built-in
2217  // intrinsic command for __m256d
2218 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2219  data += vec.data;
2220 # else
2221  data = _mm256_add_pd(data, vec.data);
2222 # endif
2223  return *this;
2224  }
2225 
2230  VectorizedArray &
2231  operator-=(const VectorizedArray &vec)
2232  {
2233 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2234  data -= vec.data;
2235 # else
2236  data = _mm256_sub_pd(data, vec.data);
2237 # endif
2238  return *this;
2239  }
2244  VectorizedArray &
2245  operator*=(const VectorizedArray &vec)
2246  {
2247 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2248  data *= vec.data;
2249 # else
2250  data = _mm256_mul_pd(data, vec.data);
2251 # endif
2252  return *this;
2253  }
2254 
2259  VectorizedArray &
2260  operator/=(const VectorizedArray &vec)
2261  {
2262 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2263  data /= vec.data;
2264 # else
2265  data = _mm256_div_pd(data, vec.data);
2266 # endif
2267  return *this;
2268  }
2269 
2276  void
2277  load(const double *ptr)
2278  {
2279  data = _mm256_loadu_pd(ptr);
2280  }
2281 
2289  void
2290  store(double *ptr) const
2291  {
2292  _mm256_storeu_pd(ptr, data);
2293  }
2294 
2299  void
2300  streaming_store(double *ptr) const
2301  {
2302  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2303  ExcMessage("Memory not aligned"));
2304  _mm256_stream_pd(ptr, data);
2305  }
2306 
2320  void
2321  gather(const double *base_ptr, const unsigned int *offsets)
2322  {
2323 # ifdef __AVX2__
2324  // unfortunately, there does not appear to be a 128 bit integer load, so
2325  // do it by some reinterpret casts here. this is allowed because the Intel
2326  // API allows aliasing between different vector types.
2327  const __m128 index_val =
2328  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2329  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2330  data = _mm256_i32gather_pd(base_ptr, index, 8);
2331 # else
2332  for (unsigned int i = 0; i < 4; ++i)
2333  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2334 # endif
2335  }
2336 
2350  void
2351  scatter(const unsigned int *offsets, double *base_ptr) const
2352  {
2353  // no scatter operation in AVX/AVX2
2354  for (unsigned int i = 0; i < 4; ++i)
2355  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2356  }
2357 
2363  __m256d data;
2364 
2365 private:
2371  VectorizedArray
2372  get_sqrt() const
2373  {
2374  VectorizedArray res;
2375  res.data = _mm256_sqrt_pd(data);
2376  return res;
2377  }
2378 
2384  VectorizedArray
2385  get_abs() const
2386  {
2387  // to compute the absolute value, perform bitwise andnot with -0. This
2388  // will leave all value and exponent bits unchanged but force the sign
2389  // value to +.
2390  __m256d mask = _mm256_set1_pd(-0.);
2391  VectorizedArray res;
2392  res.data = _mm256_andnot_pd(mask, data);
2393  return res;
2394  }
2395 
2401  VectorizedArray
2402  get_max(const VectorizedArray &other) const
2403  {
2404  VectorizedArray res;
2405  res.data = _mm256_max_pd(data, other.data);
2406  return res;
2407  }
2408 
2414  VectorizedArray
2415  get_min(const VectorizedArray &other) const
2416  {
2417  VectorizedArray res;
2418  res.data = _mm256_min_pd(data, other.data);
2419  return res;
2420  }
2421 
2422  // Make a few functions friends.
2423  template <typename Number2, std::size_t width2>
2424  friend VectorizedArray<Number2, width2>
2425  std::sqrt(const VectorizedArray<Number2, width2> &);
2426  template <typename Number2, std::size_t width2>
2427  friend VectorizedArray<Number2, width2>
2428  std::abs(const VectorizedArray<Number2, width2> &);
2429  template <typename Number2, std::size_t width2>
2430  friend VectorizedArray<Number2, width2>
2431  std::max(const VectorizedArray<Number2, width2> &,
2432           const VectorizedArray<Number2, width2> &);
2433  template <typename Number2, std::size_t width2>
2434  friend VectorizedArray<Number2, width2>
2435  std::min(const VectorizedArray<Number2, width2> &,
2436           const VectorizedArray<Number2, width2> &);
2437 };
2438 
2439 
2440 
2444 template <>
2445 inline DEAL_II_ALWAYS_INLINE void
2446 vectorized_load_and_transpose(const unsigned int n_entries,
2447  const double * in,
2448  const unsigned int * offsets,
2449  VectorizedArray<double, 4> *out)
2450 {
2451  const unsigned int n_chunks = n_entries / 4;
2452  const double * in0 = in + offsets[0];
2453  const double * in1 = in + offsets[1];
2454  const double * in2 = in + offsets[2];
2455  const double * in3 = in + offsets[3];
2456 
2457  for (unsigned int i = 0; i < n_chunks; ++i)
2458  {
2459  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2460  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2461  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2462  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2463  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2464  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2465  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2466  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2467  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2468  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2469  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2470  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2471  }
2472 
2473  // remainder loop of work that does not divide by 4
2474  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2475  out[i].gather(in + i, offsets);
2476 }
2477 
2478 
2479 
2483 template <>
2484 inline DEAL_II_ALWAYS_INLINE void
2485 vectorized_load_and_transpose(const unsigned int n_entries,
2486  const std::array<double *, 4> &in,
2487  VectorizedArray<double, 4> *out)
2488 {
2489  // see the comments in the vectorized_load_and_transpose above
2490 
2491  const unsigned int n_chunks = n_entries / 4;
2492  const double * in0 = in[0];
2493  const double * in1 = in[1];
2494  const double * in2 = in[2];
2495  const double * in3 = in[3];
2496 
2497  for (unsigned int i = 0; i < n_chunks; ++i)
2498  {
2499  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2500  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2501  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2502  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2503  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2504  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2505  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2506  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2507  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2508  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2509  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2510  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2511  }
2512 
2513  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2514  gather(out[i], in, i);
2515 }
2516 
2517 
2518 
2522 template <>
2523 inline DEAL_II_ALWAYS_INLINE void
2524 vectorized_transpose_and_store(const bool add_into,
2525  const unsigned int n_entries,
2526  const VectorizedArray<double, 4> *in,
2527  const unsigned int * offsets,
2528  double * out)
2529 {
2530  const unsigned int n_chunks = n_entries / 4;
2531  double * out0 = out + offsets[0];
2532  double * out1 = out + offsets[1];
2533  double * out2 = out + offsets[2];
2534  double * out3 = out + offsets[3];
2535  for (unsigned int i = 0; i < n_chunks; ++i)
2536  {
2537  __m256d u0 = in[4 * i + 0].data;
2538  __m256d u1 = in[4 * i + 1].data;
2539  __m256d u2 = in[4 * i + 2].data;
2540  __m256d u3 = in[4 * i + 3].data;
2541  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2542  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2543  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2544  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2545  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2546  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2547  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2548  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2549 
2550  // Cannot use the same store instructions in both paths of the 'if'
2551  // because the compiler cannot know that there is no aliasing between
2552  // pointers
2553  if (add_into)
2554  {
2555  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2556  _mm256_storeu_pd(out0 + 4 * i, res0);
2557  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2558  _mm256_storeu_pd(out1 + 4 * i, res1);
2559  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2560  _mm256_storeu_pd(out2 + 4 * i, res2);
2561  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2562  _mm256_storeu_pd(out3 + 4 * i, res3);
2563  }
2564  else
2565  {
2566  _mm256_storeu_pd(out0 + 4 * i, res0);
2567  _mm256_storeu_pd(out1 + 4 * i, res1);
2568  _mm256_storeu_pd(out2 + 4 * i, res2);
2569  _mm256_storeu_pd(out3 + 4 * i, res3);
2570  }
2571  }
2572 
2573  // remainder loop of work that does not divide by 4
2574  if (add_into)
2575  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2576  for (unsigned int v = 0; v < 4; ++v)
2577  out[offsets[v] + i] += in[i][v];
2578  else
2579  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2580  for (unsigned int v = 0; v < 4; ++v)
2581  out[offsets[v] + i] = in[i][v];
2582 }
2583 
2584 
2585 
2589 template <>
2590 inline DEAL_II_ALWAYS_INLINE void
2591 vectorized_transpose_and_store(const bool add_into,
2592  const unsigned int n_entries,
2593  const VectorizedArray<double, 4> *in,
2594  std::array<double *, 4> & out)
2595 {
2596  // see the comments in the vectorized_transpose_and_store above
2597 
2598  const unsigned int n_chunks = n_entries / 4;
2599  double * out0 = out[0];
2600  double * out1 = out[1];
2601  double * out2 = out[2];
2602  double * out3 = out[3];
2603  for (unsigned int i = 0; i < n_chunks; ++i)
2604  {
2605  __m256d u0 = in[4 * i + 0].data;
2606  __m256d u1 = in[4 * i + 1].data;
2607  __m256d u2 = in[4 * i + 2].data;
2608  __m256d u3 = in[4 * i + 3].data;
2609  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2610  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2611  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2612  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2613  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2614  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2615  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2616  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2617 
2618  // Cannot use the same store instructions in both paths of the 'if'
2619  // because the compiler cannot know that there is no aliasing between
2620  // pointers
2621  if (add_into)
2622  {
2623  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2624  _mm256_storeu_pd(out0 + 4 * i, res0);
2625  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2626  _mm256_storeu_pd(out1 + 4 * i, res1);
2627  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2628  _mm256_storeu_pd(out2 + 4 * i, res2);
2629  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2630  _mm256_storeu_pd(out3 + 4 * i, res3);
2631  }
2632  else
2633  {
2634  _mm256_storeu_pd(out0 + 4 * i, res0);
2635  _mm256_storeu_pd(out1 + 4 * i, res1);
2636  _mm256_storeu_pd(out2 + 4 * i, res2);
2637  _mm256_storeu_pd(out3 + 4 * i, res3);
2638  }
2639  }
2640 
2641  // remainder loop of work that does not divide by 4
2642  if (add_into)
2643  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2644  for (unsigned int v = 0; v < 4; ++v)
2645  out[v][i] += in[i][v];
2646  else
2647  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2648  for (unsigned int v = 0; v < 4; ++v)
2649  out[v][i] = in[i][v];
2650 }
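// A minimal usage sketch for the load/store transpose pairs above
// (illustration only, not part of the library source; the names 'source',
// 'destination', 'offsets' and 'n' are made up, and <vector> is assumed to
// be included): gather four strided double streams into transposed
// VectorizedArray form, scale them, and write them back with the inverse
// operation.
//
//   std::vector<VectorizedArray<double, 4>> tmp(n);
//   vectorized_load_and_transpose(n, source, offsets, tmp.data());
//   for (unsigned int i = 0; i < n; ++i)
//     tmp[i] *= 2.;
//   vectorized_transpose_and_store(false, n, tmp.data(), offsets, destination);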
2651 
2652 
2653 
2657 template <>
2658 class VectorizedArray<float, 8>
2659  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
2660 {
2661 public:
2665  using value_type = float;
2666 
2672  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 8;
2673 
2678  VectorizedArray() = default;
2679 
2683  VectorizedArray(const float scalar)
2684  {
2685  this->operator=(scalar);
2686  }
2687 
2692  VectorizedArray &
2693  operator=(const float x)
2694  {
2695  data = _mm256_set1_ps(x);
2696  return *this;
2697  }
2698 
2703  float &operator[](const unsigned int comp)
2704  {
2705  AssertIndexRange(comp, 8);
2706  return *(reinterpret_cast<float *>(&data) + comp);
2707  }
2708 
2713  const float &operator[](const unsigned int comp) const
2714  {
2715  AssertIndexRange(comp, 8);
2716  return *(reinterpret_cast<const float *>(&data) + comp);
2717  }
2718 
2723  VectorizedArray &
2724  operator+=(const VectorizedArray &vec)
2725  {
2726  // if the compiler supports vector arithmetic, we can simply use +=
2727  // operator on the given data type. this allows the compiler to combine
2728  // additions with multiplication (fused multiply-add) if those
2729  // instructions are available. Otherwise, we need to use the built-in
2730  // intrinsic command for __m256d
2731 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2732  data += vec.data;
2733 # else
2734  data = _mm256_add_ps(data, vec.data);
2735 # endif
2736  return *this;
2737  }
2738 
2743  VectorizedArray &
2744  operator-=(const VectorizedArray &vec)
2745  {
2746 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2747  data -= vec.data;
2748 # else
2749  data = _mm256_sub_ps(data, vec.data);
2750 # endif
2751  return *this;
2752  }
2757  VectorizedArray &
2758  operator*=(const VectorizedArray &vec)
2759  {
2760 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2761  data *= vec.data;
2762 # else
2763  data = _mm256_mul_ps(data, vec.data);
2764 # endif
2765  return *this;
2766  }
2767 
2772  VectorizedArray &
2773  operator/=(const VectorizedArray &vec)
2774  {
2775 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2776  data /= vec.data;
2777 # else
2778  data = _mm256_div_ps(data, vec.data);
2779 # endif
2780  return *this;
2781  }
2782 
2789  void
2790  load(const float *ptr)
2791  {
2792  data = _mm256_loadu_ps(ptr);
2793  }
2794 
2802  void
2803  store(float *ptr) const
2804  {
2805  _mm256_storeu_ps(ptr, data);
2806  }
2807 
2812  void
2813  streaming_store(float *ptr) const
2814  {
2815  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2816  ExcMessage("Memory not aligned"));
2817  _mm256_stream_ps(ptr, data);
2818  }
2819 
2833  void
2834  gather(const float *base_ptr, const unsigned int *offsets)
2835  {
2836 # ifdef __AVX2__
2837  // unfortunately, there does not appear to be a 256 bit integer load, so
2838  // do it by some reinterpret casts here. this is allowed because the Intel
2839  // API allows aliasing between different vector types.
2840  const __m256 index_val =
2841  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2842  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2843  data = _mm256_i32gather_ps(base_ptr, index, 4);
2844 # else
2845  for (unsigned int i = 0; i < 8; ++i)
2846  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2847 # endif
2848  }
2849 
2863  void
2864  scatter(const unsigned int *offsets, float *base_ptr) const
2865  {
2866  // no scatter operation in AVX/AVX2
2867  for (unsigned int i = 0; i < 8; ++i)
2868  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2869  }
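// A minimal usage sketch of the two member functions above (illustration
// only, not part of the library source; 'values' and 'indices' are made-up
// names): read eight strided floats into the lanes, operate on all lanes at
// once, and write them back to the same strided locations.
//
//   float        values[64] = {};
//   unsigned int indices[8] = {0, 8, 16, 24, 32, 40, 48, 56};
//   VectorizedArray<float, 8> v;
//   v.gather(values, indices);  // v[k] == values[indices[k]]
//   v += 1.f;                   // acts on all eight lanes
//   v.scatter(indices, values); // values[indices[k]] == v[k]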
2870 
2876  __m256 data;
2877 
2878 private:
2885  get_sqrt() const
2886  {
2887  VectorizedArray res;
2888  res.data = _mm256_sqrt_ps(data);
2889  return res;
2890  }
2891 
2898  get_abs() const
2899  {
2900  // to compute the absolute value, perform bitwise andnot with -0. This
2901  // will leave all value and exponent bits unchanged but force the sign
2902  // value to +.
2903  __m256 mask = _mm256_set1_ps(-0.f);
2904  VectorizedArray res;
2905  res.data = _mm256_andnot_ps(mask, data);
2906  return res;
2907  }
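// The same sign-bit trick written out for a single float (illustration
// only, not part of the library source; assumes <cstdint> and <cstring>):
// -0.f has only the sign bit set, so clearing that bit leaves magnitude and
// exponent untouched.
//
//   inline float abs_sketch(const float x)
//   {
//     std::uint32_t bits;
//     std::memcpy(&bits, &x, sizeof(bits)); // bit pattern of x
//     bits &= 0x7fffffffu;                  // clear the sign bit only
//     float result;
//     std::memcpy(&result, &bits, sizeof(result));
//     return result;
//   }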
2908 
2915  get_max(const VectorizedArray &other) const
2916  {
2917  VectorizedArray res;
2918  res.data = _mm256_max_ps(data, other.data);
2919  return res;
2920  }
2921 
2928  get_min(const VectorizedArray &other) const
2929  {
2930  VectorizedArray res;
2931  res.data = _mm256_min_ps(data, other.data);
2932  return res;
2933  }
2934 
2935  // Make a few functions friends.
2936  template <typename Number2, std::size_t width2>
2939  template <typename Number2, std::size_t width2>
2942  template <typename Number2, std::size_t width2>
2946  template <typename Number2, std::size_t width2>
2950 };
2951 
2952 
2953 
2957 template <>
2958 inline DEAL_II_ALWAYS_INLINE void
2959 vectorized_load_and_transpose(const unsigned int n_entries,
2960  const float * in,
2961  const unsigned int * offsets,
2962  VectorizedArray<float, 8> * out)
2963 {
2964  const unsigned int n_chunks = n_entries / 4;
2965  for (unsigned int i = 0; i < n_chunks; ++i)
2966  {
2967  // To avoid warnings about uninitialized variables, need to initialize
2968  // one variable with zero before using it.
2969  __m256 t0, t1, t2, t3 = {};
2970  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
2971  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
2972  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
2973  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
2974  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
2975  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
2976  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
2977  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
2978 
2979  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2980  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2981  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2982  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2983  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2984  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2985  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2986  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2987  }
2988 
2989  // remainder loop of work that does not divide by 4
2990  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2991  out[i].gather(in + i, offsets);
2992 }
2993 
2994 
2995 
2999 template <>
3000 inline DEAL_II_ALWAYS_INLINE void
3001 vectorized_load_and_transpose(const unsigned int n_entries,
3002  const std::array<float *, 8> &in,
3003  VectorizedArray<float, 8> * out)
3004 {
3005  // see the comments in the vectorized_load_and_transpose above
3006 
3007  const unsigned int n_chunks = n_entries / 4;
3008  for (unsigned int i = 0; i < n_chunks; ++i)
3009  {
3010  __m256 t0, t1, t2, t3 = {};
3011  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3012  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3013  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3014  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3015  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3016  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3017  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3018  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3019 
3020  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3021  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3022  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3023  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3024  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3025  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3026  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3027  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3028  }
3029 
3030  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3031  gather(out[i], in, i);
3032 }
3033 
3034 
3035 
3039 template <>
3040 inline DEAL_II_ALWAYS_INLINE void
3041 vectorized_transpose_and_store(const bool add_into,
3042  const unsigned int n_entries,
3043  const VectorizedArray<float, 8> *in,
3044  const unsigned int * offsets,
3045  float * out)
3046 {
3047  const unsigned int n_chunks = n_entries / 4;
3048  for (unsigned int i = 0; i < n_chunks; ++i)
3049  {
3050  __m256 u0 = in[4 * i + 0].data;
3051  __m256 u1 = in[4 * i + 1].data;
3052  __m256 u2 = in[4 * i + 2].data;
3053  __m256 u3 = in[4 * i + 3].data;
3054  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3055  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3056  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3057  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3058  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3059  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3060  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3061  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3062  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3063  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3064  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3065  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3066  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3067  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3068  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3069  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3070 
3071  // Cannot use the same store instructions in both paths of the 'if'
3072  // because the compiler cannot know that there is no aliasing between
3073  // pointers
3074  if (add_into)
3075  {
3076  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3077  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3078  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3079  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3080  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3081  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3082  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3083  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3084  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3085  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3086  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3087  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3088  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3089  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3090  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3091  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3092  }
3093  else
3094  {
3095  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3096  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3097  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3098  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3099  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3100  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3101  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3102  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3103  }
3104  }
3105 
3106  // remainder loop of work that does not divide by 4
3107  if (add_into)
3108  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3109  for (unsigned int v = 0; v < 8; ++v)
3110  out[offsets[v] + i] += in[i][v];
3111  else
3112  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3113  for (unsigned int v = 0; v < 8; ++v)
3114  out[offsets[v] + i] = in[i][v];
3115 }
3116 
3117 
3118 
3122 template <>
3123 inline DEAL_II_ALWAYS_INLINE void
3124 vectorized_transpose_and_store(const bool add_into,
3125  const unsigned int n_entries,
3126  const VectorizedArray<float, 8> *in,
3127  std::array<float *, 8> & out)
3128 {
3129  // see the comments in the vectorized_transpose_and_store above
3130 
3131  const unsigned int n_chunks = n_entries / 4;
3132  for (unsigned int i = 0; i < n_chunks; ++i)
3133  {
3134  __m256 u0 = in[4 * i + 0].data;
3135  __m256 u1 = in[4 * i + 1].data;
3136  __m256 u2 = in[4 * i + 2].data;
3137  __m256 u3 = in[4 * i + 3].data;
3138  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3139  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3140  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3141  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3142  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3143  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3144  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3145  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3146  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3147  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3148  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3149  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3150  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3151  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3152  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3153  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3154 
3155  if (add_into)
3156  {
3157  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3158  _mm_storeu_ps(out[0] + 4 * i, res0);
3159  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3160  _mm_storeu_ps(out[1] + 4 * i, res1);
3161  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3162  _mm_storeu_ps(out[2] + 4 * i, res2);
3163  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3164  _mm_storeu_ps(out[3] + 4 * i, res3);
3165  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3166  _mm_storeu_ps(out[4] + 4 * i, res4);
3167  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3168  _mm_storeu_ps(out[5] + 4 * i, res5);
3169  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3170  _mm_storeu_ps(out[6] + 4 * i, res6);
3171  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3172  _mm_storeu_ps(out[7] + 4 * i, res7);
3173  }
3174  else
3175  {
3176  _mm_storeu_ps(out[0] + 4 * i, res0);
3177  _mm_storeu_ps(out[1] + 4 * i, res1);
3178  _mm_storeu_ps(out[2] + 4 * i, res2);
3179  _mm_storeu_ps(out[3] + 4 * i, res3);
3180  _mm_storeu_ps(out[4] + 4 * i, res4);
3181  _mm_storeu_ps(out[5] + 4 * i, res5);
3182  _mm_storeu_ps(out[6] + 4 * i, res6);
3183  _mm_storeu_ps(out[7] + 4 * i, res7);
3184  }
3185  }
3186 
3187  if (add_into)
3188  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3189  for (unsigned int v = 0; v < 8; ++v)
3190  out[v][i] += in[i][v];
3191  else
3192  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3193  for (unsigned int v = 0; v < 8; ++v)
3194  out[v][i] = in[i][v];
3195 }
3196 
3197 # endif
3198 
3199 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3200 
3204 template <>
3205 class VectorizedArray<double, 2>
3206  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
3207 {
3208 public:
3212  using value_type = double;
3213 
3219  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 2;
3220 
3225  VectorizedArray() = default;
3226 
3230  VectorizedArray(const double scalar)
3231  {
3232  this->operator=(scalar);
3233  }
3234 
3239  VectorizedArray &
3240  operator=(const double x)
3241  {
3242  data = _mm_set1_pd(x);
3243  return *this;
3244  }
3245 
3250  double &operator[](const unsigned int comp)
3251  {
3252  AssertIndexRange(comp, 2);
3253  return *(reinterpret_cast<double *>(&data) + comp);
3254  }
3255 
3260  const double &operator[](const unsigned int comp) const
3261  {
3262  AssertIndexRange(comp, 2);
3263  return *(reinterpret_cast<const double *>(&data) + comp);
3264  }
3265 
3270  VectorizedArray &
3271  operator+=(const VectorizedArray &vec)
3272  {
3273 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3274  data += vec.data;
3275 # else
3276  data = _mm_add_pd(data, vec.data);
3277 # endif
3278  return *this;
3279  }
3280 
3285  VectorizedArray &
3286  operator-=(const VectorizedArray &vec)
3287  {
3288 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3289  data -= vec.data;
3290 # else
3291  data = _mm_sub_pd(data, vec.data);
3292 # endif
3293  return *this;
3294  }
3295 
3300  VectorizedArray &
3301  operator*=(const VectorizedArray &vec)
3302  {
3303 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3304  data *= vec.data;
3305 # else
3306  data = _mm_mul_pd(data, vec.data);
3307 # endif
3308  return *this;
3309  }
3310 
3315  VectorizedArray &
3316  operator/=(const VectorizedArray &vec)
3317  {
3318 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3319  data /= vec.data;
3320 # else
3321  data = _mm_div_pd(data, vec.data);
3322 # endif
3323  return *this;
3324  }
3325 
3332  void
3333  load(const double *ptr)
3334  {
3335  data = _mm_loadu_pd(ptr);
3336  }
3337 
3345  void
3346  store(double *ptr) const
3347  {
3348  _mm_storeu_pd(ptr, data);
3349  }
3350 
3355  void
3356  streaming_store(double *ptr) const
3357  {
3358  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3359  ExcMessage("Memory not aligned"));
3360  _mm_stream_pd(ptr, data);
3361  }
3362 
3376  void
3377  gather(const double *base_ptr, const unsigned int *offsets)
3378  {
3379  for (unsigned int i = 0; i < 2; ++i)
3380  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3381  }
3382 
3396  void
3397  scatter(const unsigned int *offsets, double *base_ptr) const
3398  {
3399  for (unsigned int i = 0; i < 2; ++i)
3400  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3401  }
3402 
3408  __m128d data;
3409 
3410 private:
3417  get_sqrt() const
3418  {
3419  VectorizedArray res;
3420  res.data = _mm_sqrt_pd(data);
3421  return res;
3422  }
3423 
3430  get_abs() const
3431  {
3432  // to compute the absolute value, perform
3433  // bitwise andnot with -0. This will leave all
3434  // value and exponent bits unchanged but force
3435  // the sign value to +.
3436  __m128d mask = _mm_set1_pd(-0.);
3437  VectorizedArray res;
3438  res.data = _mm_andnot_pd(mask, data);
3439  return res;
3440  }
3441 
3448  get_max(const VectorizedArray &other) const
3449  {
3450  VectorizedArray res;
3451  res.data = _mm_max_pd(data, other.data);
3452  return res;
3453  }
3454 
3461  get_min(const VectorizedArray &other) const
3462  {
3463  VectorizedArray res;
3464  res.data = _mm_min_pd(data, other.data);
3465  return res;
3466  }
3467 
3468  // Make a few functions friends.
3469  template <typename Number2, std::size_t width2>
3472  template <typename Number2, std::size_t width2>
3475  template <typename Number2, std::size_t width2>
3479  template <typename Number2, std::size_t width2>
3483 };
3484 
3485 
3486 
3490 template <>
3491 inline DEAL_II_ALWAYS_INLINE void
3492 vectorized_load_and_transpose(const unsigned int n_entries,
3493  const double * in,
3494  const unsigned int * offsets,
3495  VectorizedArray<double, 2> * out)
3496 {
3497  const unsigned int n_chunks = n_entries / 2;
3498  for (unsigned int i = 0; i < n_chunks; ++i)
3499  {
3500  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3501  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3502  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3503  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3504  }
3505 
3506  // remainder loop of work that does not divide by 2
3507  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3508  for (unsigned int v = 0; v < 2; ++v)
3509  out[i][v] = in[offsets[v] + i];
3510 }
3511 
3512 
3513 
3517 template <>
3518 inline DEAL_II_ALWAYS_INLINE void
3519 vectorized_load_and_transpose(const unsigned int n_entries,
3520  const std::array<double *, 2> &in,
3521  VectorizedArray<double, 2> * out)
3522 {
3523  // see the comments in the vectorized_load_and_transpose above
3524 
3525  const unsigned int n_chunks = n_entries / 2;
3526  for (unsigned int i = 0; i < n_chunks; ++i)
3527  {
3528  __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3529  __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3530  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3531  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3532  }
3533 
3534  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3535  for (unsigned int v = 0; v < 2; ++v)
3536  out[i][v] = in[v][i];
3537 }
3538 
3539 
3540 
3544 template <>
3545 inline DEAL_II_ALWAYS_INLINE void
3546 vectorized_transpose_and_store(const bool add_into,
3547  const unsigned int n_entries,
3548  const VectorizedArray<double, 2> *in,
3549  const unsigned int * offsets,
3550  double * out)
3551 {
3552  const unsigned int n_chunks = n_entries / 2;
3553  if (add_into)
3554  {
3555  for (unsigned int i = 0; i < n_chunks; ++i)
3556  {
3557  __m128d u0 = in[2 * i + 0].data;
3558  __m128d u1 = in[2 * i + 1].data;
3559  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3560  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3561  _mm_storeu_pd(out + 2 * i + offsets[0],
3562  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3563  res0));
3564  _mm_storeu_pd(out + 2 * i + offsets[1],
3565  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3566  res1));
3567  }
3568  // remainder loop of work that does not divide by 2
3569  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3570  for (unsigned int v = 0; v < 2; ++v)
3571  out[offsets[v] + i] += in[i][v];
3572  }
3573  else
3574  {
3575  for (unsigned int i = 0; i < n_chunks; ++i)
3576  {
3577  __m128d u0 = in[2 * i + 0].data;
3578  __m128d u1 = in[2 * i + 1].data;
3579  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3580  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3581  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3582  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3583  }
3584  // remainder loop of work that does not divide by 2
3585  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3586  for (unsigned int v = 0; v < 2; ++v)
3587  out[offsets[v] + i] = in[i][v];
3588  }
3589 }
3590 
3591 
3592 
3596 template <>
3597 inline DEAL_II_ALWAYS_INLINE void
3598 vectorized_transpose_and_store(const bool add_into,
3599  const unsigned int n_entries,
3600  const VectorizedArray<double, 2> *in,
3601  std::array<double *, 2> & out)
3602 {
3603  // see the comments in the vectorized_transpose_and_store above
3604 
3605  const unsigned int n_chunks = n_entries / 2;
3606  if (add_into)
3607  {
3608  for (unsigned int i = 0; i < n_chunks; ++i)
3609  {
3610  __m128d u0 = in[2 * i + 0].data;
3611  __m128d u1 = in[2 * i + 1].data;
3612  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3613  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3614  _mm_storeu_pd(out[0] + 2 * i,
3615  _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3616  _mm_storeu_pd(out[1] + 2 * i,
3617  _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3618  }
3619 
3620  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3621  for (unsigned int v = 0; v < 2; ++v)
3622  out[v][i] += in[i][v];
3623  }
3624  else
3625  {
3626  for (unsigned int i = 0; i < n_chunks; ++i)
3627  {
3628  __m128d u0 = in[2 * i + 0].data;
3629  __m128d u1 = in[2 * i + 1].data;
3630  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3631  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3632  _mm_storeu_pd(out[0] + 2 * i, res0);
3633  _mm_storeu_pd(out[1] + 2 * i, res1);
3634  }
3635 
3636  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3637  for (unsigned int v = 0; v < 2; ++v)
3638  out[v][i] = in[i][v];
3639  }
3640 }
3641 
3642 
3643 
3647 template <>
3648 class VectorizedArray<float, 4>
3649  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
3650 {
3651 public:
3655  using value_type = float;
3656 
3662  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 4;
3663 
3672  VectorizedArray() = default;
3673 
3677  VectorizedArray(const float scalar)
3678  {
3679  this->operator=(scalar);
3680  }
3681 
3683  VectorizedArray &
3684  operator=(const float x)
3685  {
3686  data = _mm_set1_ps(x);
3687  return *this;
3688  }
3689 
3694  float &operator[](const unsigned int comp)
3695  {
3696  AssertIndexRange(comp, 4);
3697  return *(reinterpret_cast<float *>(&data) + comp);
3698  }
3699 
3704  const float &operator[](const unsigned int comp) const
3705  {
3706  AssertIndexRange(comp, 4);
3707  return *(reinterpret_cast<const float *>(&data) + comp);
3708  }
3709 
3714  VectorizedArray &
3715  operator+=(const VectorizedArray &vec)
3716  {
3717 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3718  data += vec.data;
3719 # else
3720  data = _mm_add_ps(data, vec.data);
3721 # endif
3722  return *this;
3723  }
3724 
3729  VectorizedArray &
3730  operator-=(const VectorizedArray &vec)
3731  {
3732 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3733  data -= vec.data;
3734 # else
3735  data = _mm_sub_ps(data, vec.data);
3736 # endif
3737  return *this;
3738  }
3739 
3744  VectorizedArray &
3745  operator*=(const VectorizedArray &vec)
3746  {
3747 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3748  data *= vec.data;
3749 # else
3750  data = _mm_mul_ps(data, vec.data);
3751 # endif
3752  return *this;
3753  }
3754 
3759  VectorizedArray &
3760  operator/=(const VectorizedArray &vec)
3761  {
3762 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3763  data /= vec.data;
3764 # else
3765  data = _mm_div_ps(data, vec.data);
3766 # endif
3767  return *this;
3768  }
3769 
3776  void
3777  load(const float *ptr)
3778  {
3779  data = _mm_loadu_ps(ptr);
3780  }
3781 
3789  void
3790  store(float *ptr) const
3791  {
3792  _mm_storeu_ps(ptr, data);
3793  }
3794 
3799  void
3800  streaming_store(float *ptr) const
3801  {
3802  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3803  ExcMessage("Memory not aligned"));
3804  _mm_stream_ps(ptr, data);
3805  }
3806 
3820  void
3821  gather(const float *base_ptr, const unsigned int *offsets)
3822  {
3823  for (unsigned int i = 0; i < 4; ++i)
3824  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3825  }
3826 
3840  void
3841  scatter(const unsigned int *offsets, float *base_ptr) const
3842  {
3843  for (unsigned int i = 0; i < 4; ++i)
3844  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3845  }
3846 
3852  __m128 data;
3853 
3854 private:
3861  get_sqrt() const
3862  {
3863  VectorizedArray res;
3864  res.data = _mm_sqrt_ps(data);
3865  return res;
3866  }
3867 
3874  get_abs() const
3875  {
3876  // to compute the absolute value, perform bitwise andnot with -0. This
3877  // will leave all value and exponent bits unchanged but force the sign
3878  // value to +.
3879  __m128 mask = _mm_set1_ps(-0.f);
3880  VectorizedArray res;
3881  res.data = _mm_andnot_ps(mask, data);
3882  return res;
3883  }
3884 
3891  get_max(const VectorizedArray &other) const
3892  {
3893  VectorizedArray res;
3894  res.data = _mm_max_ps(data, other.data);
3895  return res;
3896  }
3897 
3904  get_min(const VectorizedArray &other) const
3905  {
3906  VectorizedArray res;
3907  res.data = _mm_min_ps(data, other.data);
3908  return res;
3909  }
3910 
3911  // Make a few functions friends.
3912  template <typename Number2, std::size_t width2>
3915  template <typename Number2, std::size_t width2>
3918  template <typename Number2, std::size_t width2>
3922  template <typename Number2, std::size_t width2>
3926 };
3927 
3928 
3929 
3933 template <>
3934 inline DEAL_II_ALWAYS_INLINE void
3935 vectorized_load_and_transpose(const unsigned int n_entries,
3936  const float * in,
3937  const unsigned int * offsets,
3938  VectorizedArray<float, 4> * out)
3939 {
3940  const unsigned int n_chunks = n_entries / 4;
3941  for (unsigned int i = 0; i < n_chunks; ++i)
3942  {
3943  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
3944  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
3945  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
3946  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
3947  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3948  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3949  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3950  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3951  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3952  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3953  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3954  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3955  }
3956 
3957  // remainder loop of work that does not divide by 4
3958  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3959  for (unsigned int v = 0; v < 4; ++v)
3960  out[i][v] = in[offsets[v] + i];
3961 }
3962 
3963 
3964 
3968 template <>
3969 inline DEAL_II_ALWAYS_INLINE void
3970 vectorized_load_and_transpose(const unsigned int n_entries,
3971  const std::array<float *, 4> &in,
3972  VectorizedArray<float, 4> * out)
3973 {
3974  // see the comments in the vectorized_load_and_transpose above
3975 
3976  const unsigned int n_chunks = n_entries / 4;
3977  for (unsigned int i = 0; i < n_chunks; ++i)
3978  {
3979  __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
3980  __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
3981  __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
3982  __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
3983  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3984  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3985  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3986  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3987  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3988  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3989  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3990  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3991  }
3992 
3993  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3994  for (unsigned int v = 0; v < 4; ++v)
3995  out[i][v] = in[v][i];
3996 }
3997 
3998 
3999 
4003 template <>
4004 inline DEAL_II_ALWAYS_INLINE void
4005 vectorized_transpose_and_store(const bool add_into,
4006  const unsigned int n_entries,
4007  const VectorizedArray<float, 4> *in,
4008  const unsigned int * offsets,
4009  float * out)
4010 {
4011  const unsigned int n_chunks = n_entries / 4;
4012  for (unsigned int i = 0; i < n_chunks; ++i)
4013  {
4014  __m128 u0 = in[4 * i + 0].data;
4015  __m128 u1 = in[4 * i + 1].data;
4016  __m128 u2 = in[4 * i + 2].data;
4017  __m128 u3 = in[4 * i + 3].data;
4018  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4019  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4020  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4021  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4022  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4023  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4024  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4025  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4026 
4027  // Cannot use the same store instructions in both paths of the 'if'
4028  // because the compiler cannot know that there is no aliasing between
4029  // pointers
4030  if (add_into)
4031  {
4032  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4033  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4034  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4035  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4036  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4037  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4038  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4039  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4040  }
4041  else
4042  {
4043  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4044  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4045  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4046  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4047  }
4048  }
4049 
4050  // remainder loop of work that does not divide by 4
4051  if (add_into)
4052  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4053  for (unsigned int v = 0; v < 4; ++v)
4054  out[offsets[v] + i] += in[i][v];
4055  else
4056  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4057  for (unsigned int v = 0; v < 4; ++v)
4058  out[offsets[v] + i] = in[i][v];
4059 }
4060 
4061 
4062 
4066 template <>
4067 inline DEAL_II_ALWAYS_INLINE void
4068 vectorized_transpose_and_store(const bool add_into,
4069  const unsigned int n_entries,
4070  const VectorizedArray<float, 4> *in,
4071  std::array<float *, 4> & out)
4072 {
4073  // see the comments in the vectorized_transpose_and_store above
4074 
4075  const unsigned int n_chunks = n_entries / 4;
4076  for (unsigned int i = 0; i < n_chunks; ++i)
4077  {
4078  __m128 u0 = in[4 * i + 0].data;
4079  __m128 u1 = in[4 * i + 1].data;
4080  __m128 u2 = in[4 * i + 2].data;
4081  __m128 u3 = in[4 * i + 3].data;
4082  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4083  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4084  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4085  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4086  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4087  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4088  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4089  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4090 
4091  if (add_into)
4092  {
4093  u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4094  _mm_storeu_ps(out[0] + 4 * i, u0);
4095  u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4096  _mm_storeu_ps(out[1] + 4 * i, u1);
4097  u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4098  _mm_storeu_ps(out[2] + 4 * i, u2);
4099  u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4100  _mm_storeu_ps(out[3] + 4 * i, u3);
4101  }
4102  else
4103  {
4104  _mm_storeu_ps(out[0] + 4 * i, u0);
4105  _mm_storeu_ps(out[1] + 4 * i, u1);
4106  _mm_storeu_ps(out[2] + 4 * i, u2);
4107  _mm_storeu_ps(out[3] + 4 * i, u3);
4108  }
4109  }
4110 
4111  if (add_into)
4112  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4113  for (unsigned int v = 0; v < 4; ++v)
4114  out[v][i] += in[i][v];
4115  else
4116  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4117  for (unsigned int v = 0; v < 4; ++v)
4118  out[v][i] = in[i][v];
4119 }
4120 
4121 
4122 
4123 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
4124 
4125 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4126  defined(__VSX__)
4127 
4128 template <>
4129 class VectorizedArray<double, 2>
4130  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
4131 {
4132 public:
4136  using value_type = double;
4137 
4143  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 2;
4144 
4149  VectorizedArray() = default;
4150 
4154  VectorizedArray(const double scalar)
4155  {
4156  this->operator=(scalar);
4157  }
4158 
4163  VectorizedArray &
4164  operator=(const double x)
4165  {
4166  data = vec_splats(x);
4167 
4168  // Some compilers believe that vec_splats sets 'x', but that's not true.
4169  // They then warn about setting a variable and not using it. Suppress the
4170  // warning by "using" the variable:
4171  (void)x;
4172  return *this;
4173  }
4174 
4179  double &operator[](const unsigned int comp)
4180  {
4181  AssertIndexRange(comp, 2);
4182  return *(reinterpret_cast<double *>(&data) + comp);
4183  }
4184 
4189  const double &operator[](const unsigned int comp) const
4190  {
4191  AssertIndexRange(comp, 2);
4192  return *(reinterpret_cast<const double *>(&data) + comp);
4193  }
4194 
4199  VectorizedArray &
4200  operator+=(const VectorizedArray &vec)
4201  {
4202  data = vec_add(data, vec.data);
4203  return *this;
4204  }
4205 
4210  VectorizedArray &
4211  operator-=(const VectorizedArray &vec)
4212  {
4213  data = vec_sub(data, vec.data);
4214  return *this;
4215  }
4216 
4221  VectorizedArray &
4222  operator*=(const VectorizedArray &vec)
4223  {
4224  data = vec_mul(data, vec.data);
4225  return *this;
4226  }
4227 
4232  VectorizedArray &
4233  operator/=(const VectorizedArray &vec)
4234  {
4235  data = vec_div(data, vec.data);
4236  return *this;
4237  }
4238 
4244  void
4245  load(const double *ptr)
4246  {
4247  data = vec_vsx_ld(0, ptr);
4248  }
4249 
4255  void
4256  store(double *ptr) const
4257  {
4258  vec_vsx_st(data, 0, ptr);
4259  }
4260 
4264  void
4265  streaming_store(double *ptr) const
4266  {
4267  store(ptr);
4268  }
4269 
4273  void
4274  gather(const double *base_ptr, const unsigned int *offsets)
4275  {
4276  for (unsigned int i = 0; i < 2; ++i)
4277  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4278  }
4279 
4283  void
4284  scatter(const unsigned int *offsets, double *base_ptr) const
4285  {
4286  for (unsigned int i = 0; i < 2; ++i)
4287  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4288  }
4289 
4295  __vector double data;
4296 
4297 private:
4304  get_sqrt() const
4305  {
4306  VectorizedArray res;
4307  res.data = vec_sqrt(data);
4308  return res;
4309  }
4310 
4317  get_abs() const
4318  {
4319  VectorizedArray res;
4320  res.data = vec_abs(data);
4321  return res;
4322  }
4323 
4330  get_max(const VectorizedArray &other) const
4331  {
4332  VectorizedArray res;
4333  res.data = vec_max(data, other.data);
4334  return res;
4335  }
4336 
4343  get_min(const VectorizedArray &other) const
4344  {
4345  VectorizedArray res;
4346  res.data = vec_min(data, other.data);
4347  return res;
4348  }
4349 
4350  // Make a few functions friends.
4351  template <typename Number2, std::size_t width2>
4354  template <typename Number2, std::size_t width2>
4357  template <typename Number2, std::size_t width2>
4361  template <typename Number2, std::size_t width2>
4365 };
4366 
4367 
4368 
4369 template <>
4370 class VectorizedArray<float, 4>
4371  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
4372 {
4373 public:
4377  using value_type = float;
4378 
4384  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 4;
4385 
4390  VectorizedArray() = default;
4391 
4395  VectorizedArray(const float scalar)
4396  {
4397  this->operator=(scalar);
4398  }
4399 
4404  VectorizedArray &
4405  operator=(const float x)
4406  {
4407  data = vec_splats(x);
4408 
4409  // Some compilers believe that vec_splats sets 'x', but that's not true.
4410  // They then warn about setting a variable and not using it. Suppress the
4411  // warning by "using" the variable:
4412  (void)x;
4413  return *this;
4414  }
4415 
4420  float &operator[](const unsigned int comp)
4421  {
4422  AssertIndexRange(comp, 4);
4423  return *(reinterpret_cast<float *>(&data) + comp);
4424  }
4425 
4430  const float &operator[](const unsigned int comp) const
4431  {
4432  AssertIndexRange(comp, 4);
4433  return *(reinterpret_cast<const float *>(&data) + comp);
4434  }
4435 
4440  VectorizedArray &
4441  operator+=(const VectorizedArray &vec)
4442  {
4443  data = vec_add(data, vec.data);
4444  return *this;
4445  }
4446 
4451  VectorizedArray &
4452  operator-=(const VectorizedArray &vec)
4453  {
4454  data = vec_sub(data, vec.data);
4455  return *this;
4456  }
4457 
4462  VectorizedArray &
4463  operator*=(const VectorizedArray &vec)
4464  {
4465  data = vec_mul(data, vec.data);
4466  return *this;
4467  }
4468 
4473  VectorizedArray &
4474  operator/=(const VectorizedArray &vec)
4475  {
4476  data = vec_div(data, vec.data);
4477  return *this;
4478  }
4479 
4485  void
4486  load(const float *ptr)
4487  {
4488  data = vec_vsx_ld(0, ptr);
4489  }
4490 
4496  void
4497  store(float *ptr) const
4498  {
4499  vec_vsx_st(data, 0, ptr);
4500  }
4501 
4505  void
4506  streaming_store(float *ptr) const
4507  {
4508  store(ptr);
4509  }
4510 
4514  void
4515  gather(const float *base_ptr, const unsigned int *offsets)
4516  {
4517  for (unsigned int i = 0; i < 4; ++i)
4518  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4519  }
4520 
4524  void
4525  scatter(const unsigned int *offsets, float *base_ptr) const
4526  {
4527  for (unsigned int i = 0; i < 4; ++i)
4528  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4529  }
4530 
4536  __vector float data;
4537 
4538 private:
4545  get_sqrt() const
4546  {
4547  VectorizedArray res;
4548  res.data = vec_sqrt(data);
4549  return res;
4550  }
4551 
4558  get_abs() const
4559  {
4560  VectorizedArray res;
4561  res.data = vec_abs(data);
4562  return res;
4563  }
4564 
4571  get_max(const VectorizedArray &other) const
4572  {
4573  VectorizedArray res;
4574  res.data = vec_max(data, other.data);
4575  return res;
4576  }
4577 
4584  get_min(const VectorizedArray &other) const
4585  {
4586  VectorizedArray res;
4587  res.data = vec_min(data, other.data);
4588  return res;
4589  }
4590 
4591  // Make a few functions friends.
4592  template <typename Number2, std::size_t width2>
4595  template <typename Number2, std::size_t width2>
4598  template <typename Number2, std::size_t width2>
4602  template <typename Number2, std::size_t width2>
4606 };
4607 
4608 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) &&
4609  // defined(__VSX__)
4610 
4611 
4612 #endif // DOXYGEN
4613 
4618 
4624 template <typename Number, std::size_t width>
4625 inline DEAL_II_ALWAYS_INLINE bool
4626 operator==(const VectorizedArray<Number, width> &lhs,
4627  const VectorizedArray<Number, width> &rhs)
4628 {
4629  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4630  if (lhs[i] != rhs[i])
4631  return false;
4632 
4633  return true;
4634 }
4635 
4636 
4642 template <typename Number, std::size_t width>
4643 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4644  operator+(const VectorizedArray<Number, width> &u,
4645  const VectorizedArray<Number, width> &v)
4646 {
4647  VectorizedArray<Number, width> tmp = u;
4648  return tmp += v;
4649 }
4650 
4656 template <typename Number, std::size_t width>
4657 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4658  operator-(const VectorizedArray<Number, width> &u,
4659  const VectorizedArray<Number, width> &v)
4660 {
4661  VectorizedArray<Number, width> tmp = u;
4662  return tmp -= v;
4663 }
4664 
4670 template <typename Number, std::size_t width>
4671 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4672  operator*(const VectorizedArray<Number, width> &u,
4673  const VectorizedArray<Number, width> &v)
4674 {
4675  VectorizedArray<Number, width> tmp = u;
4676  return tmp *= v;
4677 }
4678 
4684 template <typename Number, std::size_t width>
4685 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4686  operator/(const VectorizedArray<Number, width> &u,
4687  const VectorizedArray<Number, width> &v)
4688 {
4689  VectorizedArray<Number, width> tmp = u;
4690  return tmp /= v;
4691 }
4692 
4699 template <typename Number, std::size_t width>
4700 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4701  operator+(const Number &u, const VectorizedArray<Number, width> &v)
4702 {
4703  VectorizedArray<Number, width> tmp = u;
4704  return tmp += v;
4705 }
4706 
4715 template <std::size_t width>
4716 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4717  operator+(const double u, const VectorizedArray<float, width> &v)
4718 {
4719  VectorizedArray<float, width> tmp = static_cast<float>(u);
4720  return tmp += v;
4721 }
4722 
4729 template <typename Number, std::size_t width>
4731  operator+(const VectorizedArray<Number, width> &v, const Number &u)
4732 {
4733  return u + v;
4734 }
4735 
4744 template <std::size_t width>
4746  operator+(const VectorizedArray<float, width> &v, const double u)
4747 {
4748  return u + v;
4749 }
4750 
4757 template <typename Number, std::size_t width>
4758 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4759  operator-(const Number &u, const VectorizedArray<Number, width> &v)
4760 {
4761  VectorizedArray<Number, width> tmp = u;
4762  return tmp -= v;
4763 }
4764 
4773 template <std::size_t width>
4775  operator-(const double u, const VectorizedArray<float, width> &v)
4776 {
4777  VectorizedArray<float, width> tmp = static_cast<float>(u);
4778  return tmp -= v;
4779 }
4780 
4787 template <typename Number, std::size_t width>
4788 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4789  operator-(const VectorizedArray<Number, width> &v, const Number &u)
4790 {
4791  VectorizedArray<Number, width> tmp = u;
4792  return v - tmp;
4793 }
4794 
4803 template <std::size_t width>
4805  operator-(const VectorizedArray<float, width> &v, const double u)
4806 {
4807  VectorizedArray<float, width> tmp = static_cast<float>(u);
4808  return v - tmp;
4809 }
4810 
4817 template <typename Number, std::size_t width>
4818 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4819  operator*(const Number &u, const VectorizedArray<Number, width> &v)
4820 {
4821  VectorizedArray<Number, width> tmp = u;
4822  return tmp *= v;
4823 }
4824 
4833 template <std::size_t width>
4835  operator*(const double u, const VectorizedArray<float, width> &v)
4836 {
4837  VectorizedArray<float, width> tmp = static_cast<float>(u);
4838  return tmp *= v;
4839 }
4840 
4847 template <typename Number, std::size_t width>
4849  operator*(const VectorizedArray<Number, width> &v, const Number &u)
4850 {
4851  return u * v;
4852 }
4853 
4862 template <std::size_t width>
4864  operator*(const VectorizedArray<float, width> &v, const double u)
4865 {
4866  return u * v;
4867 }
4868 
4875 template <typename Number, std::size_t width>
4876 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4877  operator/(const Number &u, const VectorizedArray<Number, width> &v)
4878 {
4879  VectorizedArray<Number, width> tmp = u;
4880  return tmp /= v;
4881 }
4882 
4891 template <std::size_t width>
4893  operator/(const double u, const VectorizedArray<float, width> &v)
4894 {
4895  VectorizedArray<float, width> tmp = static_cast<float>(u);
4896  return tmp /= v;
4897 }
4898 
4905 template <typename Number, std::size_t width>
4906 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4907  operator/(const VectorizedArray<Number, width> &v, const Number &u)
4908 {
4909  VectorizedArray<Number, width> tmp = u;
4910  return v / tmp;
4911 }
4912 
4921 template <std::size_t width>
4923  operator/(const VectorizedArray<float, width> &v, const double u)
4924 {
4925  VectorizedArray<float, width> tmp = static_cast<float>(u);
4926  return v / tmp;
4927 }
4928 
4934 template <typename Number, std::size_t width>
4935 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4936  operator+(const VectorizedArray<Number, width> &u)
4937 {
4938  return u;
4939 }
4940 
4946 template <typename Number, std::size_t width>
4947 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4948  operator-(const VectorizedArray<Number, width> &u)
4949 {
4950  // to get a negative sign, subtract the input from zero (could also
4951  // multiply by -1, but this one is slightly simpler)
4952  return VectorizedArray<Number, width>() - u;
4953 }
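// Written out for a single lane (illustration only): if u[v] == 3.0, the
// value-initialized temporary VectorizedArray<Number, width>() holds 0.0 in
// every lane, so the corresponding result lane is 0.0 - 3.0 == -3.0.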
4954 
4960 template <typename Number, std::size_t width>
4961 inline std::ostream &
4962 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
4963 {
4964  constexpr unsigned int n = VectorizedArray<Number, width>::size();
4965  for (unsigned int i = 0; i < n - 1; ++i)
4966  out << p[i] << ' ';
4967  out << p[n - 1];
4968 
4969  return out;
4970 }
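// A minimal usage sketch (illustration only, not part of the library
// source; assumes <iostream> is included): all lanes are printed separated
// by single spaces, without a trailing newline.
//
//   VectorizedArray<double> v = 1.5;
//   std::cout << v << std::endl; // prints "1.5 1.5 ...", one entry per lane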
4971 
4973 
4978 
4979 
4987 enum class SIMDComparison : int
4988 {
4989 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
4990  equal = _CMP_EQ_OQ,
4991  not_equal = _CMP_NEQ_OQ,
4992  less_than = _CMP_LT_OQ,
4993  less_than_or_equal = _CMP_LE_OQ,
4994  greater_than = _CMP_GT_OQ,
4995  greater_than_or_equal = _CMP_GE_OQ
4996 #else
4997  equal,
4998  not_equal,
4999  less_than,
5000  less_than_or_equal,
5001  greater_than,
5002  greater_than_or_equal
5003 #endif
5004 };
5005 
5006 
5070 template <SIMDComparison predicate, typename Number>
5071 DEAL_II_ALWAYS_INLINE inline Number
5072 compare_and_apply_mask(const Number &left,
5073  const Number &right,
5074  const Number &true_value,
5075  const Number &false_value)
5076 {
5077  bool mask;
5078  switch (predicate)
5079  {
5080  case SIMDComparison::equal:
5081  mask = (left == right);
5082  break;
5083  case SIMDComparison::not_equal:
5084  mask = (left != right);
5085  break;
5086  case SIMDComparison::less_than:
5087  mask = (left < right);
5088  break;
5089  case SIMDComparison::less_than_or_equal:
5090  mask = (left <= right);
5091  break;
5092  case SIMDComparison::greater_than:
5093  mask = (left > right);
5094  break;
5095  case SIMDComparison::greater_than_or_equal:
5096  mask = (left >= right);
5097  break;
5098  }
5099 
5100  return mask ? true_value : false_value;
5101 }
5102 
5103 
5108 template <SIMDComparison predicate, typename Number>
5109 DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
5110 compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
5111  const VectorizedArray<Number, 1> &right,
5112  const VectorizedArray<Number, 1> &true_value,
5113  const VectorizedArray<Number, 1> &false_value)
5114 {
5115  VectorizedArray<Number, 1> result;
5116  result.data = compare_and_apply_mask<predicate, Number>(left.data,
5117  right.data,
5118  true_value.data,
5119  false_value.data);
5120  return result;
5121 }
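// A minimal usage sketch (illustration only, not part of the library
// source): clamp every lane of 'x' to at most 'upper_bound' without
// branching on individual lanes; the wider overloads below map this onto a
// single masked select.
//
//   VectorizedArray<double> x           = 2.0;
//   VectorizedArray<double> upper_bound = 1.0;
//   const VectorizedArray<double> clamped =
//     compare_and_apply_mask<SIMDComparison::less_than>(x,
//                                                       upper_bound,
//                                                       x,            // where x <  bound
//                                                       upper_bound); // where x >= bound
//   // every lane of 'clamped' now holds std::min(x[v], upper_bound[v])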
5122 
5124 
5125 #ifndef DOXYGEN
5126 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5127 
5128 template <SIMDComparison predicate>
5129 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
5130 compare_and_apply_mask(const VectorizedArray<float, 16> &left,
5131  const VectorizedArray<float, 16> &right,
5132  const VectorizedArray<float, 16> &true_values,
5133  const VectorizedArray<float, 16> &false_values)
5134 {
5135  const __mmask16 mask =
5136  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5137  VectorizedArray<float, 16> result;
5138  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5139  return result;
5140 }
5141 
5142 
5143 
5144 template <SIMDComparison predicate>
5145 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
5146 compare_and_apply_mask(const VectorizedArray<double, 8> &left,
5147  const VectorizedArray<double, 8> &right,
5148  const VectorizedArray<double, 8> &true_values,
5149  const VectorizedArray<double, 8> &false_values)
5150 {
5151  const __mmask16 mask =
5152  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5153  VectorizedArray<double, 8> result;
5154  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5155  return result;
5156 }
5157 
5158 # endif
5159 
5160 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5161 
5162 template <SIMDComparison predicate>
5163 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
5164 compare_and_apply_mask(const VectorizedArray<float, 8> &left,
5165  const VectorizedArray<float, 8> &right,
5166  const VectorizedArray<float, 8> &true_values,
5167  const VectorizedArray<float, 8> &false_values)
5168 {
5169  const auto mask =
5170  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5171 
5172  VectorizedArray<float, 8> result;
5173  result.data = _mm256_or_ps(_mm256_and_ps(mask, true_values.data),
5174  _mm256_andnot_ps(mask, false_values.data));
5175  return result;
5176 }
5177 
5178 
5179 template <SIMDComparison predicate>
5180 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
5181 compare_and_apply_mask(const VectorizedArray<double, 4> &left,
5182  const VectorizedArray<double, 4> &right,
5183  const VectorizedArray<double, 4> &true_values,
5184  const VectorizedArray<double, 4> &false_values)
5185 {
5186  const auto mask =
5187  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5188 
5189  VectorizedArray<double, 4> result;
5190  result.data = _mm256_or_pd(_mm256_and_pd(mask, true_values.data),
5191  _mm256_andnot_pd(mask, false_values.data));
5192  return result;
5193 }
5194 
5195 # endif
5196 
5197 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5198 
5199 template <SIMDComparison predicate>
5200 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
5201 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
5202  const VectorizedArray<float, 4> &right,
5203  const VectorizedArray<float, 4> &true_values,
5204  const VectorizedArray<float, 4> &false_values)
5205 {
5206  __m128 mask;
5207  switch (predicate)
5208  {
5209  case SIMDComparison::equal:
5210  mask = _mm_cmpeq_ps(left.data, right.data);
5211  break;
5212  case SIMDComparison::not_equal:
5213  mask = _mm_cmpneq_ps(left.data, right.data);
5214  break;
5215  case SIMDComparison::less_than:
5216  mask = _mm_cmplt_ps(left.data, right.data);
5217  break;
5218  case SIMDComparison::less_than_or_equal:
5219  mask = _mm_cmple_ps(left.data, right.data);
5220  break;
5221  case SIMDComparison::greater_than:
5222  mask = _mm_cmpgt_ps(left.data, right.data);
5223  break;
5224  case SIMDComparison::greater_than_or_equal:
5225  mask = _mm_cmpge_ps(left.data, right.data);
5226  break;
5227  }
5228 
5229  VectorizedArray<float, 4> result;
5230  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5231  _mm_andnot_ps(mask, false_values.data));
5232 
5233  return result;
5234 }
5235 
5236 
5237 template <SIMDComparison predicate>
5238 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
5239 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
5240  const VectorizedArray<double, 2> &right,
5241  const VectorizedArray<double, 2> &true_values,
5242  const VectorizedArray<double, 2> &false_values)
5243 {
5244  __m128d mask;
5245  switch (predicate)
5246  {
5247  case SIMDComparison::equal:
5248  mask = _mm_cmpeq_pd(left.data, right.data);
5249  break;
5250  case SIMDComparison::not_equal:
5251  mask = _mm_cmpneq_pd(left.data, right.data);
5252  break;
5253  case SIMDComparison::less_than:
5254  mask = _mm_cmplt_pd(left.data, right.data);
5255  break;
5256  case SIMDComparison::less_than_or_equal:
5257  mask = _mm_cmple_pd(left.data, right.data);
5258  break;
5259  case SIMDComparison::greater_than:
5260  mask = _mm_cmpgt_pd(left.data, right.data);
5261  break;
5262  case SIMDComparison::greater_than_or_equal:
5263  mask = _mm_cmpge_pd(left.data, right.data);
5264  break;
5265  }
5266 
5267  VectorizedArray<double, 2> result;
5268  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5269  _mm_andnot_pd(mask, false_values.data));
5270 
5271  return result;
5272 }
5273 
5274 # endif
5275 #endif // DOXYGEN
5276 
5277 
5279 
5286 namespace std
5287 {
5295  template <typename Number, std::size_t width>
5296  inline ::VectorizedArray<Number, width>
5297  sin(const ::VectorizedArray<Number, width> &x)
5298  {
5299  // put values in an array and later read in that array with an unaligned
5300  // read. This should save some instructions as compared to directly
5301  // setting the individual elements and also circumvents a compiler
5302  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
5303  // from April 2014, topic "matrix_free/step-48 Test").
5304  Number values[::VectorizedArray<Number, width>::size()];
5305  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5306  ++i)
5307  values[i] = std::sin(x[i]);
5308  ::VectorizedArray<Number, width> out;
5309  out.load(&values[0]);
5310  return out;
5311  }
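// A minimal usage sketch (illustration only, not part of the library
// source): the overload above lets generic code call std::sin on a
// VectorizedArray just like on a scalar; the sine is evaluated lane by
// lane.
//
//   VectorizedArray<double> angles = 0.25;               // every lane holds 0.25
//   const VectorizedArray<double> s = std::sin(angles);  // s[v] == std::sin(0.25)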
5312 
5313 
5314 
5322  template <typename Number, std::size_t width>
5323  inline ::VectorizedArray<Number, width>
5324  cos(const ::VectorizedArray<Number, width> &x)
5325  {
5326  Number values[::VectorizedArray<Number, width>::size()];
5327  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5328  ++i)
5329  values[i] = std::cos(x[i]);
5330  ::VectorizedArray<Number, width> out;
5331  out.load(&values[0]);
5332  return out;
5333  }
5334 
5335 
5336 
5344  template <typename Number, std::size_t width>
5345  inline ::VectorizedArray<Number, width>
5346  tan(const ::VectorizedArray<Number, width> &x)
5347  {
5348  Number values[::VectorizedArray<Number, width>::size()];
5349  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5350  ++i)
5351  values[i] = std::tan(x[i]);
5352  ::VectorizedArray<Number, width> out;
5353  out.load(&values[0]);
5354  return out;
5355  }
5356 
5357 
5358 
5366  template <typename Number, std::size_t width>
5367  inline ::VectorizedArray<Number, width>
5368  exp(const ::VectorizedArray<Number, width> &x)
5369  {
5370  Number values[::VectorizedArray<Number, width>::size()];
5371  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5372  ++i)
5373  values[i] = std::exp(x[i]);
5374  ::VectorizedArray<Number, width> out;
5375  out.load(&values[0]);
5376  return out;
5377  }
5378 
5379 
5380 
5388  template <typename Number, std::size_t width>
5389  inline ::VectorizedArray<Number, width>
5390  log(const ::VectorizedArray<Number, width> &x)
5391  {
5392  Number values[::VectorizedArray<Number, width>::size()];
5393  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5394  ++i)
5395  values[i] = std::log(x[i]);
5396  ::VectorizedArray<Number, width> out;
5397  out.load(&values[0]);
5398  return out;
5399  }
5400 
5401 
5402 
5410  template <typename Number, std::size_t width>
5411  inline ::VectorizedArray<Number, width>
5412  sqrt(const ::VectorizedArray<Number, width> &x)
5413  {
5414  return x.get_sqrt();
5415  }
5416 
5417 
5418 
5426  template <typename Number, std::size_t width>
5427  inline ::VectorizedArray<Number, width>
5428  pow(const ::VectorizedArray<Number, width> &x, const Number p)
5429  {
5430  Number values[::VectorizedArray<Number, width>::size()];
5431  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5432  ++i)
5433  values[i] = std::pow(x[i], p);
5434  ::VectorizedArray<Number, width> out;
5435  out.load(&values[0]);
5436  return out;
5437  }
5438 
5439 
5440 
5448  template <typename Number, std::size_t width>
5449  inline ::VectorizedArray<Number, width>
5450  abs(const ::VectorizedArray<Number, width> &x)
5451  {
5452  return x.get_abs();
5453  }
5454 
5455 
5456 
5464  template <typename Number, std::size_t width>
5465  inline ::VectorizedArray<Number, width>
5466  max(const ::VectorizedArray<Number, width> &x,
5467  const ::VectorizedArray<Number, width> &y)
5468  {
5469  return x.get_max(y);
5470  }
5471 
5472 
5473 
5481  template <typename Number, std::size_t width>
5482  inline ::VectorizedArray<Number, width>
5483  min(const ::VectorizedArray<Number, width> &x,
5484  const ::VectorizedArray<Number, width> &y)
5485  {
5486  return x.get_min(y);
5487  }
5488 
5489 
5490 
5494  template <class T>
5495  struct iterator_traits<::VectorizedArrayIterator<T>>
5496  {
5497  using iterator_category = random_access_iterator_tag;
5498  using value_type = typename T::value_type;
5499  using difference_type = std::ptrdiff_t;
5500  };
5501 
5502 } // namespace std
5503 
5504 #endif
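The iterator_traits specialization at the end of the listing advertises VectorizedArrayIterator as a random-access iterator, so the begin()/end() iterators of a VectorizedArray can be handed to standard algorithms in addition to driving range-based for loops over the lanes. A small sketch, not part of the header — the function name is made up for illustration:

#include <deal.II/base/vectorization.h>

#include <cstddef>
#include <iterator>

double average_of_lanes(const dealii::VectorizedArray<double> &v)
{
  // std::distance selects its O(1) random-access path based on the
  // iterator_category provided by the specialization in the listing above.
  const std::ptrdiff_t n_lanes = std::distance(v.begin(), v.end());

  // Range-based iteration over the individual lanes.
  double sum = 0.;
  for (const double lane_value : v)
    sum += lane_value;

  return sum / n_lanes;
}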