vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
24 
25 #include <array>
26 #include <cmath>
27 
28 // Note:
29 // The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30 // according to the following scheme (on x86-based architectures)
31 // #ifdef __AVX512F__
32 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33 // #elif defined (__AVX__)
34 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35 // #elif defined (__SSE2__)
36 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37 // #else
38 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39 // #endif
40 // In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
41 // test, 'check_01_cpu_features.cmake', ensures that these features are not only
42 // present in the compilation unit but also working properly.
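// For illustration, these widths translate into the following numbers of
// lanes per VectorizedArray (see the specializations further down in this
// file):
//
//   width in bits | VectorizedArray<double> | VectorizedArray<float>
//   512 (AVX-512) |         8 lanes         |        16 lanes
//   256 (AVX)     |         4 lanes         |         8 lanes
//   128 (SSE2)    |         2 lanes         |         4 lanes
//     0           |         1 lane          |         1 lane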
43 
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45 
46 // These error messages try to detect the case that deal.II was compiled with
47 // a wider instruction set extension than the current compilation unit, for
48 // example because deal.II was compiled with AVX, but a user project does not
49 // add -march=native or similar flags, making it fall back to SSE2. This leads to
50 // very strange errors as the size of data structures differs between the
51 // compiled deal.II code sitting in libdeal_II.so and the user code if not
52 // detected.
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54 # error \
55  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56 # endif
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58 # error \
59  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60 # endif
61 
62 # ifdef _MSC_VER
63 # include <intrin.h>
64 # elif defined(__ALTIVEC__)
65 # include <altivec.h>
66 
67 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68 // them before they make trouble
69 # undef vector
70 # undef pixel
71 # undef bool
72 # else
73 # include <x86intrin.h>
74 # endif
75 
76 #endif
77 
78 
79 DEAL_II_NAMESPACE_OPEN
80 
81 
82 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
83 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
84 
85 template <typename Number, std::size_t width>
86 struct EnableIfScalar<VectorizedArray<Number, width>>
87 {
88  using type = VectorizedArray<typename EnableIfScalar<Number>::type, width>;
89 };
90 
91 
92 
96 template <typename T>
97 class VectorizedArrayIterator
98 {
99 public:
106  VectorizedArrayIterator(T &data, const std::size_t lane)
107  : data(&data)
108  , lane(lane)
109  {}
110 
114  bool
115  operator==(const VectorizedArrayIterator<T> &other) const
116  {
117  Assert(this->data == other.data,
118  ExcMessage(
119  "You are trying to compare iterators into different arrays."));
120  return this->lane == other.lane;
121  }
122 
126  bool
127  operator!=(const VectorizedArrayIterator<T> &other) const
128  {
129  Assert(this->data == other.data,
130  ExcMessage(
131  "You are trying to compare iterators into different arrays."));
132  return this->lane != other.lane;
133  }
134 
138  VectorizedArrayIterator<T> &
139  operator=(const VectorizedArrayIterator<T> &other) = default;
140 
145  const typename T::value_type &
146  operator*() const
147  {
148  AssertIndexRange(lane, T::size());
149  return (*data)[lane];
150  }
151 
152 
157  template <typename U = T>
158  typename std::enable_if<!std::is_same<U, const U>::value,
159  typename T::value_type>::type &
160  operator*()
161  {
162  AssertIndexRange(lane, T::size());
163  return (*data)[lane];
164  }
165 
171  VectorizedArrayIterator<T> &
172  operator++()
173  {
174  AssertIndexRange(lane + 1, T::size() + 1);
175  lane++;
176  return *this;
177  }
178 
183  VectorizedArrayIterator<T> &
184  operator+=(const std::size_t offset)
185  {
186  AssertIndexRange(lane + offset, T::size() + 1);
187  lane += offset;
188  return *this;
189  }
190 
196  VectorizedArrayIterator<T> &
197  operator--()
198  {
199  Assert(
200  lane > 0,
201  ExcMessage(
202  "You can't decrement an iterator that is already at the beginning of the range."));
203  --lane;
204  return *this;
205  }
206 
210  VectorizedArrayIterator<T>
211  operator+(const std::size_t &offset) const
212  {
213  AssertIndexRange(lane + offset, T::size() + 1);
214  return VectorizedArrayIterator<T>(*data, lane + offset);
215  }
216 
220  std::ptrdiff_t
221  operator-(const VectorizedArrayIterator<T> &other) const
222  {
223  return static_cast<std::ptrdiff_t>(lane) -
224  static_cast<std::ptrdiff_t>(other.lane);
225  }
226 
227 private:
231  T *data;
232 
236  std::size_t lane;
237 };
238 
239 
240 
250 template <typename T, std::size_t width>
251 class VectorizedArrayBase
252 {
253 public:
257  static constexpr std::size_t
258  size()
259  {
260  return width;
261  }
262 
266  VectorizedArrayIterator<T>
267  begin()
268  {
269  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
270  }
271 
276  VectorizedArrayIterator<const T>
277  begin() const
278  {
279  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
280  }
281 
285  VectorizedArrayIterator<T>
286  end()
287  {
288  return VectorizedArrayIterator<T>(static_cast<T &>(*this), width);
289  }
290 
295  VectorizedArrayIterator<const T>
296  end() const
297  {
298  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
299  width);
300  }
301 };
302 
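// Usage sketch (illustration only): because VectorizedArrayBase provides
// begin() and end(), the lanes of any derived VectorizedArray can be
// traversed with a range-based for loop:
//
//   VectorizedArray<double> a = 1.;
//   for (double &lane : a) // visits all VectorizedArray<double>::size() lanes
//     lane *= 2.;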
303 
304 
389 template <typename Number, std::size_t width>
390 class VectorizedArray
391  : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
392 {
393 public:
397  using value_type = Number;
398 
399  static_assert(width == 1,
400  "You specified an illegal width that is not supported.");
401 
406  VectorizedArray() = default;
407 
411  VectorizedArray(const Number scalar)
412  {
413  this->operator=(scalar);
414  }
415 
420  VectorizedArray &
421  operator=(const Number scalar)
422  {
423  data = scalar;
424  return *this;
425  }
426 
432  Number &
433  operator[](const unsigned int comp)
434  {
435  (void)comp;
436  AssertIndexRange(comp, 1);
437  return data;
438  }
439 
445  const Number &
446  operator[](const unsigned int comp) const
447  {
448  (void)comp;
449  AssertIndexRange(comp, 1);
450  return data;
451  }
452 
457  VectorizedArray &
458  operator+=(const VectorizedArray &vec)
459  {
460  data += vec.data;
461  return *this;
462  }
463 
468  VectorizedArray &
469  operator-=(const VectorizedArray &vec)
470  {
471  data -= vec.data;
472  return *this;
473  }
474 
479  VectorizedArray &
480  operator*=(const VectorizedArray &vec)
481  {
482  data *= vec.data;
483  return *this;
484  }
485 
490  VectorizedArray &
491  operator/=(const VectorizedArray &vec)
492  {
493  data /= vec.data;
494  return *this;
495  }
496 
504  void
505  load(const Number *ptr)
506  {
507  data = *ptr;
508  }
509 
517  void
518  store(Number *ptr) const
519  {
520  *ptr = data;
521  }
522 
570  void
571  streaming_store(Number *ptr) const
572  {
573  *ptr = data;
574  }
575 
589  void
590  gather(const Number *base_ptr, const unsigned int *offsets)
591  {
592  data = base_ptr[offsets[0]];
593  }
594 
608  void
609  scatter(const unsigned int *offsets, Number *base_ptr) const
610  {
611  base_ptr[offsets[0]] = data;
612  }
613 
619  Number data;
620 
621 private:
627  VectorizedArray
628  get_sqrt() const
629  {
630  VectorizedArray res;
631  res.data = std::sqrt(data);
632  return res;
633  }
634 
640  VectorizedArray
641  get_abs() const
642  {
643  VectorizedArray res;
644  res.data = std::fabs(data);
645  return res;
646  }
647 
653  VectorizedArray
654  get_max(const VectorizedArray &other) const
655  {
656  VectorizedArray res;
657  res.data = std::max(data, other.data);
658  return res;
659  }
660 
666  VectorizedArray
667  get_min(const VectorizedArray &other) const
668  {
669  VectorizedArray res;
670  res.data = std::min(data, other.data);
671  return res;
672  }
673 
674  // Make a few functions friends.
675  template <typename Number2, std::size_t width2>
676  friend VectorizedArray<Number2, width2>
677  std::sqrt(const VectorizedArray<Number2, width2> &);
678  template <typename Number2, std::size_t width2>
679  friend VectorizedArray<Number2, width2>
680  std::abs(const VectorizedArray<Number2, width2> &);
681  template <typename Number2, std::size_t width2>
682  friend VectorizedArray<Number2, width2>
683  std::max(const VectorizedArray<Number2, width2> &,
684  const VectorizedArray<Number2, width2> &);
685  template <typename Number2, std::size_t width2>
686  friend VectorizedArray<Number2, width2>
687  std::min(const VectorizedArray<Number2, width2> &,
688  const VectorizedArray<Number2, width2> &);
689 };
690 
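// Usage sketch (illustration only) of the interface defined by this class and
// by the wider specializations further down; 'ptr' stands for any
// sufficiently long array of Number:
//
//   VectorizedArray<double> a, b = 3.;
//   a.load(ptr);               // read size() consecutive entries from ptr
//   a += b;                    // lane-wise addition
//   const double first = a[0]; // access a single lane
//   a.store(ptr);              // write the lanes back to memory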
691 
692 
697 
698 
705 template <typename Number,
706  std::size_t width =
707  internal::VectorizedArrayWidthSpecifier<Number>::max_width>
708 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
709 make_vectorized_array(const Number &u)
710 {
711  VectorizedArray<Number, width> result = u;
712  return result;
713 }
714 
715 
716 
723 template <typename VectorizedArrayType>
724 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
725 make_vectorized_array(const typename VectorizedArrayType::value_type &u)
726 {
727  static_assert(
728  std::is_same<VectorizedArrayType,
729  VectorizedArray<typename VectorizedArrayType::value_type,
730  VectorizedArrayType::size()>>::value,
731  "VectorizedArrayType is not a VectorizedArray.");
732 
733  VectorizedArrayType result = u;
734  return result;
735 }
736 
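// Usage sketch (illustration only) for the two overloads above:
//
//   const auto v1 = make_vectorized_array(1.); // VectorizedArray<double> of default width
//   const auto v2 =
//     make_vectorized_array<VectorizedArray<float, 4>>(2.f); // explicitly chosen type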
737 
738 
750 template <typename Number, std::size_t width>
751 inline DEAL_II_ALWAYS_INLINE void
752 gather(VectorizedArray<Number, width> & out,
753  const std::array<Number *, width> &ptrs,
754  const unsigned int offset)
755 {
756  for (unsigned int v = 0; v < width; ++v)
757  out.data[v] = ptrs[v][offset];
758 }
759 
760 
761 
787 template <typename Number, std::size_t width>
788 inline DEAL_II_ALWAYS_INLINE void
789 vectorized_load_and_transpose(const unsigned int n_entries,
790  const Number * in,
791  const unsigned int * offsets,
792  VectorizedArray<Number, width> *out)
793 {
794  for (unsigned int i = 0; i < n_entries; ++i)
795  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
796  out[i][v] = in[offsets[v] + i];
797 }
798 
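// Worked example (illustration only): for width 4, n_entries 2, and
// offsets = {0, 10, 20, 30}, the loops above produce
//
//   out[0] = {in[0], in[10], in[20], in[30]}
//   out[1] = {in[1], in[11], in[21], in[31]}
//
// i.e. element i of the array starting at in + offsets[v] ends up in lane v
// of out[i].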
799 
811 template <typename Number, std::size_t width>
812 inline DEAL_II_ALWAYS_INLINE void
813 vectorized_load_and_transpose(const unsigned int n_entries,
814  const std::array<Number *, width> &in,
815  VectorizedArray<Number, width> *out)
816 {
817  for (unsigned int i = 0; i < n_entries; ++i)
818  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
819  out[i][v] = in[v][i];
820 }
821 
822 
823 
862 template <typename Number, std::size_t width>
863 inline DEAL_II_ALWAYS_INLINE void
864 vectorized_transpose_and_store(const bool add_into,
865  const unsigned int n_entries,
866  const VectorizedArray<Number, width> *in,
867  const unsigned int * offsets,
868  Number * out)
869 {
870  if (add_into)
871  for (unsigned int i = 0; i < n_entries; ++i)
872  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
873  out[offsets[v] + i] += in[i][v];
874  else
875  for (unsigned int i = 0; i < n_entries; ++i)
876  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
877  out[offsets[v] + i] = in[i][v];
878 }
879 
880 
892 template <typename Number, std::size_t width>
893 inline DEAL_II_ALWAYS_INLINE void
894 vectorized_transpose_and_store(const bool add_into,
895  const unsigned int n_entries,
896  const VectorizedArray<Number, width> *in,
897  std::array<Number *, width> & out)
898 {
899  if (add_into)
900  for (unsigned int i = 0; i < n_entries; ++i)
901  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
902  out[v][i] += in[i][v];
903  else
904  for (unsigned int i = 0; i < n_entries; ++i)
905  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
906  out[v][i] = in[i][v];
907 }
908 
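// Worked example (illustration only): these functions invert
// vectorized_load_and_transpose. For width 4, n_entries 2, and
// offsets = {0, 10, 20, 30}, lane v of in[i] is written to (or, if add_into
// is true, added to) out[offsets[v] + i] in the pointer-plus-offsets variant,
// and to out[v][i] in the std::array variant; for instance, in[1][2] goes to
// out[21] and out[2][1], respectively.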
909 
911 
912 #ifndef DOXYGEN
913 
914 // for safety, also check that __AVX512F__ is defined in case the user manually
915 // set some conflicting compile flags which prevent compilation
916 
917 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
918 
922 template <>
923 class VectorizedArray<double, 8>
924  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
925 {
926 public:
930  using value_type = double;
931 
936  VectorizedArray() = default;
937 
941  VectorizedArray(const double scalar)
942  {
943  this->operator=(scalar);
944  }
945 
950  VectorizedArray &
951  operator=(const double x)
952  {
953  data = _mm512_set1_pd(x);
954  return *this;
955  }
956 
961  double &
962  operator[](const unsigned int comp)
963  {
964  AssertIndexRange(comp, 8);
965  return *(reinterpret_cast<double *>(&data) + comp);
966  }
967 
972  const double &
973  operator[](const unsigned int comp) const
974  {
975  AssertIndexRange(comp, 8);
976  return *(reinterpret_cast<const double *>(&data) + comp);
977  }
978 
983  VectorizedArray &
984  operator+=(const VectorizedArray &vec)
985  {
986  // if the compiler supports vector arithmetic, we can simply use +=
987  // operator on the given data type. this allows the compiler to combine
988  // additions with multiplication (fused multiply-add) if those
989  // instructions are available. Otherwise, we need to use the built-in
990  // intrinsic command for __m512d
991 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
992  data += vec.data;
993 # else
994  data = _mm512_add_pd(data, vec.data);
995 # endif
996  return *this;
997  }
998 
1003  VectorizedArray &
1004  operator-=(const VectorizedArray &vec)
1005  {
1006 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1007  data -= vec.data;
1008 # else
1009  data = _mm512_sub_pd(data, vec.data);
1010 # endif
1011  return *this;
1012  }
1017  VectorizedArray &
1018  operator*=(const VectorizedArray &vec)
1019  {
1020 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1021  data *= vec.data;
1022 # else
1023  data = _mm512_mul_pd(data, vec.data);
1024 # endif
1025  return *this;
1026  }
1027 
1032  VectorizedArray &
1033  operator/=(const VectorizedArray &vec)
1034  {
1035 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1036  data /= vec.data;
1037 # else
1038  data = _mm512_div_pd(data, vec.data);
1039 # endif
1040  return *this;
1041  }
1042 
1049  void
1050  load(const double *ptr)
1051  {
1052  data = _mm512_loadu_pd(ptr);
1053  }
1054 
1062  void
1063  store(double *ptr) const
1064  {
1065  _mm512_storeu_pd(ptr, data);
1066  }
1067 
1072  void
1073  streaming_store(double *ptr) const
1074  {
1075  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1076  ExcMessage("Memory not aligned"));
1077  _mm512_stream_pd(ptr, data);
1078  }
1079 
1093  void
1094  gather(const double *base_ptr, const unsigned int *offsets)
1095  {
1096  // unfortunately, there does not appear to be a 256 bit integer load, so
1097  // do it by some reinterpret casts here. this is allowed because the Intel
1098  // API allows aliasing between different vector types.
1099  const __m256 index_val =
1100  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1101  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1102  data = _mm512_i32gather_pd(index, base_ptr, 8);
1103  }
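// Usage sketch (illustration only), with 'v' a VectorizedArray<double, 8> and
// 'base_ptr' a sufficiently long double array:
//
//   unsigned int offsets[8] = {0, 3, 6, 9, 12, 15, 18, 21};
//   v.gather(base_ptr, offsets);
//
// afterwards, lane k of v holds base_ptr[offsets[k]], the same result as the
// scalar fallback data[k] = base_ptr[offsets[k]] for k = 0,...,7.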
1104 
1118  void
1119  scatter(const unsigned int *offsets, double *base_ptr) const
1120  {
1121  for (unsigned int i = 0; i < 8; ++i)
1122  for (unsigned int j = i + 1; j < 8; ++j)
1123  Assert(offsets[i] != offsets[j],
1124  ExcMessage("Result of scatter undefined if two offset elements"
1125  " point to the same position"));
1126 
1127  // unfortunately, there does not appear to be a 256 bit integer load, so
1128  // do it by some reinterpret casts here. this is allowed because the Intel
1129  // API allows aliasing between different vector types.
1130  const __m256 index_val =
1131  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1132  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1133  _mm512_i32scatter_pd(base_ptr, index, data, 8);
1134  }
1135 
1141  __m512d data;
1142 
1143 private:
1149  VectorizedArray
1150  get_sqrt() const
1151  {
1152  VectorizedArray res;
1153  res.data = _mm512_sqrt_pd(data);
1154  return res;
1155  }
1156 
1162  VectorizedArray
1163  get_abs() const
1164  {
1165  // to compute the absolute value, perform bitwise andnot with -0. This
1166  // will leave all value and exponent bits unchanged but force the sign
1167  // value to +. Since there is no andnot for AVX512, we interpret the data
1168  // as 64 bit integers and do the andnot on those types (note that andnot
1169  // is a bitwise operation so the data type does not matter)
1170  __m512d mask = _mm512_set1_pd(-0.);
1171  VectorizedArray res;
1172  res.data = reinterpret_cast<__m512d>(
1173  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1174  reinterpret_cast<__m512i>(data)));
1175  return res;
1176  }
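// Worked example (illustration only) of the andnot trick used above: -0.
// has only the sign bit set (the 64 bit pattern 0x8000000000000000), so
// andnot(-0., x) computes ~bits(-0.) & bits(x), which clears the sign bit
// and leaves exponent and mantissa untouched; e.g. the lane value -1.5
// becomes 1.5.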
1177 
1183  VectorizedArray
1184  get_max(const VectorizedArray &other) const
1185  {
1186  VectorizedArray res;
1187  res.data = _mm512_max_pd(data, other.data);
1188  return res;
1189  }
1190 
1196  VectorizedArray
1197  get_min(const VectorizedArray &other) const
1198  {
1199  VectorizedArray res;
1200  res.data = _mm512_min_pd(data, other.data);
1201  return res;
1202  }
1203 
1204  // Make a few functions friends.
1205  template <typename Number2, std::size_t width2>
1206  friend VectorizedArray<Number2, width2>
1207  std::sqrt(const VectorizedArray<Number2, width2> &);
1208  template <typename Number2, std::size_t width2>
1209  friend VectorizedArray<Number2, width2>
1210  std::abs(const VectorizedArray<Number2, width2> &);
1211  template <typename Number2, std::size_t width2>
1212  friend VectorizedArray<Number2, width2>
1213  std::max(const VectorizedArray<Number2, width2> &,
1214  const VectorizedArray<Number2, width2> &);
1215  template <typename Number2, std::size_t width2>
1216  friend VectorizedArray<Number2, width2>
1217  std::min(const VectorizedArray<Number2, width2> &,
1218  const VectorizedArray<Number2, width2> &);
1219 };
1220 
1221 
1222 
1226 template <>
1227 inline DEAL_II_ALWAYS_INLINE void
1228 vectorized_load_and_transpose(const unsigned int n_entries,
1229  const double * in,
1230  const unsigned int * offsets,
1231  VectorizedArray<double, 8> *out)
1232 {
1233  // do not do full transpose because the code is long and will most
1234  // likely not pay off because many processors have two load units
1235  // (for the top 8 instructions) but only 1 permute unit (for the 8
1236  // shuffle/unpack instructions). rather start the transposition on the
1237  // vectorized array of half the size with 256 bits
1238  const unsigned int n_chunks = n_entries / 4;
1239  for (unsigned int i = 0; i < n_chunks; ++i)
1240  {
1241  __m512d t0, t1, t2, t3 = {};
1242 
1243  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1244  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1245  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1246  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1247  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1248  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1249  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1250  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1251 
1252  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1253  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1254  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1255  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1256  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1257  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1258  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1259  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1260  }
1261  // remainder loop of work that does not divide by 4
1262  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1263  out[i].gather(in + i, offsets);
1264 }
1265 
1266 
1267 
1271 template <>
1272 inline DEAL_II_ALWAYS_INLINE void
1273 vectorized_load_and_transpose(const unsigned int n_entries,
1274  const std::array<double *, 8> &in,
1275  VectorizedArray<double, 8> *out)
1276 {
1277  const unsigned int n_chunks = n_entries / 4;
1278  for (unsigned int i = 0; i < n_chunks; ++i)
1279  {
1280  __m512d t0, t1, t2, t3 = {};
1281 
1282  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1283  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1284  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1285  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1286  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1287  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1288  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1289  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1290 
1291  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1292  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1293  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1294  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1295  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1296  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1297  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1298  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1299  }
1300 
1301  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1302  gather(out[i], in, i);
1303 }
1304 
1305 
1306 
1310 template <>
1311 inline DEAL_II_ALWAYS_INLINE void
1312 vectorized_transpose_and_store(const bool add_into,
1313  const unsigned int n_entries,
1314  const VectorizedArray<double, 8> *in,
1315  const unsigned int * offsets,
1316  double * out)
1317 {
1318  // as for the load, we split the store operations into 256 bit units to
1319  // better balance between code size, shuffle instructions, and stores
1320  const unsigned int n_chunks = n_entries / 4;
1321  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1322  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1323  for (unsigned int i = 0; i < n_chunks; ++i)
1324  {
1325  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1326  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1327  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1328  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1329  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1330  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1331  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1332  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1333  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1334  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1335  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1336  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1337  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1338  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1339  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1340  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1341 
1342  // Cannot use the same store instructions in both paths of the 'if'
1343  // because the compiler cannot know that there is no aliasing
1344  // between pointers
1345  if (add_into)
1346  {
1347  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1348  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1349  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1350  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1351  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1352  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1353  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1354  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1355  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1356  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1357  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1358  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1359  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1360  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1361  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1362  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1363  }
1364  else
1365  {
1366  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1367  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1368  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1369  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1370  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1371  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1372  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1373  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1374  }
1375  }
1376 
1377  // remainder loop of work that does not divide by 4
1378  if (add_into)
1379  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1380  for (unsigned int v = 0; v < 8; ++v)
1381  out[offsets[v] + i] += in[i][v];
1382  else
1383  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1384  for (unsigned int v = 0; v < 8; ++v)
1385  out[offsets[v] + i] = in[i][v];
1386 }
1387 
1388 
1389 
1393 template <>
1394 inline DEAL_II_ALWAYS_INLINE void
1395 vectorized_transpose_and_store(const bool add_into,
1396  const unsigned int n_entries,
1397  const VectorizedArray<double, 8> *in,
1398  std::array<double *, 8> & out)
1399 {
1400  // see the comments in the vectorized_transpose_and_store above
1401 
1402  const unsigned int n_chunks = n_entries / 4;
1403  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1404  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1405  for (unsigned int i = 0; i < n_chunks; ++i)
1406  {
1407  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1408  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1409  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1410  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1411  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1412  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1413  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1414  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1415  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1416  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1417  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1418  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1419  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1420  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1421  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1422  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1423 
1424  if (add_into)
1425  {
1426  res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1427  _mm256_storeu_pd(out[0] + 4 * i, res0);
1428  res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1429  _mm256_storeu_pd(out[1] + 4 * i, res1);
1430  res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1431  _mm256_storeu_pd(out[2] + 4 * i, res2);
1432  res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1433  _mm256_storeu_pd(out[3] + 4 * i, res3);
1434  res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1435  _mm256_storeu_pd(out[4] + 4 * i, res4);
1436  res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1437  _mm256_storeu_pd(out[5] + 4 * i, res5);
1438  res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1439  _mm256_storeu_pd(out[6] + 4 * i, res6);
1440  res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1441  _mm256_storeu_pd(out[7] + 4 * i, res7);
1442  }
1443  else
1444  {
1445  _mm256_storeu_pd(out[0] + 4 * i, res0);
1446  _mm256_storeu_pd(out[1] + 4 * i, res1);
1447  _mm256_storeu_pd(out[2] + 4 * i, res2);
1448  _mm256_storeu_pd(out[3] + 4 * i, res3);
1449  _mm256_storeu_pd(out[4] + 4 * i, res4);
1450  _mm256_storeu_pd(out[5] + 4 * i, res5);
1451  _mm256_storeu_pd(out[6] + 4 * i, res6);
1452  _mm256_storeu_pd(out[7] + 4 * i, res7);
1453  }
1454  }
1455 
1456  if (add_into)
1457  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1458  for (unsigned int v = 0; v < 8; ++v)
1459  out[v][i] += in[i][v];
1460  else
1461  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1462  for (unsigned int v = 0; v < 8; ++v)
1463  out[v][i] = in[i][v];
1464 }
1465 
1466 
1467 
1471 template <>
1472 class VectorizedArray<float, 16>
1473  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
1474 {
1475 public:
1479  using value_type = float;
1480 
1485  VectorizedArray() = default;
1486 
1490  VectorizedArray(const float scalar)
1491  {
1492  this->operator=(scalar);
1493  }
1494 
1499  VectorizedArray &
1500  operator=(const float x)
1501  {
1502  data = _mm512_set1_ps(x);
1503  return *this;
1504  }
1505 
1510  float &
1511  operator[](const unsigned int comp)
1512  {
1513  AssertIndexRange(comp, 16);
1514  return *(reinterpret_cast<float *>(&data) + comp);
1515  }
1516 
1521  const float &
1522  operator[](const unsigned int comp) const
1523  {
1524  AssertIndexRange(comp, 16);
1525  return *(reinterpret_cast<const float *>(&data) + comp);
1526  }
1527 
1532  VectorizedArray &
1533  operator+=(const VectorizedArray &vec)
1534  {
1535  // if the compiler supports vector arithmetic, we can simply use +=
1536  // operator on the given data type. this allows the compiler to combine
1537  // additions with multiplication (fused multiply-add) if those
1538  // instructions are available. Otherwise, we need to use the built-in
1539  // intrinsic command for __m512
1540 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1541  data += vec.data;
1542 # else
1543  data = _mm512_add_ps(data, vec.data);
1544 # endif
1545  return *this;
1546  }
1547 
1552  VectorizedArray &
1553  operator-=(const VectorizedArray &vec)
1554  {
1555 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1556  data -= vec.data;
1557 # else
1558  data = _mm512_sub_ps(data, vec.data);
1559 # endif
1560  return *this;
1561  }
1566  VectorizedArray &
1567  operator*=(const VectorizedArray &vec)
1568  {
1569 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1570  data *= vec.data;
1571 # else
1572  data = _mm512_mul_ps(data, vec.data);
1573 # endif
1574  return *this;
1575  }
1576 
1581  VectorizedArray &
1582  operator/=(const VectorizedArray &vec)
1583  {
1584 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1585  data /= vec.data;
1586 # else
1587  data = _mm512_div_ps(data, vec.data);
1588 # endif
1589  return *this;
1590  }
1591 
1598  void
1599  load(const float *ptr)
1600  {
1601  data = _mm512_loadu_ps(ptr);
1602  }
1603 
1611  void
1612  store(float *ptr) const
1613  {
1614  _mm512_storeu_ps(ptr, data);
1615  }
1616 
1621  void
1622  streaming_store(float *ptr) const
1623  {
1624  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1625  ExcMessage("Memory not aligned"));
1626  _mm512_stream_ps(ptr, data);
1627  }
1628 
1642  void
1643  gather(const float *base_ptr, const unsigned int *offsets)
1644  {
1645  // unfortunately, there does not appear to be a 512 bit integer load, so
1646  // do it by some reinterpret casts here. this is allowed because the Intel
1647  // API allows aliasing between different vector types.
1648  const __m512 index_val =
1649  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1650  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1651  data = _mm512_i32gather_ps(index, base_ptr, 4);
1652  }
1653 
1667  void
1668  scatter(const unsigned int *offsets, float *base_ptr) const
1669  {
1670  for (unsigned int i = 0; i < 16; ++i)
1671  for (unsigned int j = i + 1; j < 16; ++j)
1672  Assert(offsets[i] != offsets[j],
1673  ExcMessage("Result of scatter undefined if two offset elements"
1674  " point to the same position"));
1675 
1676  // unfortunately, there does not appear to be a 512 bit integer load, so
1677  // do it by some reinterpret casts here. this is allowed because the Intel
1678  // API allows aliasing between different vector types.
1679  const __m512 index_val =
1680  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1681  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1682  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1683  }
1684 
1690  __m512 data;
1691 
1692 private:
1698  VectorizedArray
1699  get_sqrt() const
1700  {
1701  VectorizedArray res;
1702  res.data = _mm512_sqrt_ps(data);
1703  return res;
1704  }
1705 
1711  VectorizedArray
1712  get_abs() const
1713  {
1714  // to compute the absolute value, perform bitwise andnot with -0. This
1715  // will leave all value and exponent bits unchanged but force the sign
1716  // value to +. Since there is no andnot for AVX512, we interpret the data
1717  // as 32 bit integers and do the andnot on those types (note that andnot
1718  // is a bitwise operation so the data type does not matter)
1719  __m512 mask = _mm512_set1_ps(-0.f);
1720  VectorizedArray res;
1721  res.data = reinterpret_cast<__m512>(
1722  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1723  reinterpret_cast<__m512i>(data)));
1724  return res;
1725  }
1726 
1732  VectorizedArray
1733  get_max(const VectorizedArray &other) const
1734  {
1735  VectorizedArray res;
1736  res.data = _mm512_max_ps(data, other.data);
1737  return res;
1738  }
1739 
1745  VectorizedArray
1746  get_min(const VectorizedArray &other) const
1747  {
1748  VectorizedArray res;
1749  res.data = _mm512_min_ps(data, other.data);
1750  return res;
1751  }
1752 
1753  // Make a few functions friends.
1754  template <typename Number2, std::size_t width2>
1755  friend VectorizedArray<Number2, width2>
1756  std::sqrt(const VectorizedArray<Number2, width2> &);
1757  template <typename Number2, std::size_t width2>
1758  friend VectorizedArray<Number2, width2>
1759  std::abs(const VectorizedArray<Number2, width2> &);
1760  template <typename Number2, std::size_t width2>
1761  friend VectorizedArray<Number2, width2>
1762  std::max(const VectorizedArray<Number2, width2> &,
1763  const VectorizedArray<Number2, width2> &);
1764  template <typename Number2, std::size_t width2>
1765  friend VectorizedArray<Number2, width2>
1766  std::min(const VectorizedArray<Number2, width2> &,
1767  const VectorizedArray<Number2, width2> &);
1768 };
1769 
1770 
1771 
1775 template <>
1776 inline DEAL_II_ALWAYS_INLINE void
1777 vectorized_load_and_transpose(const unsigned int n_entries,
1778  const float * in,
1779  const unsigned int * offsets,
1780  VectorizedArray<float, 16> *out)
1781 {
1782  // Similar to the double case, we perform the work on smaller entities. In
1783  // this case, we start from 128 bit arrays and insert them into a full 512
1784  // bit index. This reduces the code size and register pressure because we do
1785  // shuffles on 4 numbers rather than 16.
1786  const unsigned int n_chunks = n_entries / 4;
1787 
1788  // To avoid warnings about uninitialized variables, need to initialize one
1789  // variable to a pre-existing value in out, which will never get used in
1790  // the end. Keep the initialization outside the loop because of a bug in
1791  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
1792  // case t3 is initialized to zero (inside/outside of loop), see
1793  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1794  __m512 t0, t1, t2, t3;
1795  if (n_chunks > 0)
1796  t3 = out[0].data;
1797  for (unsigned int i = 0; i < n_chunks; ++i)
1798  {
1799  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1800  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1801  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1802  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1803  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1804  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1805  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1806  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1807  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1808  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1809  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1810  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1811  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1812  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1813  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1814  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1815 
1816  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1817  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1818  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1819  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1820 
1821  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1822  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1823  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1824  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1825  }
1826 
1827  // remainder loop of work that does not divide by 4
1828  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1829  out[i].gather(in + i, offsets);
1830 }
1831 
1832 
1833 
1837 template <>
1838 inline DEAL_II_ALWAYS_INLINE void
1839 vectorized_load_and_transpose(const unsigned int n_entries,
1840  const std::array<float *, 16> &in,
1841  VectorizedArray<float, 16> *out)
1842 {
1843  // see the comments in the vectorized_load_and_transpose above
1844 
1845  const unsigned int n_chunks = n_entries / 4;
1846 
1847  __m512 t0, t1, t2, t3;
1848  if (n_chunks > 0)
1849  t3 = out[0].data;
1850  for (unsigned int i = 0; i < n_chunks; ++i)
1851  {
1852  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1853  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1854  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1855  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1856  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1857  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1858  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1859  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1860  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1861  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1862  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1863  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1864  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1865  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1866  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1867  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1868 
1869  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1870  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1871  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1872  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1873 
1874  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1875  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1876  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1877  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1878  }
1879 
1880  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1881  gather(out[i], in, i);
1882 }
1883 
1884 
1885 
1889 template <>
1890 inline DEAL_II_ALWAYS_INLINE void
1891 vectorized_transpose_and_store(const bool add_into,
1892  const unsigned int n_entries,
1893  const VectorizedArray<float, 16> *in,
1894  const unsigned int * offsets,
1895  float * out)
1896 {
1897  const unsigned int n_chunks = n_entries / 4;
1898  for (unsigned int i = 0; i < n_chunks; ++i)
1899  {
1900  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1901  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1902  __m512 t2 =
1903  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1904  __m512 t3 =
1905  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1906  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1907  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1908  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1909  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1910 
1911  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1912  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1913  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1914  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1915  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1916  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1917  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1918  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1919  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1920  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1921  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1922  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1923  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1924  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1925  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1926  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1927 
1928  // Cannot use the same store instructions in both paths of the 'if'
1929  // because the compiler cannot know that there is no aliasing between
1930  // pointers
1931  if (add_into)
1932  {
1933  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
1934  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1935  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
1936  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1937  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
1938  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1939  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
1940  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1941  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
1942  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1943  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
1944  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1945  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
1946  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1947  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
1948  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1949  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
1950  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1951  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
1952  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1953  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
1954  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1955  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
1956  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1957  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
1958  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1959  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
1960  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1961  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
1962  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1963  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
1964  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1965  }
1966  else
1967  {
1968  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1969  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1970  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1971  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1972  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1973  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1974  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1975  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1976  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1977  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1978  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1979  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1980  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1981  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1982  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1983  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1984  }
1985  }
1986 
1987  // remainder loop of work that does not divide by 4
1988  if (add_into)
1989  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1990  for (unsigned int v = 0; v < 16; ++v)
1991  out[offsets[v] + i] += in[i][v];
1992  else
1993  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1994  for (unsigned int v = 0; v < 16; ++v)
1995  out[offsets[v] + i] = in[i][v];
1996 }
1997 
1998 
1999 
2003 template <>
2004 inline DEAL_II_ALWAYS_INLINE void
2005 vectorized_transpose_and_store(const bool add_into,
2006  const unsigned int n_entries,
2007  const VectorizedArray<float, 16> *in,
2008  std::array<float *, 16> & out)
2009 {
2010  // see the comments in the vectorized_transpose_and_store above
2011 
2012  const unsigned int n_chunks = n_entries / 4;
2013  for (unsigned int i = 0; i < n_chunks; ++i)
2014  {
2015  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2016  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2017  __m512 t2 =
2018  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2019  __m512 t3 =
2020  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2021  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2022  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2023  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2024  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2025 
2026  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2027  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2028  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2029  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2030  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2031  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2032  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2033  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2034  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2035  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2036  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2037  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2038  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2039  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2040  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2041  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2042 
2043  if (add_into)
2044  {
2045  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2046  _mm_storeu_ps(out[0] + 4 * i, res0);
2047  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2048  _mm_storeu_ps(out[1] + 4 * i, res1);
2049  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2050  _mm_storeu_ps(out[2] + 4 * i, res2);
2051  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2052  _mm_storeu_ps(out[3] + 4 * i, res3);
2053  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2054  _mm_storeu_ps(out[4] + 4 * i, res4);
2055  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2056  _mm_storeu_ps(out[5] + 4 * i, res5);
2057  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2058  _mm_storeu_ps(out[6] + 4 * i, res6);
2059  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2060  _mm_storeu_ps(out[7] + 4 * i, res7);
2061  res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2062  _mm_storeu_ps(out[8] + 4 * i, res8);
2063  res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2064  _mm_storeu_ps(out[9] + 4 * i, res9);
2065  res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2066  _mm_storeu_ps(out[10] + 4 * i, res10);
2067  res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2068  _mm_storeu_ps(out[11] + 4 * i, res11);
2069  res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2070  _mm_storeu_ps(out[12] + 4 * i, res12);
2071  res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2072  _mm_storeu_ps(out[13] + 4 * i, res13);
2073  res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2074  _mm_storeu_ps(out[14] + 4 * i, res14);
2075  res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2076  _mm_storeu_ps(out[15] + 4 * i, res15);
2077  }
2078  else
2079  {
2080  _mm_storeu_ps(out[0] + 4 * i, res0);
2081  _mm_storeu_ps(out[1] + 4 * i, res1);
2082  _mm_storeu_ps(out[2] + 4 * i, res2);
2083  _mm_storeu_ps(out[3] + 4 * i, res3);
2084  _mm_storeu_ps(out[4] + 4 * i, res4);
2085  _mm_storeu_ps(out[5] + 4 * i, res5);
2086  _mm_storeu_ps(out[6] + 4 * i, res6);
2087  _mm_storeu_ps(out[7] + 4 * i, res7);
2088  _mm_storeu_ps(out[8] + 4 * i, res8);
2089  _mm_storeu_ps(out[9] + 4 * i, res9);
2090  _mm_storeu_ps(out[10] + 4 * i, res10);
2091  _mm_storeu_ps(out[11] + 4 * i, res11);
2092  _mm_storeu_ps(out[12] + 4 * i, res12);
2093  _mm_storeu_ps(out[13] + 4 * i, res13);
2094  _mm_storeu_ps(out[14] + 4 * i, res14);
2095  _mm_storeu_ps(out[15] + 4 * i, res15);
2096  }
2097  }
2098 
2099  if (add_into)
2100  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2101  for (unsigned int v = 0; v < 16; ++v)
2102  out[v][i] += in[i][v];
2103  else
2104  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2105  for (unsigned int v = 0; v < 16; ++v)
2106  out[v][i] = in[i][v];
2107 }
2108 
2109 # endif
2110 
2111 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2112 
2116 template <>
2117 class VectorizedArray<double, 4>
2118  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2119 {
2120 public:
2124  using value_type = double;
2125 
2130  VectorizedArray() = default;
2131 
2135  VectorizedArray(const double scalar)
2136  {
2137  this->operator=(scalar);
2138  }
2139 
2144  VectorizedArray &
2145  operator=(const double x)
2146  {
2147  data = _mm256_set1_pd(x);
2148  return *this;
2149  }
2150 
2155  double &
2156  operator[](const unsigned int comp)
2157  {
2158  AssertIndexRange(comp, 4);
2159  return *(reinterpret_cast<double *>(&data) + comp);
2160  }
2161 
2166  const double &
2167  operator[](const unsigned int comp) const
2168  {
2169  AssertIndexRange(comp, 4);
2170  return *(reinterpret_cast<const double *>(&data) + comp);
2171  }
2172 
2177  VectorizedArray &
2178  operator+=(const VectorizedArray &vec)
2179  {
2180  // if the compiler supports vector arithmetic, we can simply use +=
2181  // operator on the given data type. this allows the compiler to combine
2182  // additions with multiplication (fused multiply-add) if those
2183  // instructions are available. Otherwise, we need to use the built-in
2184  // intrinsic command for __m256d
2185 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2186  data += vec.data;
2187 # else
2188  data = _mm256_add_pd(data, vec.data);
2189 # endif
2190  return *this;
2191  }
2192 
2197  VectorizedArray &
2198  operator-=(const VectorizedArray &vec)
2199  {
2200 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2201  data -= vec.data;
2202 # else
2203  data = _mm256_sub_pd(data, vec.data);
2204 # endif
2205  return *this;
2206  }
2211  VectorizedArray &
2212  operator*=(const VectorizedArray &vec)
2213  {
2214 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2215  data *= vec.data;
2216 # else
2217  data = _mm256_mul_pd(data, vec.data);
2218 # endif
2219  return *this;
2220  }
2221 
2226  VectorizedArray &
2227  operator/=(const VectorizedArray &vec)
2228  {
2229 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2230  data /= vec.data;
2231 # else
2232  data = _mm256_div_pd(data, vec.data);
2233 # endif
2234  return *this;
2235  }
2236 
2243  void
2244  load(const double *ptr)
2245  {
2246  data = _mm256_loadu_pd(ptr);
2247  }
2248 
2256  void
2257  store(double *ptr) const
2258  {
2259  _mm256_storeu_pd(ptr, data);
2260  }
2261 
2266  void
2267  streaming_store(double *ptr) const
2268  {
2269  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2270  ExcMessage("Memory not aligned"));
2271  _mm256_stream_pd(ptr, data);
2272  }
2273 
2287  void
2288  gather(const double *base_ptr, const unsigned int *offsets)
2289  {
2290 # ifdef __AVX2__
2291  // unfortunately, there does not appear to be a 128 bit integer load, so
2292  // do it by some reinterpret casts here. this is allowed because the Intel
2293  // API allows aliasing between different vector types.
2294  const __m128 index_val =
2295  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2296  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2297  data = _mm256_i32gather_pd(base_ptr, index, 8);
2298 # else
2299  for (unsigned int i = 0; i < 4; ++i)
2300  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2301 # endif
2302  }
2303 
2317  void
2318  scatter(const unsigned int *offsets, double *base_ptr) const
2319  {
2320  // no scatter operation in AVX/AVX2
2321  for (unsigned int i = 0; i < 4; ++i)
2322  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2323  }
2324 
2330  __m256d data;
2331 
2332 private:
2338  VectorizedArray
2339  get_sqrt() const
2340  {
2341  VectorizedArray res;
2342  res.data = _mm256_sqrt_pd(data);
2343  return res;
2344  }
2345 
2351  VectorizedArray
2352  get_abs() const
2353  {
2354  // to compute the absolute value, perform bitwise andnot with -0. This
2355  // will leave all value and exponent bits unchanged but force the sign
2356  // value to +.
2357  __m256d mask = _mm256_set1_pd(-0.);
2358  VectorizedArray res;
2359  res.data = _mm256_andnot_pd(mask, data);
2360  return res;
2361  }
2362 
2368  VectorizedArray
2369  get_max(const VectorizedArray &other) const
2370  {
2371  VectorizedArray res;
2372  res.data = _mm256_max_pd(data, other.data);
2373  return res;
2374  }
2375 
2381  VectorizedArray
2382  get_min(const VectorizedArray &other) const
2383  {
2384  VectorizedArray res;
2385  res.data = _mm256_min_pd(data, other.data);
2386  return res;
2387  }
2388 
2389  // Make a few functions friends.
2390  template <typename Number2, std::size_t width2>
2391  friend VectorizedArray<Number2, width2>
2392  std::sqrt(const VectorizedArray<Number2, width2> &);
2393  template <typename Number2, std::size_t width2>
2394  friend VectorizedArray<Number2, width2>
2395  std::abs(const VectorizedArray<Number2, width2> &);
2396  template <typename Number2, std::size_t width2>
2397  friend VectorizedArray<Number2, width2>
2398  std::max(const VectorizedArray<Number2, width2> &,
2399  const VectorizedArray<Number2, width2> &);
2400  template <typename Number2, std::size_t width2>
2401  friend VectorizedArray<Number2, width2>
2402  std::min(const VectorizedArray<Number2, width2> &,
2403  const VectorizedArray<Number2, width2> &);
2404 };
2405 
2406 
2407 
2411 template <>
2412 inline DEAL_II_ALWAYS_INLINE void
2413 vectorized_load_and_transpose(const unsigned int n_entries,
2414  const double * in,
2415  const unsigned int * offsets,
2416  VectorizedArray<double, 4> *out)
2417 {
2418  const unsigned int n_chunks = n_entries / 4;
2419  const double * in0 = in + offsets[0];
2420  const double * in1 = in + offsets[1];
2421  const double * in2 = in + offsets[2];
2422  const double * in3 = in + offsets[3];
2423 
2424  for (unsigned int i = 0; i < n_chunks; ++i)
2425  {
2426  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2427  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2428  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2429  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2430  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2431  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2432  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2433  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2434  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2435  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2436  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2437  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2438  }
2439 
2440  // remainder loop of work that does not divide by 4
2441  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2442  out[i].gather(in + i, offsets);
2443 }
2444 
2445 
2446 
2450 template <>
2451 inline DEAL_II_ALWAYS_INLINE void
2452 vectorized_load_and_transpose(const unsigned int n_entries,
2453  const std::array<double *, 4> &in,
2454  VectorizedArray<double, 4> *out)
2455 {
2456  // see the comments in the vectorized_load_and_transpose above
2457 
2458  const unsigned int n_chunks = n_entries / 4;
2459  const double * in0 = in[0];
2460  const double * in1 = in[1];
2461  const double * in2 = in[2];
2462  const double * in3 = in[3];
2463 
2464  for (unsigned int i = 0; i < n_chunks; ++i)
2465  {
2466  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2467  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2468  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2469  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2470  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2471  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2472  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2473  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2474  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2475  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2476  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2477  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2478  }
2479 
2480  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2481  gather(out[i], in, i);
2482 }
2483 
2484 
2485 
2489 template <>
2490 inline DEAL_II_ALWAYS_INLINE void
2491 vectorized_transpose_and_store(const bool add_into,
2492  const unsigned int n_entries,
2493  const VectorizedArray<double, 4> *in,
2494  const unsigned int * offsets,
2495  double * out)
2496 {
2497  const unsigned int n_chunks = n_entries / 4;
2498  double * out0 = out + offsets[0];
2499  double * out1 = out + offsets[1];
2500  double * out2 = out + offsets[2];
2501  double * out3 = out + offsets[3];
2502  for (unsigned int i = 0; i < n_chunks; ++i)
2503  {
2504  __m256d u0 = in[4 * i + 0].data;
2505  __m256d u1 = in[4 * i + 1].data;
2506  __m256d u2 = in[4 * i + 2].data;
2507  __m256d u3 = in[4 * i + 3].data;
2508  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2509  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2510  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2511  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2512  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2513  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2514  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2515  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2516 
2517  // Cannot use the same store instructions in both paths of the 'if'
2518  // because the compiler cannot know that there is no aliasing between
2519  // pointers
2520  if (add_into)
2521  {
2522  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2523  _mm256_storeu_pd(out0 + 4 * i, res0);
2524  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2525  _mm256_storeu_pd(out1 + 4 * i, res1);
2526  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2527  _mm256_storeu_pd(out2 + 4 * i, res2);
2528  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2529  _mm256_storeu_pd(out3 + 4 * i, res3);
2530  }
2531  else
2532  {
2533  _mm256_storeu_pd(out0 + 4 * i, res0);
2534  _mm256_storeu_pd(out1 + 4 * i, res1);
2535  _mm256_storeu_pd(out2 + 4 * i, res2);
2536  _mm256_storeu_pd(out3 + 4 * i, res3);
2537  }
2538  }
2539 
2540  // remainder loop of work that does not divide by 4
2541  if (add_into)
2542  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2543  for (unsigned int v = 0; v < 4; ++v)
2544  out[offsets[v] + i] += in[i][v];
2545  else
2546  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2547  for (unsigned int v = 0; v < 4; ++v)
2548  out[offsets[v] + i] = in[i][v];
2549 }
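// A minimal usage sketch for the two helpers above (the names n_points,
// dof_values and vec_values are placeholders, not part of deal.II): they
// convert four contiguous scalar blocks in memory into an array of
// VectorizedArray<double, 4>, one SIMD lane per block, and back.
//
//   constexpr unsigned int n_lanes  = VectorizedArray<double, 4>::size();
//   constexpr unsigned int n_points = 12;
//   double dof_values[n_lanes * n_points] = {}; // four blocks of n_points doubles
//   const unsigned int offsets[n_lanes] = {0 * n_points, 1 * n_points,
//                                          2 * n_points, 3 * n_points};
//
//   VectorizedArray<double, 4> vec_values[n_points];
//   vectorized_load_and_transpose(n_points, dof_values, offsets, vec_values);
//   // ... work on vec_values[i]; lane v holds the data of block v ...
//   vectorized_transpose_and_store(false, n_points, vec_values, offsets, dof_values);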
2550 
2551 
2552 
2556 template <>
2557 inline DEAL_II_ALWAYS_INLINE void
2558 vectorized_transpose_and_store(const bool add_into,
2559  const unsigned int n_entries,
2560  const VectorizedArray<double, 4> *in,
2561  std::array<double *, 4> & out)
2562 {
2563  // see the comments in the vectorized_transpose_and_store above
2564 
2565  const unsigned int n_chunks = n_entries / 4;
2566  double * out0 = out[0];
2567  double * out1 = out[1];
2568  double * out2 = out[2];
2569  double * out3 = out[3];
2570  for (unsigned int i = 0; i < n_chunks; ++i)
2571  {
2572  __m256d u0 = in[4 * i + 0].data;
2573  __m256d u1 = in[4 * i + 1].data;
2574  __m256d u2 = in[4 * i + 2].data;
2575  __m256d u3 = in[4 * i + 3].data;
2576  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2577  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2578  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2579  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2580  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2581  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2582  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2583  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2584 
2585  // Cannot use the same store instructions in both paths of the 'if'
2586  // because the compiler cannot know that there is no aliasing between
2587  // pointers
2588  if (add_into)
2589  {
2590  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2591  _mm256_storeu_pd(out0 + 4 * i, res0);
2592  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2593  _mm256_storeu_pd(out1 + 4 * i, res1);
2594  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2595  _mm256_storeu_pd(out2 + 4 * i, res2);
2596  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2597  _mm256_storeu_pd(out3 + 4 * i, res3);
2598  }
2599  else
2600  {
2601  _mm256_storeu_pd(out0 + 4 * i, res0);
2602  _mm256_storeu_pd(out1 + 4 * i, res1);
2603  _mm256_storeu_pd(out2 + 4 * i, res2);
2604  _mm256_storeu_pd(out3 + 4 * i, res3);
2605  }
2606  }
2607 
2608  // remainder loop of work that does not divide by 4
2609  if (add_into)
2610  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2611  for (unsigned int v = 0; v < 4; ++v)
2612  out[v][i] += in[i][v];
2613  else
2614  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2615  for (unsigned int v = 0; v < 4; ++v)
2616  out[v][i] = in[i][v];
2617 }
2618 
2619 
2620 
2624 template <>
2625 class VectorizedArray<float, 8>
2626  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
2627 {
2628 public:
2632  using value_type = float;
2633 
2638  VectorizedArray() = default;
2639 
2643  VectorizedArray(const float scalar)
2644  {
2645  this->operator=(scalar);
2646  }
2647 
2652  VectorizedArray &
2653  operator=(const float x)
2654  {
2655  data = _mm256_set1_ps(x);
2656  return *this;
2657  }
2658 
2663  float &
2664  operator[](const unsigned int comp)
2665  {
2666  AssertIndexRange(comp, 8);
2667  return *(reinterpret_cast<float *>(&data) + comp);
2668  }
2669 
2674  const float &
2675  operator[](const unsigned int comp) const
2676  {
2677  AssertIndexRange(comp, 8);
2678  return *(reinterpret_cast<const float *>(&data) + comp);
2679  }
2680 
2685  VectorizedArray &
2686  operator+=(const VectorizedArray &vec)
2687  {
2688  // if the compiler supports vector arithmetic, we can simply use +=
2689  // operator on the given data type. this allows the compiler to combine
2690  // additions with multiplication (fused multiply-add) if those
2691  // instructions are available. Otherwise, we need to use the built-in
2692  // intrinsic command for __m256d
2693 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2694  data += vec.data;
2695 # else
2696  data = _mm256_add_ps(data, vec.data);
2697 # endif
2698  return *this;
2699  }
2700 
2705  VectorizedArray &
2706  operator-=(const VectorizedArray &vec)
2707  {
2708 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2709  data -= vec.data;
2710 # else
2711  data = _mm256_sub_ps(data, vec.data);
2712 # endif
2713  return *this;
2714  }
2719  VectorizedArray &
2720  operator*=(const VectorizedArray &vec)
2721  {
2722 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2723  data *= vec.data;
2724 # else
2725  data = _mm256_mul_ps(data, vec.data);
2726 # endif
2727  return *this;
2728  }
2729 
2734  VectorizedArray &
2735  operator/=(const VectorizedArray &vec)
2736  {
2737 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2738  data /= vec.data;
2739 # else
2740  data = _mm256_div_ps(data, vec.data);
2741 # endif
2742  return *this;
2743  }
2744 
2751  void
2752  load(const float *ptr)
2753  {
2754  data = _mm256_loadu_ps(ptr);
2755  }
2756 
2764  void
2765  store(float *ptr) const
2766  {
2767  _mm256_storeu_ps(ptr, data);
2768  }
2769 
2774  void
2775  streaming_store(float *ptr) const
2776  {
2777  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2778  ExcMessage("Memory not aligned"));
2779  _mm256_stream_ps(ptr, data);
2780  }
2781 
2795  void
2796  gather(const float *base_ptr, const unsigned int *offsets)
2797  {
2798 # ifdef __AVX2__
2799  // unfortunately, there does not appear to be a 256 bit integer load, so
2800  // do it by some reinterpret casts here. this is allowed because the Intel
2801  // API allows aliasing between different vector types.
2802  const __m256 index_val =
2803  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2804  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2805  data = _mm256_i32gather_ps(base_ptr, index, 4);
2806 # else
2807  for (unsigned int i = 0; i < 8; ++i)
2808  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2809 # endif
2810  }
2811 
2825  void
2826  scatter(const unsigned int *offsets, float *base_ptr) const
2827  {
2828  // no scatter operation in AVX/AVX2
2829  for (unsigned int i = 0; i < 8; ++i)
2830  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2831  }
2832 
2838  __m256 data;
2839 
2840 private:
2846  VectorizedArray
2847  get_sqrt() const
2848  {
2849  VectorizedArray res;
2850  res.data = _mm256_sqrt_ps(data);
2851  return res;
2852  }
2853 
2859  VectorizedArray
2860  get_abs() const
2861  {
2862  // to compute the absolute value, perform bitwise andnot with -0. This
2863  // will leave all value and exponent bits unchanged but force the sign
2864  // value to +.
2865  __m256 mask = _mm256_set1_ps(-0.f);
2866  VectorizedArray res;
2867  res.data = _mm256_andnot_ps(mask, data);
2868  return res;
2869  }
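  // Worked example of the andnot trick above (scalar view, IEEE-754 single
  // precision): _mm256_andnot_ps(mask, data) computes (~mask) & data per lane.
  // With mask = -0.0f = 0x80000000, only the sign bit is cleared:
  //   data = -1.5f = 0xBFC00000  ->  0xBFC00000 & ~0x80000000 = 0x3FC00000 = +1.5f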
2870 
2876  VectorizedArray
2877  get_max(const VectorizedArray &other) const
2878  {
2879  VectorizedArray res;
2880  res.data = _mm256_max_ps(data, other.data);
2881  return res;
2882  }
2883 
2889  VectorizedArray
2890  get_min(const VectorizedArray &other) const
2891  {
2892  VectorizedArray res;
2893  res.data = _mm256_min_ps(data, other.data);
2894  return res;
2895  }
2896 
2897  // Make a few functions friends.
2898  template <typename Number2, std::size_t width2>
2899  friend VectorizedArray<Number2, width2>
2900  std::sqrt(const VectorizedArray<Number2, width2> &);
2901  template <typename Number2, std::size_t width2>
2902  friend VectorizedArray<Number2, width2>
2903  std::abs(const VectorizedArray<Number2, width2> &);
2904  template <typename Number2, std::size_t width2>
2905  friend VectorizedArray<Number2, width2>
2906  std::max(const VectorizedArray<Number2, width2> &,
2907  const VectorizedArray<Number2, width2> &);
2908  template <typename Number2, std::size_t width2>
2909  friend VectorizedArray<Number2, width2>
2910  std::min(const VectorizedArray<Number2, width2> &,
2911  const VectorizedArray<Number2, width2> &);
2912 };
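// A short usage sketch for the class above, assuming an AVX-enabled build;
// 'values' is a placeholder name:
//
//   float values[8] = {1, 2, 3, 4, 5, 6, 7, 8};
//   VectorizedArray<float, 8> x, y(2.f);
//   x.load(values);   // unaligned load of 8 consecutive floats
//   x *= y;           // all 8 lanes multiplied in one AVX operation
//   x.store(values);  // write the products back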
2913 
2914 
2915 
2919 template <>
2920 inline DEAL_II_ALWAYS_INLINE void
2921 vectorized_load_and_transpose(const unsigned int n_entries,
2922  const float * in,
2923  const unsigned int * offsets,
2924  VectorizedArray<float, 8> *out)
2925 {
2926  const unsigned int n_chunks = n_entries / 4;
2927  for (unsigned int i = 0; i < n_chunks; ++i)
2928  {
2929  // To avoid warnings about uninitialized variables, need to initialize
2930  // one variable with zero before using it.
2931  __m256 t0, t1, t2, t3 = {};
2932  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
2933  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
2934  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
2935  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
2936  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
2937  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
2938  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
2939  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
2940 
2941  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2942  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2943  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2944  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2945  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2946  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2947  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2948  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2949  }
2950 
2951  // remainder loop of work that does not divide by 4
2952  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2953  out[i].gather(in + i, offsets);
2954 }
2955 
2956 
2957 
2961 template <>
2962 inline DEAL_II_ALWAYS_INLINE void
2963 vectorized_load_and_transpose(const unsigned int n_entries,
2964  const std::array<float *, 8> &in,
2965  VectorizedArray<float, 8> *out)
2966 {
2967  // see the comments in the vectorized_load_and_transpose above
2968 
2969  const unsigned int n_chunks = n_entries / 4;
2970  for (unsigned int i = 0; i < n_chunks; ++i)
2971  {
2972  __m256 t0, t1, t2, t3 = {};
2973  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
2974  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
2975  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
2976  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
2977  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
2978  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
2979  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
2980  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
2981 
2982  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2983  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2984  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2985  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2986  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2987  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2988  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2989  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2990  }
2991 
2992  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2993  gather(out[i], in, i);
2994 }
2995 
2996 
2997 
3001 template <>
3002 inline DEAL_II_ALWAYS_INLINE void
3003 vectorized_transpose_and_store(const bool add_into,
3004  const unsigned int n_entries,
3005  const VectorizedArray<float, 8> *in,
3006  const unsigned int * offsets,
3007  float * out)
3008 {
3009  const unsigned int n_chunks = n_entries / 4;
3010  for (unsigned int i = 0; i < n_chunks; ++i)
3011  {
3012  __m256 u0 = in[4 * i + 0].data;
3013  __m256 u1 = in[4 * i + 1].data;
3014  __m256 u2 = in[4 * i + 2].data;
3015  __m256 u3 = in[4 * i + 3].data;
3016  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3017  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3018  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3019  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3020  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3021  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3022  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3023  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3024  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3025  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3026  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3027  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3028  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3029  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3030  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3031  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3032 
3033  // Cannot use the same store instructions in both paths of the 'if'
3034  // because the compiler cannot know that there is no aliasing between
3035  // pointers
3036  if (add_into)
3037  {
3038  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3039  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3040  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3041  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3042  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3043  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3044  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3045  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3046  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3047  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3048  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3049  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3050  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3051  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3052  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3053  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3054  }
3055  else
3056  {
3057  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3058  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3059  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3060  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3061  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3062  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3063  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3064  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3065  }
3066  }
3067 
3068  // remainder loop of work that does not divide by 4
3069  if (add_into)
3070  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3071  for (unsigned int v = 0; v < 8; ++v)
3072  out[offsets[v] + i] += in[i][v];
3073  else
3074  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3075  for (unsigned int v = 0; v < 8; ++v)
3076  out[offsets[v] + i] = in[i][v];
3077 }
3078 
3079 
3080 
3084 template <>
3085 inline DEAL_II_ALWAYS_INLINE void
3086 vectorized_transpose_and_store(const bool add_into,
3087  const unsigned int n_entries,
3088  const VectorizedArray<float, 8> *in,
3089  std::array<float *, 8> & out)
3090 {
3091  // see the comments in the vectorized_transpose_and_store above
3092 
3093  const unsigned int n_chunks = n_entries / 4;
3094  for (unsigned int i = 0; i < n_chunks; ++i)
3095  {
3096  __m256 u0 = in[4 * i + 0].data;
3097  __m256 u1 = in[4 * i + 1].data;
3098  __m256 u2 = in[4 * i + 2].data;
3099  __m256 u3 = in[4 * i + 3].data;
3100  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3101  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3102  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3103  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3104  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3105  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3106  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3107  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3108  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3109  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3110  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3111  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3112  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3113  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3114  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3115  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3116 
3117  if (add_into)
3118  {
3119  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3120  _mm_storeu_ps(out[0] + 4 * i, res0);
3121  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3122  _mm_storeu_ps(out[1] + 4 * i, res1);
3123  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3124  _mm_storeu_ps(out[2] + 4 * i, res2);
3125  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3126  _mm_storeu_ps(out[3] + 4 * i, res3);
3127  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3128  _mm_storeu_ps(out[4] + 4 * i, res4);
3129  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3130  _mm_storeu_ps(out[5] + 4 * i, res5);
3131  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3132  _mm_storeu_ps(out[6] + 4 * i, res6);
3133  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3134  _mm_storeu_ps(out[7] + 4 * i, res7);
3135  }
3136  else
3137  {
3138  _mm_storeu_ps(out[0] + 4 * i, res0);
3139  _mm_storeu_ps(out[1] + 4 * i, res1);
3140  _mm_storeu_ps(out[2] + 4 * i, res2);
3141  _mm_storeu_ps(out[3] + 4 * i, res3);
3142  _mm_storeu_ps(out[4] + 4 * i, res4);
3143  _mm_storeu_ps(out[5] + 4 * i, res5);
3144  _mm_storeu_ps(out[6] + 4 * i, res6);
3145  _mm_storeu_ps(out[7] + 4 * i, res7);
3146  }
3147  }
3148 
3149  if (add_into)
3150  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3151  for (unsigned int v = 0; v < 8; ++v)
3152  out[v][i] += in[i][v];
3153  else
3154  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3155  for (unsigned int v = 0; v < 8; ++v)
3156  out[v][i] = in[i][v];
3157 }
3158 
3159 # endif
3160 
3161 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3162 
3166 template <>
3167 class VectorizedArray<double, 2>
3168  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
3169 {
3170 public:
3174  using value_type = double;
3175 
3180  VectorizedArray() = default;
3181 
3185  VectorizedArray(const double scalar)
3186  {
3187  this->operator=(scalar);
3188  }
3189 
3194  VectorizedArray &
3195  operator=(const double x)
3196  {
3197  data = _mm_set1_pd(x);
3198  return *this;
3199  }
3200 
3205  double &
3206  operator[](const unsigned int comp)
3207  {
3208  AssertIndexRange(comp, 2);
3209  return *(reinterpret_cast<double *>(&data) + comp);
3210  }
3211 
3216  const double &
3217  operator[](const unsigned int comp) const
3218  {
3219  AssertIndexRange(comp, 2);
3220  return *(reinterpret_cast<const double *>(&data) + comp);
3221  }
3222 
3227  VectorizedArray &
3228  operator+=(const VectorizedArray &vec)
3229  {
3230 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3231  data += vec.data;
3232 # else
3233  data = _mm_add_pd(data, vec.data);
3234 # endif
3235  return *this;
3236  }
3237 
3242  VectorizedArray &
3243  operator-=(const VectorizedArray &vec)
3244  {
3245 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3246  data -= vec.data;
3247 # else
3248  data = _mm_sub_pd(data, vec.data);
3249 # endif
3250  return *this;
3251  }
3252 
3257  VectorizedArray &
3258  operator*=(const VectorizedArray &vec)
3259  {
3260 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3261  data *= vec.data;
3262 # else
3263  data = _mm_mul_pd(data, vec.data);
3264 # endif
3265  return *this;
3266  }
3267 
3272  VectorizedArray &
3273  operator/=(const VectorizedArray &vec)
3274  {
3275 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3276  data /= vec.data;
3277 # else
3278  data = _mm_div_pd(data, vec.data);
3279 # endif
3280  return *this;
3281  }
3282 
3289  void
3290  load(const double *ptr)
3291  {
3292  data = _mm_loadu_pd(ptr);
3293  }
3294 
3302  void
3303  store(double *ptr) const
3304  {
3305  _mm_storeu_pd(ptr, data);
3306  }
3307 
3312  void
3313  streaming_store(double *ptr) const
3314  {
3315  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3316  ExcMessage("Memory not aligned"));
3317  _mm_stream_pd(ptr, data);
3318  }
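  // Usage note for streaming_store() above: the destination must satisfy the
  // 16-byte alignment asserted here, e.g. (illustrative only, using C++17
  // aligned allocation):
  //
  //   double *buf = static_cast<double *>(std::aligned_alloc(16, 128 * sizeof(double)));
  //   VectorizedArray<double, 2> v = 3.0;
  //   v.streaming_store(buf); // non-temporal store via _mm_stream_pd
  //   std::free(buf);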
3319 
3333  void
3334  gather(const double *base_ptr, const unsigned int *offsets)
3335  {
3336  for (unsigned int i = 0; i < 2; ++i)
3337  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3338  }
3339 
3353  void
3354  scatter(const unsigned int *offsets, double *base_ptr) const
3355  {
3356  for (unsigned int i = 0; i < 2; ++i)
3357  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3358  }
3359 
3365  __m128d data;
3366 
3367 private:
3373  VectorizedArray
3374  get_sqrt() const
3375  {
3376  VectorizedArray res;
3377  res.data = _mm_sqrt_pd(data);
3378  return res;
3379  }
3380 
3386  VectorizedArray
3387  get_abs() const
3388  {
3389  // to compute the absolute value, perform
3390  // bitwise andnot with -0. This will leave all
3391  // value and exponent bits unchanged but force
3392  // the sign value to +.
3393  __m128d mask = _mm_set1_pd(-0.);
3394  VectorizedArray res;
3395  res.data = _mm_andnot_pd(mask, data);
3396  return res;
3397  }
3398 
3404  VectorizedArray
3405  get_max(const VectorizedArray &other) const
3406  {
3407  VectorizedArray res;
3408  res.data = _mm_max_pd(data, other.data);
3409  return res;
3410  }
3411 
3417  VectorizedArray
3418  get_min(const VectorizedArray &other) const
3419  {
3420  VectorizedArray res;
3421  res.data = _mm_min_pd(data, other.data);
3422  return res;
3423  }
3424 
3425  // Make a few functions friends.
3426  template <typename Number2, std::size_t width2>
3427  friend VectorizedArray<Number2, width2>
3428  std::sqrt(const VectorizedArray<Number2, width2> &);
3429  template <typename Number2, std::size_t width2>
3430  friend VectorizedArray<Number2, width2>
3431  std::abs(const VectorizedArray<Number2, width2> &);
3432  template <typename Number2, std::size_t width2>
3433  friend VectorizedArray<Number2, width2>
3434  std::max(const VectorizedArray<Number2, width2> &,
3435  const VectorizedArray<Number2, width2> &);
3436  template <typename Number2, std::size_t width2>
3437  friend VectorizedArray<Number2, width2>
3438  std::min(const VectorizedArray<Number2, width2> &,
3439  const VectorizedArray<Number2, width2> &);
3440 };
3441 
3442 
3443 
3447 template <>
3448 inline DEAL_II_ALWAYS_INLINE void
3449 vectorized_load_and_transpose(const unsigned int n_entries,
3450  const double * in,
3451  const unsigned int * offsets,
3452  VectorizedArray<double, 2> *out)
3453 {
3454  const unsigned int n_chunks = n_entries / 2;
3455  for (unsigned int i = 0; i < n_chunks; ++i)
3456  {
3457  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3458  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3459  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3460  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3461  }
3462 
3463  // remainder loop of work that does not divide by 2
3464  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3465  for (unsigned int v = 0; v < 2; ++v)
3466  out[i][v] = in[offsets[v] + i];
3467 }
3468 
3469 
3470 
3474 template <>
3475 inline DEAL_II_ALWAYS_INLINE void
3476 vectorized_load_and_transpose(const unsigned int n_entries,
3477  const std::array<double *, 2> &in,
3478  VectorizedArray<double, 2> *out)
3479 {
3480  // see the comments in the vectorized_load_and_transpose above
3481 
3482  const unsigned int n_chunks = n_entries / 2;
3483  for (unsigned int i = 0; i < n_chunks; ++i)
3484  {
3485  __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3486  __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3487  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3488  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3489  }
3490 
3491  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3492  for (unsigned int v = 0; v < 2; ++v)
3493  out[i][v] = in[v][i];
3494 }
3495 
3496 
3497 
3501 template <>
3502 inline DEAL_II_ALWAYS_INLINE void
3503 vectorized_transpose_and_store(const bool add_into,
3504  const unsigned int n_entries,
3505  const VectorizedArray<double, 2> *in,
3506  const unsigned int * offsets,
3507  double * out)
3508 {
3509  const unsigned int n_chunks = n_entries / 2;
3510  if (add_into)
3511  {
3512  for (unsigned int i = 0; i < n_chunks; ++i)
3513  {
3514  __m128d u0 = in[2 * i + 0].data;
3515  __m128d u1 = in[2 * i + 1].data;
3516  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3517  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3518  _mm_storeu_pd(out + 2 * i + offsets[0],
3519  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3520  res0));
3521  _mm_storeu_pd(out + 2 * i + offsets[1],
3522  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3523  res1));
3524  }
3525  // remainder loop of work that does not divide by 2
3526  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3527  for (unsigned int v = 0; v < 2; ++v)
3528  out[offsets[v] + i] += in[i][v];
3529  }
3530  else
3531  {
3532  for (unsigned int i = 0; i < n_chunks; ++i)
3533  {
3534  __m128d u0 = in[2 * i + 0].data;
3535  __m128d u1 = in[2 * i + 1].data;
3536  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3537  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3538  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3539  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3540  }
3541  // remainder loop of work that does not divide by 2
3542  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3543  for (unsigned int v = 0; v < 2; ++v)
3544  out[offsets[v] + i] = in[i][v];
3545  }
3546 }
3547 
3548 
3549 
3553 template <>
3554 inline DEAL_II_ALWAYS_INLINE void
3555 vectorized_transpose_and_store(const bool add_into,
3556  const unsigned int n_entries,
3557  const VectorizedArray<double, 2> *in,
3558  std::array<double *, 2> & out)
3559 {
3560  // see the comments in the vectorized_transpose_and_store above
3561 
3562  const unsigned int n_chunks = n_entries / 2;
3563  if (add_into)
3564  {
3565  for (unsigned int i = 0; i < n_chunks; ++i)
3566  {
3567  __m128d u0 = in[2 * i + 0].data;
3568  __m128d u1 = in[2 * i + 1].data;
3569  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3570  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3571  _mm_storeu_pd(out[0] + 2 * i,
3572  _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3573  _mm_storeu_pd(out[1] + 2 * i,
3574  _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3575  }
3576 
3577  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3578  for (unsigned int v = 0; v < 2; ++v)
3579  out[v][i] += in[i][v];
3580  }
3581  else
3582  {
3583  for (unsigned int i = 0; i < n_chunks; ++i)
3584  {
3585  __m128d u0 = in[2 * i + 0].data;
3586  __m128d u1 = in[2 * i + 1].data;
3587  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3588  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3589  _mm_storeu_pd(out[0] + 2 * i, res0);
3590  _mm_storeu_pd(out[1] + 2 * i, res1);
3591  }
3592 
3593  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3594  for (unsigned int v = 0; v < 2; ++v)
3595  out[v][i] = in[i][v];
3596  }
3597 }
3598 
3599 
3600 
3604 template <>
3605 class VectorizedArray<float, 4>
3606  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
3607 {
3608 public:
3612  using value_type = float;
3613 
3622  VectorizedArray() = default;
3623 
3627  VectorizedArray(const float scalar)
3628  {
3629  this->operator=(scalar);
3630  }
3631 
3633  VectorizedArray &
3634  operator=(const float x)
3635  {
3636  data = _mm_set1_ps(x);
3637  return *this;
3638  }
3639 
3644  float &
3645  operator[](const unsigned int comp)
3646  {
3647  AssertIndexRange(comp, 4);
3648  return *(reinterpret_cast<float *>(&data) + comp);
3649  }
3650 
3655  const float &
3656  operator[](const unsigned int comp) const
3657  {
3658  AssertIndexRange(comp, 4);
3659  return *(reinterpret_cast<const float *>(&data) + comp);
3660  }
3661 
3666  VectorizedArray &
3667  operator+=(const VectorizedArray &vec)
3668  {
3669 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3670  data += vec.data;
3671 # else
3672  data = _mm_add_ps(data, vec.data);
3673 # endif
3674  return *this;
3675  }
3676 
3681  VectorizedArray &
3682  operator-=(const VectorizedArray &vec)
3683  {
3684 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3685  data -= vec.data;
3686 # else
3687  data = _mm_sub_ps(data, vec.data);
3688 # endif
3689  return *this;
3690  }
3691 
3696  VectorizedArray &
3697  operator*=(const VectorizedArray &vec)
3698  {
3699 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3700  data *= vec.data;
3701 # else
3702  data = _mm_mul_ps(data, vec.data);
3703 # endif
3704  return *this;
3705  }
3706 
3711  VectorizedArray &
3712  operator/=(const VectorizedArray &vec)
3713  {
3714 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3715  data /= vec.data;
3716 # else
3717  data = _mm_div_ps(data, vec.data);
3718 # endif
3719  return *this;
3720  }
3721 
3728  void
3729  load(const float *ptr)
3730  {
3731  data = _mm_loadu_ps(ptr);
3732  }
3733 
3741  void
3742  store(float *ptr) const
3743  {
3744  _mm_storeu_ps(ptr, data);
3745  }
3746 
3751  void
3752  streaming_store(float *ptr) const
3753  {
3754  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3755  ExcMessage("Memory not aligned"));
3756  _mm_stream_ps(ptr, data);
3757  }
3758 
3772  void
3773  gather(const float *base_ptr, const unsigned int *offsets)
3774  {
3775  for (unsigned int i = 0; i < 4; ++i)
3776  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3777  }
3778 
3792  void
3793  scatter(const unsigned int *offsets, float *base_ptr) const
3794  {
3795  for (unsigned int i = 0; i < 4; ++i)
3796  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3797  }
3798 
3804  __m128 data;
3805 
3806 private:
3812  VectorizedArray
3813  get_sqrt() const
3814  {
3815  VectorizedArray res;
3816  res.data = _mm_sqrt_ps(data);
3817  return res;
3818  }
3819 
3825  VectorizedArray
3826  get_abs() const
3827  {
3828  // to compute the absolute value, perform bitwise andnot with -0. This
3829  // will leave all value and exponent bits unchanged but force the sign
3830  // value to +.
3831  __m128 mask = _mm_set1_ps(-0.f);
3832  VectorizedArray res;
3833  res.data = _mm_andnot_ps(mask, data);
3834  return res;
3835  }
3836 
3842  VectorizedArray
3843  get_max(const VectorizedArray &other) const
3844  {
3845  VectorizedArray res;
3846  res.data = _mm_max_ps(data, other.data);
3847  return res;
3848  }
3849 
3855  VectorizedArray
3856  get_min(const VectorizedArray &other) const
3857  {
3858  VectorizedArray res;
3859  res.data = _mm_min_ps(data, other.data);
3860  return res;
3861  }
3862 
3863  // Make a few functions friends.
3864  template <typename Number2, std::size_t width2>
3865  friend VectorizedArray<Number2, width2>
3866  std::sqrt(const VectorizedArray<Number2, width2> &);
3867  template <typename Number2, std::size_t width2>
3868  friend VectorizedArray<Number2, width2>
3869  std::abs(const VectorizedArray<Number2, width2> &);
3870  template <typename Number2, std::size_t width2>
3871  friend VectorizedArray<Number2, width2>
3872  std::max(const VectorizedArray<Number2, width2> &,
3873  const VectorizedArray<Number2, width2> &);
3874  template <typename Number2, std::size_t width2>
3875  friend VectorizedArray<Number2, width2>
3876  std::min(const VectorizedArray<Number2, width2> &,
3877  const VectorizedArray<Number2, width2> &);
3878 };
3879 
3880 
3881 
3885 template <>
3886 inline DEAL_II_ALWAYS_INLINE void
3887 vectorized_load_and_transpose(const unsigned int n_entries,
3888  const float * in,
3889  const unsigned int * offsets,
3890  VectorizedArray<float, 4> *out)
3891 {
3892  const unsigned int n_chunks = n_entries / 4;
3893  for (unsigned int i = 0; i < n_chunks; ++i)
3894  {
3895  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
3896  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
3897  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
3898  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
3899  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3900  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3901  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3902  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3903  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3904  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3905  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3906  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3907  }
3908 
3909  // remainder loop of work that does not divide by 4
3910  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3911  for (unsigned int v = 0; v < 4; ++v)
3912  out[i][v] = in[offsets[v] + i];
3913 }
3914 
3915 
3916 
3920 template <>
3921 inline DEAL_II_ALWAYS_INLINE void
3922 vectorized_load_and_transpose(const unsigned int n_entries,
3923  const std::array<float *, 4> &in,
3924  VectorizedArray<float, 4> *out)
3925 {
3926  // see the comments in the vectorized_load_and_transpose above
3927 
3928  const unsigned int n_chunks = n_entries / 4;
3929  for (unsigned int i = 0; i < n_chunks; ++i)
3930  {
3931  __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
3932  __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
3933  __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
3934  __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
3935  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3936  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3937  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3938  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3939  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3940  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3941  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3942  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3943  }
3944 
3945  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3946  for (unsigned int v = 0; v < 4; ++v)
3947  out[i][v] = in[v][i];
3948 }
3949 
3950 
3951 
3955 template <>
3956 inline DEAL_II_ALWAYS_INLINE void
3957 vectorized_transpose_and_store(const bool add_into,
3958  const unsigned int n_entries,
3959  const VectorizedArray<float, 4> *in,
3960  const unsigned int * offsets,
3961  float * out)
3962 {
3963  const unsigned int n_chunks = n_entries / 4;
3964  for (unsigned int i = 0; i < n_chunks; ++i)
3965  {
3966  __m128 u0 = in[4 * i + 0].data;
3967  __m128 u1 = in[4 * i + 1].data;
3968  __m128 u2 = in[4 * i + 2].data;
3969  __m128 u3 = in[4 * i + 3].data;
3970  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
3971  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
3972  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
3973  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
3974  u0 = _mm_shuffle_ps(t0, t2, 0x88);
3975  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
3976  u2 = _mm_shuffle_ps(t1, t3, 0x88);
3977  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
3978 
3979  // Cannot use the same store instructions in both paths of the 'if'
3980  // because the compiler cannot know that there is no aliasing between
3981  // pointers
3982  if (add_into)
3983  {
3984  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
3985  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
3986  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
3987  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
3988  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
3989  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
3990  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
3991  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
3992  }
3993  else
3994  {
3995  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
3996  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
3997  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
3998  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
3999  }
4000  }
4001 
4002  // remainder loop of work that does not divide by 4
4003  if (add_into)
4004  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4005  for (unsigned int v = 0; v < 4; ++v)
4006  out[offsets[v] + i] += in[i][v];
4007  else
4008  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4009  for (unsigned int v = 0; v < 4; ++v)
4010  out[offsets[v] + i] = in[i][v];
4011 }
4012 
4013 
4014 
4018 template <>
4019 inline DEAL_II_ALWAYS_INLINE void
4020 vectorized_transpose_and_store(const bool add_into,
4021  const unsigned int n_entries,
4022  const VectorizedArray<float, 4> *in,
4023  std::array<float *, 4> & out)
4024 {
4025  // see the comments in the vectorized_transpose_and_store above
4026 
4027  const unsigned int n_chunks = n_entries / 4;
4028  for (unsigned int i = 0; i < n_chunks; ++i)
4029  {
4030  __m128 u0 = in[4 * i + 0].data;
4031  __m128 u1 = in[4 * i + 1].data;
4032  __m128 u2 = in[4 * i + 2].data;
4033  __m128 u3 = in[4 * i + 3].data;
4034  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4035  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4036  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4037  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4038  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4039  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4040  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4041  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4042 
4043  if (add_into)
4044  {
4045  u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4046  _mm_storeu_ps(out[0] + 4 * i, u0);
4047  u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4048  _mm_storeu_ps(out[1] + 4 * i, u1);
4049  u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4050  _mm_storeu_ps(out[2] + 4 * i, u2);
4051  u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4052  _mm_storeu_ps(out[3] + 4 * i, u3);
4053  }
4054  else
4055  {
4056  _mm_storeu_ps(out[0] + 4 * i, u0);
4057  _mm_storeu_ps(out[1] + 4 * i, u1);
4058  _mm_storeu_ps(out[2] + 4 * i, u2);
4059  _mm_storeu_ps(out[3] + 4 * i, u3);
4060  }
4061  }
4062 
4063  if (add_into)
4064  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4065  for (unsigned int v = 0; v < 4; ++v)
4066  out[v][i] += in[i][v];
4067  else
4068  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4069  for (unsigned int v = 0; v < 4; ++v)
4070  out[v][i] = in[i][v];
4071 }
4072 
4073 
4074 
4075 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
4076 
4077 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4078  defined(__VSX__)
4079 
4080 template <>
4081 class VectorizedArray<double, 2>
4082  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
4083 {
4084 public:
4088  using value_type = double;
4089 
4094  VectorizedArray() = default;
4095 
4099  VectorizedArray(const double scalar)
4100  {
4101  this->operator=(scalar);
4102  }
4103 
4108  VectorizedArray &
4109  operator=(const double x)
4110  {
4111  data = vec_splats(x);
4112 
4113  // Some compilers believe that vec_splats sets 'x', but that's not true.
4114  // They then warn about setting a variable and not using it. Suppress the
4115  // warning by "using" the variable:
4116  (void)x;
4117  return *this;
4118  }
4119 
4124  double &
4125  operator[](const unsigned int comp)
4126  {
4127  AssertIndexRange(comp, 2);
4128  return *(reinterpret_cast<double *>(&data) + comp);
4129  }
4130 
4135  const double &
4136  operator[](const unsigned int comp) const
4137  {
4138  AssertIndexRange(comp, 2);
4139  return *(reinterpret_cast<const double *>(&data) + comp);
4140  }
4141 
4146  VectorizedArray &
4147  operator+=(const VectorizedArray &vec)
4148  {
4149  data = vec_add(data, vec.data);
4150  return *this;
4151  }
4152 
4157  VectorizedArray &
4158  operator-=(const VectorizedArray &vec)
4159  {
4160  data = vec_sub(data, vec.data);
4161  return *this;
4162  }
4163 
4168  VectorizedArray &
4169  operator*=(const VectorizedArray &vec)
4170  {
4171  data = vec_mul(data, vec.data);
4172  return *this;
4173  }
4174 
4179  VectorizedArray &
4180  operator/=(const VectorizedArray &vec)
4181  {
4182  data = vec_div(data, vec.data);
4183  return *this;
4184  }
4185 
4191  void
4192  load(const double *ptr)
4193  {
4194  data = vec_vsx_ld(0, ptr);
4195  }
4196 
4202  void
4203  store(double *ptr) const
4204  {
4205  vec_vsx_st(data, 0, ptr);
4206  }
4207 
4211  void
4212  streaming_store(double *ptr) const
4213  {
4214  store(ptr);
4215  }
4216 
4220  void
4221  gather(const double *base_ptr, const unsigned int *offsets)
4222  {
4223  for (unsigned int i = 0; i < 2; ++i)
4224  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4225  }
4226 
4230  void
4231  scatter(const unsigned int *offsets, double *base_ptr) const
4232  {
4233  for (unsigned int i = 0; i < 2; ++i)
4234  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4235  }
4236 
4242  __vector double data;
4243 
4244 private:
4250  VectorizedArray
4251  get_sqrt() const
4252  {
4253  VectorizedArray res;
4254  res.data = vec_sqrt(data);
4255  return res;
4256  }
4257 
4263  VectorizedArray
4264  get_abs() const
4265  {
4266  VectorizedArray res;
4267  res.data = vec_abs(data);
4268  return res;
4269  }
4270 
4276  VectorizedArray
4277  get_max(const VectorizedArray &other) const
4278  {
4279  VectorizedArray res;
4280  res.data = vec_max(data, other.data);
4281  return res;
4282  }
4283 
4289  VectorizedArray
4290  get_min(const VectorizedArray &other) const
4291  {
4292  VectorizedArray res;
4293  res.data = vec_min(data, other.data);
4294  return res;
4295  }
4296 
4297  // Make a few functions friends.
4298  template <typename Number2, std::size_t width2>
4299  friend VectorizedArray<Number2, width2>
4300  std::sqrt(const VectorizedArray<Number2, width2> &);
4301  template <typename Number2, std::size_t width2>
4302  friend VectorizedArray<Number2, width2>
4303  std::abs(const VectorizedArray<Number2, width2> &);
4304  template <typename Number2, std::size_t width2>
4305  friend VectorizedArray<Number2, width2>
4306  std::max(const VectorizedArray<Number2, width2> &,
4307  const VectorizedArray<Number2, width2> &);
4308  template <typename Number2, std::size_t width2>
4309  friend VectorizedArray<Number2, width2>
4310  std::min(const VectorizedArray<Number2, width2> &,
4311  const VectorizedArray<Number2, width2> &);
4312 };
4313 
4314 
4315 
4316 template <>
4317 class VectorizedArray<float, 4>
4318  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
4319 {
4320 public:
4324  using value_type = float;
4325 
4330  VectorizedArray() = default;
4331 
4335  VectorizedArray(const float scalar)
4336  {
4337  this->operator=(scalar);
4338  }
4339 
4344  VectorizedArray &
4345  operator=(const float x)
4346  {
4347  data = vec_splats(x);
4348 
4349  // Some compilers believe that vec_splats sets 'x', but that's not true.
4350  // They then warn about setting a variable and not using it. Suppress the
4351  // warning by "using" the variable:
4352  (void)x;
4353  return *this;
4354  }
4355 
4360  float &
4361  operator[](const unsigned int comp)
4362  {
4363  AssertIndexRange(comp, 4);
4364  return *(reinterpret_cast<float *>(&data) + comp);
4365  }
4366 
4371  const float &
4372  operator[](const unsigned int comp) const
4373  {
4374  AssertIndexRange(comp, 4);
4375  return *(reinterpret_cast<const float *>(&data) + comp);
4376  }
4377 
4382  VectorizedArray &
4383  operator+=(const VectorizedArray &vec)
4384  {
4385  data = vec_add(data, vec.data);
4386  return *this;
4387  }
4388 
4393  VectorizedArray &
4394  operator-=(const VectorizedArray &vec)
4395  {
4396  data = vec_sub(data, vec.data);
4397  return *this;
4398  }
4399 
4404  VectorizedArray &
4405  operator*=(const VectorizedArray &vec)
4406  {
4407  data = vec_mul(data, vec.data);
4408  return *this;
4409  }
4410 
4415  VectorizedArray &
4416  operator/=(const VectorizedArray &vec)
4417  {
4418  data = vec_div(data, vec.data);
4419  return *this;
4420  }
4421 
4427  void
4428  load(const float *ptr)
4429  {
4430  data = vec_vsx_ld(0, ptr);
4431  }
4432 
4438  void
4439  store(float *ptr) const
4440  {
4441  vec_vsx_st(data, 0, ptr);
4442  }
4443 
4447  void
4448  streaming_store(float *ptr) const
4449  {
4450  store(ptr);
4451  }
4452 
4456  void
4457  gather(const float *base_ptr, const unsigned int *offsets)
4458  {
4459  for (unsigned int i = 0; i < 4; ++i)
4460  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4461  }
4462 
4466  void
4467  scatter(const unsigned int *offsets, float *base_ptr) const
4468  {
4469  for (unsigned int i = 0; i < 4; ++i)
4470  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4471  }
4472 
4478  __vector float data;
4479 
4480 private:
4486  VectorizedArray
4487  get_sqrt() const
4488  {
4489  VectorizedArray res;
4490  res.data = vec_sqrt(data);
4491  return res;
4492  }
4493 
4499  VectorizedArray
4500  get_abs() const
4501  {
4502  VectorizedArray res;
4503  res.data = vec_abs(data);
4504  return res;
4505  }
4506 
4512  VectorizedArray
4513  get_max(const VectorizedArray &other) const
4514  {
4515  VectorizedArray res;
4516  res.data = vec_max(data, other.data);
4517  return res;
4518  }
4519 
4525  VectorizedArray
4526  get_min(const VectorizedArray &other) const
4527  {
4528  VectorizedArray res;
4529  res.data = vec_min(data, other.data);
4530  return res;
4531  }
4532 
4533  // Make a few functions friends.
4534  template <typename Number2, std::size_t width2>
4535  friend VectorizedArray<Number2, width2>
4536  std::sqrt(const VectorizedArray<Number2, width2> &);
4537  template <typename Number2, std::size_t width2>
4538  friend VectorizedArray<Number2, width2>
4539  std::abs(const VectorizedArray<Number2, width2> &);
4540  template <typename Number2, std::size_t width2>
4541  friend VectorizedArray<Number2, width2>
4542  std::max(const VectorizedArray<Number2, width2> &,
4543  const VectorizedArray<Number2, width2> &);
4544  template <typename Number2, std::size_t width2>
4545  friend VectorizedArray<Number2, width2>
4546  std::min(const VectorizedArray<Number2, width2> &,
4547  const VectorizedArray<Number2, width2> &);
4548 };
4549 
4550 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) &&
4551  // defined(__VSX__)
4552 
4553 
4554 #endif // DOXYGEN
4555 
4560 
4566 template <typename Number, std::size_t width>
4567 inline DEAL_II_ALWAYS_INLINE bool
4568 operator==(const VectorizedArray<Number, width> &lhs,
4569  const VectorizedArray<Number, width> &rhs)
4570 {
4571  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4572  if (lhs[i] != rhs[i])
4573  return false;
4574 
4575  return true;
4576 }
4577 
4578 
4584 template <typename Number, std::size_t width>
4585 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4586 operator+(const VectorizedArray<Number, width> &u,
4587  const VectorizedArray<Number, width> &v)
4588 {
4589  VectorizedArray<Number, width> tmp = u;
4590  return tmp += v;
4591 }
4592 
4598 template <typename Number, std::size_t width>
4599 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4600 operator-(const VectorizedArray<Number, width> &u,
4601  const VectorizedArray<Number, width> &v)
4602 {
4603  VectorizedArray<Number, width> tmp = u;
4604  return tmp -= v;
4605 }
4606 
4612 template <typename Number, std::size_t width>
4613 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4614 operator*(const VectorizedArray<Number, width> &u,
4615  const VectorizedArray<Number, width> &v)
4616 {
4617  VectorizedArray<Number, width> tmp = u;
4618  return tmp *= v;
4619 }
4620 
4626 template <typename Number, std::size_t width>
4627 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4628 operator/(const VectorizedArray<Number, width> &u,
4629  const VectorizedArray<Number, width> &v)
4630 {
4631  VectorizedArray<Number, width> tmp = u;
4632  return tmp /= v;
4633 }
4634 
4641 template <typename Number, std::size_t width>
4642 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4643 operator+(const Number &u, const VectorizedArray<Number, width> &v)
4644 {
4645  VectorizedArray<Number, width> tmp = u;
4646  return tmp += v;
4647 }
4648 
4657 template <std::size_t width>
4658 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4659 operator+(const double u, const VectorizedArray<float, width> &v)
4660 {
4661  VectorizedArray<float, width> tmp = static_cast<float>(u);
4662  return tmp += v;
4663 }
4664 
4671 template <typename Number, std::size_t width>
4672 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4673 operator+(const VectorizedArray<Number, width> &v, const Number &u)
4674 {
4675  return u + v;
4676 }
4677 
4686 template <std::size_t width>
4687 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4688 operator+(const VectorizedArray<float, width> &v, const double u)
4689 {
4690  return u + v;
4691 }
4692 
4699 template <typename Number, std::size_t width>
4700 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4701 operator-(const Number &u, const VectorizedArray<Number, width> &v)
4702 {
4703  VectorizedArray<Number, width> tmp = u;
4704  return tmp -= v;
4705 }
4706 
4715 template <std::size_t width>
4716 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4717 operator-(const double u, const VectorizedArray<float, width> &v)
4718 {
4719  VectorizedArray<float, width> tmp = static_cast<float>(u);
4720  return tmp -= v;
4721 }
4722 
4729 template <typename Number, std::size_t width>
4730 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4731 operator-(const VectorizedArray<Number, width> &v, const Number &u)
4732 {
4733  VectorizedArray<Number, width> tmp = u;
4734  return v - tmp;
4735 }
4736 
4745 template <std::size_t width>
4746 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4747 operator-(const VectorizedArray<float, width> &v, const double u)
4748 {
4749  VectorizedArray<float, width> tmp = static_cast<float>(u);
4750  return v - tmp;
4751 }
4752 
4759 template <typename Number, std::size_t width>
4760 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4761 operator*(const Number &u, const VectorizedArray<Number, width> &v)
4762 {
4763  VectorizedArray<Number, width> tmp = u;
4764  return tmp *= v;
4765 }
4766 
4775 template <std::size_t width>
4776 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4777 operator*(const double u, const VectorizedArray<float, width> &v)
4778 {
4779  VectorizedArray<float, width> tmp = static_cast<float>(u);
4780  return tmp *= v;
4781 }
4782 
4789 template <typename Number, std::size_t width>
4790 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4791 operator*(const VectorizedArray<Number, width> &v, const Number &u)
4792 {
4793  return u * v;
4794 }
4795 
4804 template <std::size_t width>
4805 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4806 operator*(const VectorizedArray<float, width> &v, const double u)
4807 {
4808  return u * v;
4809 }
4810 
4817 template <typename Number, std::size_t width>
4818 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4819 operator/(const Number &u, const VectorizedArray<Number, width> &v)
4820 {
4821  VectorizedArray<Number, width> tmp = u;
4822  return tmp /= v;
4823 }
4824 
4833 template <std::size_t width>
4834 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4835 operator/(const double u, const VectorizedArray<float, width> &v)
4836 {
4837  VectorizedArray<float, width> tmp = static_cast<float>(u);
4838  return tmp /= v;
4839 }
4840 
4847 template <typename Number, std::size_t width>
4848 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4849 operator/(const VectorizedArray<Number, width> &v, const Number &u)
4850 {
4851  VectorizedArray<Number, width> tmp = u;
4852  return v / tmp;
4853 }
4854 
4863 template <std::size_t width>
4864 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4865 operator/(const VectorizedArray<float, width> &v, const double u)
4866 {
4867  VectorizedArray<float, width> tmp = static_cast<float>(u);
4868  return v / tmp;
4869 }
4870 
4876 template <typename Number, std::size_t width>
4877 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4878 operator+(const VectorizedArray<Number, width> &u)
4879 {
4880  return u;
4881 }
4882 
4888 template <typename Number, std::size_t width>
4889 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4890 operator-(const VectorizedArray<Number, width> &u)
4891 {
4892  // to get a negative sign, subtract the input from zero (could also
4893  // multiply by -1, but this one is slightly simpler)
4894  return VectorizedArray<Number, width>() - u;
4895 }
4896 
4902 template <typename Number, std::size_t width>
4903 inline std::ostream &
4904 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
4905 {
4906  constexpr unsigned int n = VectorizedArray<Number, width>::size();
4907  for (unsigned int i = 0; i < n - 1; ++i)
4908  out << p[i] << ' ';
4909  out << p[n - 1];
4910 
4911  return out;
4912 }
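// For example, assuming a build in which VectorizedArray<double, 4> is
// available, the stream operator above prints the lanes separated by single
// spaces and without a trailing newline:
//
//   VectorizedArray<double, 4> v = 1.5;
//   std::cout << v;   // prints "1.5 1.5 1.5 1.5"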
4913 
4915 
4920 
4921 
4929 enum class SIMDComparison : int
4930 {
4931 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
4932  equal = _CMP_EQ_OQ,
4933  not_equal = _CMP_NEQ_OQ,
4934  less_than = _CMP_LT_OQ,
4935  less_than_or_equal = _CMP_LE_OQ,
4936  greater_than = _CMP_GT_OQ,
4937  greater_than_or_equal = _CMP_GE_OQ
4938 #else
4939  equal,
4940  not_equal,
4941  less_than,
4942  less_than_or_equal,
4943  greater_than,
4944  greater_than_or_equal
4945 #endif
4946 };
4947 
4948 
5012 template <SIMDComparison predicate, typename Number>
5013 DEAL_II_ALWAYS_INLINE inline Number
5014 compare_and_apply_mask(const Number &left,
5015  const Number &right,
5016  const Number &true_value,
5017  const Number &false_value)
5018 {
5019  bool mask;
5020  switch (predicate)
5021  {
5022  case SIMDComparison::equal:
5023  mask = (left == right);
5024  break;
5025  case SIMDComparison::not_equal:
5026  mask = (left != right);
5027  break;
5028  case SIMDComparison::less_than:
5029  mask = (left < right);
5030  break;
5031  case SIMDComparison::less_than_or_equal:
5032  mask = (left <= right);
5033  break;
5034  case SIMDComparison::greater_than:
5035  mask = (left > right);
5036  break;
5037  case SIMDComparison::greater_than_or_equal:
5038  mask = (left >= right);
5039  break;
5040  }
5041 
5042  return mask ? true_value : false_value;
5043 }
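// As an example of the scalar fallback above, clamping 'x' to an upper bound
// 'limit' (placeholder names) can be written as an element-wise select that
// works unchanged for the vectorized specializations further down:
//
//   const double clamped =
//     compare_and_apply_mask<SIMDComparison::less_than>(x, limit, x, limit);
//   // clamped == (x < limit) ? x : limit, i.e. std::min(x, limit)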
5044 
5045 
5050 template <SIMDComparison predicate, typename Number>
5051 DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
5052 compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
5053  const VectorizedArray<Number, 1> &right,
5054  const VectorizedArray<Number, 1> &true_value,
5055  const VectorizedArray<Number, 1> &false_value)
5056 {
5057  VectorizedArray<Number, 1> result;
5058  result.data = compare_and_apply_mask<predicate, Number>(left.data,
5059  right.data,
5060  true_value.data,
5061  false_value.data);
5062  return result;
5063 }
5064 
5066 
5067 #ifndef DOXYGEN
5068 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5069 
5070 template <SIMDComparison predicate>
5071 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
5072 compare_and_apply_mask(const VectorizedArray<float, 16> &left,
5073  const VectorizedArray<float, 16> &right,
5074  const VectorizedArray<float, 16> &true_values,
5075  const VectorizedArray<float, 16> &false_values)
5076 {
5077  const __mmask16 mask =
5078  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5079  VectorizedArray<float, 16> result;
5080  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5081  return result;
5082 }
5083 
5084 
5085 
5086 template <SIMDComparison predicate>
5087 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
5088 compare_and_apply_mask(const VectorizedArray<double, 8> &left,
5089  const VectorizedArray<double, 8> &right,
5090  const VectorizedArray<double, 8> &true_values,
5091  const VectorizedArray<double, 8> &false_values)
5092 {
5093  const __mmask16 mask =
5094  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5095  VectorizedArray<double, 8> result;
5096  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5097  return result;
5098 }
5099 
5100 # endif
5101 
5102 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5103 
5104 template <SIMDComparison predicate>
5105 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
5106 compare_and_apply_mask(const VectorizedArray<float, 8> &left,
5107  const VectorizedArray<float, 8> &right,
5108  const VectorizedArray<float, 8> &true_values,
5109  const VectorizedArray<float, 8> &false_values)
5110 {
5111  const auto mask =
5112  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5113 
5114  VectorizedArray<float, 8> result;
5115  result.data = _mm256_or_ps(_mm256_and_ps(mask, true_values.data),
5116  _mm256_andnot_ps(mask, false_values.data));
5117  return result;
5118 }
5119 
5120 
5121 template <SIMDComparison predicate>
5122 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
5123 compare_and_apply_mask(const VectorizedArray<double, 4> &left,
5124  const VectorizedArray<double, 4> &right,
5125  const VectorizedArray<double, 4> &true_values,
5126  const VectorizedArray<double, 4> &false_values)
5127 {
5128  const auto mask =
5129  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5130 
5131  VectorizedArray<double, 4> result;
5132  result.data = _mm256_or_pd(_mm256_and_pd(mask, true_values.data),
5133  _mm256_andnot_pd(mask, false_values.data));
5134  return result;
5135 }
5136 
5137 # endif
5138 
5139 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5140 
5141 template <SIMDComparison predicate>
5142 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
5143 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
5144  const VectorizedArray<float, 4> &right,
5145  const VectorizedArray<float, 4> &true_values,
5146  const VectorizedArray<float, 4> &false_values)
5147 {
5148  __m128 mask;
5149  switch (predicate)
5150  {
5151  case SIMDComparison::equal:
5152  mask = _mm_cmpeq_ps(left.data, right.data);
5153  break;
5154  case SIMDComparison::not_equal:
5155  mask = _mm_cmpneq_ps(left.data, right.data);
5156  break;
5157  case SIMDComparison::less_than:
5158  mask = _mm_cmplt_ps(left.data, right.data);
5159  break;
5160  case SIMDComparison::less_than_or_equal:
5161  mask = _mm_cmple_ps(left.data, right.data);
5162  break;
5163  case SIMDComparison::greater_than:
5164  mask = _mm_cmpgt_ps(left.data, right.data);
5165  break;
5166  case SIMDComparison::greater_than_or_equal:
5167  mask = _mm_cmpge_ps(left.data, right.data);
5168  break;
5169  }
5170 
5171  VectorizedArray<float, 4> result;
5172  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5173  _mm_andnot_ps(mask, false_values.data));
5174 
5175  return result;
5176 }
5177 
5178 
5179 template <SIMDComparison predicate>
5180 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
5181 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
5182  const VectorizedArray<double, 2> &right,
5183  const VectorizedArray<double, 2> &true_values,
5184  const VectorizedArray<double, 2> &false_values)
5185 {
5186  __m128d mask;
5187  switch (predicate)
5188  {
5189  case SIMDComparison::equal:
5190  mask = _mm_cmpeq_pd(left.data, right.data);
5191  break;
5192  case SIMDComparison::not_equal:
5193  mask = _mm_cmpneq_pd(left.data, right.data);
5194  break;
5195  case SIMDComparison::less_than:
5196  mask = _mm_cmplt_pd(left.data, right.data);
5197  break;
5198  case SIMDComparison::less_than_or_equal:
5199  mask = _mm_cmple_pd(left.data, right.data);
5200  break;
5201  case SIMDComparison::greater_than:
5202  mask = _mm_cmpgt_pd(left.data, right.data);
5203  break;
5204  case SIMDComparison::greater_than_or_equal:
5205  mask = _mm_cmpge_pd(left.data, right.data);
5206  break;
5207  }
5208 
5209  VectorizedArray<double, 2> result;
5210  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5211  _mm_andnot_pd(mask, false_values.data));
5212 
5213  return result;
5214 }
5215 
5216 # endif
5217 #endif // DOXYGEN
5218 
5219 
5221 
5228 namespace std
5229 {
5237  template <typename Number, std::size_t width>
5238  inline ::VectorizedArray<Number, width>
5239  sin(const ::VectorizedArray<Number, width> &x)
5240  {
5241  // put values in an array and later read in that array with an unaligned
5242  // read. This should save some instructions as compared to directly
5243  // setting the individual elements and also circumvents a compiler
5244  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
5245  // from April 2014, topic "matrix_free/step-48 Test").
5246  Number values[::VectorizedArray<Number, width>::size()];
5247  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5248  ++i)
5249  values[i] = std::sin(x[i]);
5250  ::VectorizedArray<Number, width> out;
5251  out.load(&values[0]);
5252  return out;
5253  }
5254 
5255 
5256 
5264  template <typename Number, std::size_t width>
5265  inline ::VectorizedArray<Number, width>
5266  cos(const ::VectorizedArray<Number, width> &x)
5267  {
5268  Number values[::VectorizedArray<Number, width>::size()];
5269  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5270  ++i)
5271  values[i] = std::cos(x[i]);
5272  ::VectorizedArray<Number, width> out;
5273  out.load(&values[0]);
5274  return out;
5275  }
5276 
5277 
5278 
5286  template <typename Number, std::size_t width>
5287  inline ::VectorizedArray<Number, width>
5288  tan(const ::VectorizedArray<Number, width> &x)
5289  {
5290  Number values[::VectorizedArray<Number, width>::size()];
5291  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5292  ++i)
5293  values[i] = std::tan(x[i]);
5294  ::VectorizedArray<Number, width> out;
5295  out.load(&values[0]);
5296  return out;
5297  }
5298 
5299 
5300 
5308  template <typename Number, std::size_t width>
5309  inline ::VectorizedArray<Number, width>
5310  exp(const ::VectorizedArray<Number, width> &x)
5311  {
5312  Number values[::VectorizedArray<Number, width>::size()];
5313  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5314  ++i)
5315  values[i] = std::exp(x[i]);
5316  ::VectorizedArray<Number, width> out;
5317  out.load(&values[0]);
5318  return out;
5319  }
5320 
5321 
5322 
5330  template <typename Number, std::size_t width>
5331  inline ::VectorizedArray<Number, width>
5332  log(const ::VectorizedArray<Number, width> &x)
5333  {
5334  Number values[::VectorizedArray<Number, width>::size()];
5335  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5336  ++i)
5337  values[i] = std::log(x[i]);
5338  ::VectorizedArray<Number, width> out;
5339  out.load(&values[0]);
5340  return out;
5341  }
5342 
5343 
5344 
5352  template <typename Number, std::size_t width>
5353  inline ::VectorizedArray<Number, width>
5354  sqrt(const ::VectorizedArray<Number, width> &x)
5355  {
5356  return x.get_sqrt();
5357  }
5358 
5359 
5360 
5368  template <typename Number, std::size_t width>
5369  inline ::VectorizedArray<Number, width>
5370  pow(const ::VectorizedArray<Number, width> &x, const Number p)
5371  {
5372  Number values[::VectorizedArray<Number, width>::size()];
5373  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5374  ++i)
5375  values[i] = std::pow(x[i], p);
5376  ::VectorizedArray<Number, width> out;
5377  out.load(&values[0]);
5378  return out;
5379  }
5380 
5381 
5382 
5391  template <typename Number, std::size_t width>
5392  inline ::VectorizedArray<Number, width>
5393  pow(const ::VectorizedArray<Number, width> &x,
5394  const ::VectorizedArray<Number, width> &p)
5395  {
5396  Number values[::VectorizedArray<Number, width>::size()];
5397  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5398  ++i)
5399  values[i] = std::pow(x[i], p[i]);
5400  ::VectorizedArray<Number, width> out;
5401  out.load(&values[0]);
5402  return out;
5403  }
5404 
5405 
5406 
5414  template <typename Number, std::size_t width>
5415  inline ::VectorizedArray<Number, width>
5416  abs(const ::VectorizedArray<Number, width> &x)
5417  {
5418  return x.get_abs();
5419  }
5420 
5421 
5422 
5430  template <typename Number, std::size_t width>
5431  inline ::VectorizedArray<Number, width>
5432  max(const ::VectorizedArray<Number, width> &x,
5433  const ::VectorizedArray<Number, width> &y)
5434  {
5435  return x.get_max(y);
5436  }
5437 
5438 
5439 
5447  template <typename Number, std::size_t width>
5448  inline ::VectorizedArray<Number, width>
5449  min(const ::VectorizedArray<Number, width> &x,
5450  const ::VectorizedArray<Number, width> &y)
5451  {
5452  return x.get_min(y);
5453  }
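// -------------------------------------------------------------------------
// Illustrative sketch (a comment only, not part of vectorization.h): max()
// and min() above dispatch to single hardware instructions through get_max()
// and get_min(), so a lane-wise clamp can be written without branches. The
// helper name clamp_lanes() is hypothetical:
//
//   template <typename Number, std::size_t width>
//   VectorizedArray<Number, width>
//   clamp_lanes(const VectorizedArray<Number, width> &x,
//               const Number lower,
//               const Number upper)
//   {
//     return std::min(std::max(x, VectorizedArray<Number, width>(lower)),
//                     VectorizedArray<Number, width>(upper));
//   }
// -------------------------------------------------------------------------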
5454 
5455 
5456 
5460  template <class T>
5461  struct iterator_traits<::VectorizedArrayIterator<T>>
5462  {
5463  using iterator_category = random_access_iterator_tag;
5464  using value_type = typename T::value_type;
5465  using difference_type = std::ptrdiff_t;
5466  };
5467 
5468 } // namespace std
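// -------------------------------------------------------------------------
// Illustrative sketch (a comment only, not part of vectorization.h): the
// iterator_traits specialization above advertises the category, value type,
// and difference type of VectorizedArrayIterator, so the lanes of a
// VectorizedArray can be traversed with range-based for loops and standard
// algorithms. Assuming the default ABI width for double, and <numeric> and
// <iterator> included for std::accumulate and std::distance:
//
//   VectorizedArray<double> v;
//   for (unsigned int i = 0; i < v.size(); ++i)
//     v[i] = i;                                // fill the lanes 0, 1, 2, ...
//
//   const double         sum     = std::accumulate(v.begin(), v.end(), 0.);
//   const std::ptrdiff_t n_lanes = std::distance(v.begin(), v.end());
// -------------------------------------------------------------------------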
5469 
5470 #endif