Reference documentation for deal.II version 9.1.1
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/exceptions.h>
23 #include <deal.II/base/template_constraints.h>
24 
25 #include <cmath>
26 
27 // Note:
28 // The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
29 // according to the following scheme (on x86-based architectures)
30 // #ifdef __AVX512F__
31 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
32 // #elif defined (__AVX__)
33 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
34 // #elif defined (__SSE2__)
35 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
36 // #else
37 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
38 // #endif
39 // In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
40 // 'check_01_cpu_features.cmake', ensures that these features are not only
41 // present in the compilation unit but also working properly.
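//
// For illustration (a minimal sketch, not taken from this file), downstream
// code can branch on the detected level at compile time:
//
//   #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2
//   //   ... code that may rely on the 256-bit (AVX) code paths below ...
//   #endif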
42 
43 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
44 
45 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__SSE2__) && \
46  !defined(__AVX__)
47 # error \
48  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
49 # endif
50 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__SSE2__) && \
51  !defined(__AVX512F__)
52 # error \
53  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
54 # endif
55 
56 # if defined(_MSC_VER)
57 # include <intrin.h>
58 # elif defined(__ALTIVEC__)
59 # include <altivec.h>
60 
61 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
62 // them before they make trouble
63 # undef vector
64 # undef pixel
65 # undef bool
66 # else
67 # include <x86intrin.h>
68 # endif
69 
70 #endif
71 
72 
73 DEAL_II_NAMESPACE_OPEN
74 
75 
76 namespace internal
77 {
 91  template <typename T>
 92  struct NumberType<VectorizedArray<T>>
 93  {
94  static const VectorizedArray<T> &
95  value(const VectorizedArray<T> &t)
96  {
97  return t;
98  }
99 
100  static VectorizedArray<T>
101  value(const T &t)
102  {
103  VectorizedArray<T> tmp;
104  tmp = t;
105  return tmp;
106  }
107  };
108 } // namespace internal
109 
110 
111 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
112 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
113 
114 template <typename Number>
115 struct EnableIfScalar<VectorizedArray<Number>>
116 {
 117  using type = VectorizedArray<Number>;
 118 };
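//
// Illustrative consequence of this specialization (an assumed usage sketch):
// VectorizedArray can serve as the scalar type of the tensor classes, e.g.
//
//   Tensor<1, 3, VectorizedArray<double>>          grad;
//   SymmetricTensor<2, 3, VectorizedArray<double>> stress;
//
// just as one would use double or float there.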
119 
120 
121 
172 template <typename Number>
173 class VectorizedArray
174 {
175 public:
181  static const unsigned int n_array_elements = 1;
182 
183  // POD means that there should be no user-defined constructors, destructors
184  // and copy functions (the standard is somewhat relaxed in C++2011, though).
185 
189  DEAL_II_ALWAYS_INLINE
 190  VectorizedArray &
 191  operator=(const Number scalar)
192  {
193  data = scalar;
194  return *this;
195  }
196 
200  DEAL_II_ALWAYS_INLINE
201  Number &operator[](const unsigned int comp)
202  {
203  (void)comp;
204  AssertIndexRange(comp, 1);
205  return data;
206  }
207 
211  DEAL_II_ALWAYS_INLINE
212  const Number &operator[](const unsigned int comp) const
213  {
214  (void)comp;
215  AssertIndexRange(comp, 1);
216  return data;
217  }
218 
222  DEAL_II_ALWAYS_INLINE
 223  VectorizedArray &
 224  operator+=(const VectorizedArray<Number> &vec)
 225  {
226  data += vec.data;
227  return *this;
228  }
229 
233  DEAL_II_ALWAYS_INLINE
 234  VectorizedArray &
 235  operator-=(const VectorizedArray<Number> &vec)
 236  {
237  data -= vec.data;
238  return *this;
239  }
240 
244  DEAL_II_ALWAYS_INLINE
 245  VectorizedArray &
 246  operator*=(const VectorizedArray<Number> &vec)
 247  {
248  data *= vec.data;
249  return *this;
250  }
251 
255  DEAL_II_ALWAYS_INLINE
 256  VectorizedArray &
 257  operator/=(const VectorizedArray<Number> &vec)
 258  {
259  data /= vec.data;
260  return *this;
261  }
262 
269  DEAL_II_ALWAYS_INLINE
270  void
271  load(const Number *ptr)
272  {
273  data = *ptr;
274  }
275 
282  DEAL_II_ALWAYS_INLINE
283  void
284  store(Number *ptr) const
285  {
286  *ptr = data;
287  }
288 
333  DEAL_II_ALWAYS_INLINE
334  void
335  streaming_store(Number *ptr) const
336  {
337  *ptr = data;
338  }
339 
352  DEAL_II_ALWAYS_INLINE
353  void
354  gather(const Number *base_ptr, const unsigned int *offsets)
355  {
356  data = base_ptr[offsets[0]];
357  }
358 
371  DEAL_II_ALWAYS_INLINE
372  void
373  scatter(const unsigned int *offsets, Number *base_ptr) const
374  {
375  base_ptr[offsets[0]] = data;
376  }
377 
382  Number data;
383 
384 private:
389  DEAL_II_ALWAYS_INLINE
 390  VectorizedArray
 391  get_sqrt() const
392  {
393  VectorizedArray res;
394  res.data = std::sqrt(data);
395  return res;
396  }
397 
402  DEAL_II_ALWAYS_INLINE
 403  VectorizedArray
 404  get_abs() const
405  {
406  VectorizedArray res;
407  res.data = std::fabs(data);
408  return res;
409  }
410 
415  DEAL_II_ALWAYS_INLINE
 416  VectorizedArray
 417  get_max(const VectorizedArray &other) const
418  {
419  VectorizedArray res;
420  res.data = std::max(data, other.data);
421  return res;
422  }
423 
428  DEAL_II_ALWAYS_INLINE
 429  VectorizedArray
 430  get_min(const VectorizedArray &other) const
431  {
432  VectorizedArray res;
433  res.data = std::min(data, other.data);
434  return res;
435  }
436 
 440  template <typename Number2>
 441  friend VectorizedArray<Number2>
 442  std::sqrt(const VectorizedArray<Number2> &);
 443  template <typename Number2>
 444  friend VectorizedArray<Number2>
 445  std::abs(const VectorizedArray<Number2> &);
 446  template <typename Number2>
 447  friend VectorizedArray<Number2>
 448  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
 449  template <typename Number2>
 450  friend VectorizedArray<Number2>
 451  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
452 };
453 
454 // We need to have a separate declaration for static const members
455 template <typename Number>
 456 const unsigned int VectorizedArray<Number>::n_array_elements;
 457 
458 
459 
466 template <typename Number>
467 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
468  make_vectorized_array(const Number &u)
469 {
 470  VectorizedArray<Number> result;
 471  result = u;
472  return result;
473 }
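//
// A minimal usage sketch (ptr and out_ptr are assumed pointers to at least
// VectorizedArray<double>::n_array_elements doubles):
//
//   VectorizedArray<double> a = make_vectorized_array(2.0); // all lanes = 2.0
//   VectorizedArray<double> b;
//   b.load(ptr);        // fill the lanes from memory
//   b += a;             // element-wise arithmetic on all lanes at once
//   b.store(out_ptr);   // write the lanes back to memory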
474 
475 
476 
502 template <typename Number>
503 inline void
504 vectorized_load_and_transpose(const unsigned int n_entries,
505  const Number * in,
506  const unsigned int * offsets,
 507  VectorizedArray<Number> * out)
 508 {
509  for (unsigned int i = 0; i < n_entries; ++i)
510  for (unsigned int v = 0; v < VectorizedArray<Number>::n_array_elements; ++v)
511  out[i][v] = in[offsets[v] + i];
512 }
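//
// Example of the access pattern (assuming n_array_elements == 4,
// n_entries == 2 and offsets == {0, 10, 20, 30}): after the call,
//
//   out[0] = {in[0], in[10], in[20], in[30]}
//   out[1] = {in[1], in[11], in[21], in[31]}
//
// i.e. lane v of entry i is read from in[offsets[v] + i].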
513 
514 
515 
554 template <typename Number>
555 inline void
556 vectorized_transpose_and_store(const bool add_into,
557  const unsigned int n_entries,
558  const VectorizedArray<Number> *in,
559  const unsigned int * offsets,
560  Number * out)
561 {
562  if (add_into)
563  for (unsigned int i = 0; i < n_entries; ++i)
564  for (unsigned int v = 0; v < VectorizedArray<Number>::n_array_elements;
565  ++v)
566  out[offsets[v] + i] += in[i][v];
567  else
568  for (unsigned int i = 0; i < n_entries; ++i)
569  for (unsigned int v = 0; v < VectorizedArray<Number>::n_array_elements;
570  ++v)
571  out[offsets[v] + i] = in[i][v];
572 }
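//
// The function above is the inverse of vectorized_load_and_transpose() when
// add_into == false. As a sketch, with tmp an array of n_entries
// VectorizedArray<Number> objects and in a non-const Number pointer,
//
//   vectorized_load_and_transpose(n_entries, in, offsets, tmp);
//   vectorized_transpose_and_store(false, n_entries, tmp, offsets, in);
//
// leaves the strided data in 'in' unchanged, while add_into == true
// accumulates into the output instead of overwriting it.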
573 
574 
575 
576 // for safety, also check that __AVX512F__ is defined in case the user manually
577 // set some conflicting compile flags which prevent compilation
578 
579 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
580 
584 template <>
585 class VectorizedArray<double>
586 {
587 public:
591  static const unsigned int n_array_elements = 8;
592 
596  DEAL_II_ALWAYS_INLINE
 597  VectorizedArray &
 598  operator=(const double x)
599  {
600  data = _mm512_set1_pd(x);
601  return *this;
602  }
603 
607  DEAL_II_ALWAYS_INLINE
608  double &operator[](const unsigned int comp)
609  {
610  AssertIndexRange(comp, 8);
611  return *(reinterpret_cast<double *>(&data) + comp);
612  }
613 
617  DEAL_II_ALWAYS_INLINE
618  const double &operator[](const unsigned int comp) const
619  {
620  AssertIndexRange(comp, 8);
621  return *(reinterpret_cast<const double *>(&data) + comp);
622  }
623 
627  DEAL_II_ALWAYS_INLINE
 628  VectorizedArray &
 629  operator+=(const VectorizedArray &vec)
630  {
631  // if the compiler supports vector arithmetics, we can simply use +=
632  // operator on the given data type. this allows the compiler to combine
633  // additions with multiplication (fused multiply-add) if those
634  // instructions are available. Otherwise, we need to use the built-in
635  // intrinsic command for __m512d
636 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
637  data += vec.data;
638 # else
639  data = _mm512_add_pd(data, vec.data);
640 # endif
641  return *this;
642  }
643 
647  DEAL_II_ALWAYS_INLINE
 648  VectorizedArray &
 649  operator-=(const VectorizedArray &vec)
650  {
651 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
652  data -= vec.data;
653 # else
654  data = _mm512_sub_pd(data, vec.data);
655 # endif
656  return *this;
657  }
661  DEAL_II_ALWAYS_INLINE
 662  VectorizedArray &
 663  operator*=(const VectorizedArray &vec)
664  {
665 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
666  data *= vec.data;
667 # else
668  data = _mm512_mul_pd(data, vec.data);
669 # endif
670  return *this;
671  }
672 
676  DEAL_II_ALWAYS_INLINE
 677  VectorizedArray &
 678  operator/=(const VectorizedArray &vec)
679  {
680 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
681  data /= vec.data;
682 # else
683  data = _mm512_div_pd(data, vec.data);
684 # endif
685  return *this;
686  }
687 
693  DEAL_II_ALWAYS_INLINE
694  void
695  load(const double *ptr)
696  {
697  data = _mm512_loadu_pd(ptr);
698  }
699 
706  DEAL_II_ALWAYS_INLINE
707  void
708  store(double *ptr) const
709  {
710  _mm512_storeu_pd(ptr, data);
711  }
712 
716  DEAL_II_ALWAYS_INLINE
717  void
718  streaming_store(double *ptr) const
719  {
720  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
721  ExcMessage("Memory not aligned"));
722  _mm512_stream_pd(ptr, data);
723  }
724 
737  DEAL_II_ALWAYS_INLINE
738  void
739  gather(const double *base_ptr, const unsigned int *offsets)
740  {
741  // unfortunately, there does not appear to be a 256 bit integer load, so
742  // do it by some reinterpret casts here. this is allowed because the Intel
743  // API allows aliasing between different vector types.
744  const __m256 index_val =
745  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
746  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
747  data = _mm512_i32gather_pd(index, base_ptr, 8);
748  }
749 
762  DEAL_II_ALWAYS_INLINE
763  void
764  scatter(const unsigned int *offsets, double *base_ptr) const
765  {
766  for (unsigned int i = 0; i < 8; ++i)
767  for (unsigned int j = i + 1; j < 8; ++j)
768  Assert(offsets[i] != offsets[j],
769  ExcMessage("Result of scatter undefined if two offset elements"
770  " point to the same position"));
771 
772  // unfortunately, there does not appear to be a 256 bit integer load, so
773  // do it by some reinterpret casts here. this is allowed because the Intel
774  // API allows aliasing between different vector types.
775  const __m256 index_val =
776  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
777  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
778  _mm512_i32scatter_pd(base_ptr, index, data, 8);
779  }
780 
785  __m512d data;
786 
787 private:
792  DEAL_II_ALWAYS_INLINE
 793  VectorizedArray
 794  get_sqrt() const
795  {
796  VectorizedArray res;
797  res.data = _mm512_sqrt_pd(data);
798  return res;
799  }
800 
805  DEAL_II_ALWAYS_INLINE
 806  VectorizedArray
 807  get_abs() const
808  {
809  // to compute the absolute value, perform bitwise andnot with -0. This
810  // will leave all value and exponent bits unchanged but force the sign
811  // value to +. Since there is no andnot for AVX512, we interpret the data
812  // as 64 bit integers and do the andnot on those types (note that andnot
813  // is a bitwise operation so the data type does not matter)
814  __m512d mask = _mm512_set1_pd(-0.);
815  VectorizedArray res;
816  res.data = reinterpret_cast<__m512d>(
817  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
818  reinterpret_cast<__m512i>(data)));
819  return res;
820  }
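//
// Worked example of the andnot trick used above: -0.0 has only the sign bit
// set (bit pattern 0x8000000000000000 for a 64-bit double), and andnot
// computes (~mask) & x, so every lane keeps its exponent and mantissa bits
// while the sign bit is forced to 0, which is exactly |x|.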
821 
826  DEAL_II_ALWAYS_INLINE
 827  VectorizedArray
 828  get_max(const VectorizedArray &other) const
829  {
830  VectorizedArray res;
831  res.data = _mm512_max_pd(data, other.data);
832  return res;
833  }
834 
839  DEAL_II_ALWAYS_INLINE
 840  VectorizedArray
 841  get_min(const VectorizedArray &other) const
842  {
843  VectorizedArray res;
844  res.data = _mm512_min_pd(data, other.data);
845  return res;
846  }
847 
 851  template <typename Number2>
 852  friend VectorizedArray<Number2>
 853  std::sqrt(const VectorizedArray<Number2> &);
 854  template <typename Number2>
 855  friend VectorizedArray<Number2>
 856  std::abs(const VectorizedArray<Number2> &);
 857  template <typename Number2>
 858  friend VectorizedArray<Number2>
 859  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
 860  template <typename Number2>
 861  friend VectorizedArray<Number2>
 862  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
863 };
864 
865 
866 
870 template <>
871 inline void
872 vectorized_load_and_transpose(const unsigned int n_entries,
873  const double * in,
874  const unsigned int * offsets,
 875  VectorizedArray<double> * out)
 876 {
877  const unsigned int n_chunks = n_entries / 4;
878  for (unsigned int outer = 0; outer < 8; outer += 4)
879  {
880  const double *in0 = in + offsets[0 + outer];
881  const double *in1 = in + offsets[1 + outer];
882  const double *in2 = in + offsets[2 + outer];
883  const double *in3 = in + offsets[3 + outer];
884 
885  for (unsigned int i = 0; i < n_chunks; ++i)
886  {
887  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
888  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
889  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
890  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
891  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
892  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
893  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
894  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
895  *reinterpret_cast<__m256d *>(
896  reinterpret_cast<double *>(&out[4 * i + 0].data) + outer) =
897  _mm256_unpacklo_pd(t0, t1);
898  *reinterpret_cast<__m256d *>(
899  reinterpret_cast<double *>(&out[4 * i + 1].data) + outer) =
900  _mm256_unpackhi_pd(t0, t1);
901  *reinterpret_cast<__m256d *>(
902  reinterpret_cast<double *>(&out[4 * i + 2].data) + outer) =
903  _mm256_unpacklo_pd(t2, t3);
904  *reinterpret_cast<__m256d *>(
905  reinterpret_cast<double *>(&out[4 * i + 3].data) + outer) =
906  _mm256_unpackhi_pd(t2, t3);
907  }
908  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
909  for (unsigned int v = 0; v < 4; ++v)
910  out[i][outer + v] = in[offsets[v + outer] + i];
911  }
912 }
913 
914 
915 
919 template <>
920 inline void
921 vectorized_transpose_and_store(const bool add_into,
922  const unsigned int n_entries,
923  const VectorizedArray<double> *in,
924  const unsigned int * offsets,
925  double * out)
926 {
927  const unsigned int n_chunks = n_entries / 4;
928  // do not do full transpose because the code is too long and will most
929  // likely not pay off. rather do the transposition on the vectorized array
930  // on size smaller, mm256d
931  for (unsigned int outer = 0; outer < 8; outer += 4)
932  {
933  double *out0 = out + offsets[0 + outer];
934  double *out1 = out + offsets[1 + outer];
935  double *out2 = out + offsets[2 + outer];
936  double *out3 = out + offsets[3 + outer];
937  for (unsigned int i = 0; i < n_chunks; ++i)
938  {
939  __m256d u0 = *reinterpret_cast<const __m256d *>(
940  reinterpret_cast<const double *>(&in[4 * i + 0].data) + outer);
941  __m256d u1 = *reinterpret_cast<const __m256d *>(
942  reinterpret_cast<const double *>(&in[4 * i + 1].data) + outer);
943  __m256d u2 = *reinterpret_cast<const __m256d *>(
944  reinterpret_cast<const double *>(&in[4 * i + 2].data) + outer);
945  __m256d u3 = *reinterpret_cast<const __m256d *>(
946  reinterpret_cast<const double *>(&in[4 * i + 3].data) + outer);
947  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
948  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
949  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
950  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
951  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
952  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
953  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
954  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
955 
956  // Cannot use the same store instructions in both paths of the 'if'
957  // because the compiler cannot know that there is no aliasing between
958  // pointers
959  if (add_into)
960  {
961  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
962  _mm256_storeu_pd(out0 + 4 * i, res0);
963  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
964  _mm256_storeu_pd(out1 + 4 * i, res1);
965  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
966  _mm256_storeu_pd(out2 + 4 * i, res2);
967  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
968  _mm256_storeu_pd(out3 + 4 * i, res3);
969  }
970  else
971  {
972  _mm256_storeu_pd(out0 + 4 * i, res0);
973  _mm256_storeu_pd(out1 + 4 * i, res1);
974  _mm256_storeu_pd(out2 + 4 * i, res2);
975  _mm256_storeu_pd(out3 + 4 * i, res3);
976  }
977  }
978  if (add_into)
979  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
980  for (unsigned int v = 0; v < 4; ++v)
981  out[offsets[v + outer] + i] += in[i][v + outer];
982  else
983  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
984  for (unsigned int v = 0; v < 4; ++v)
985  out[offsets[v + outer] + i] = in[i][v + outer];
986  }
987 }
988 
989 
990 
994 template <>
995 class VectorizedArray<float>
996 {
997 public:
1001  static const unsigned int n_array_elements = 16;
1002 
1006  DEAL_II_ALWAYS_INLINE
1007  VectorizedArray &
1008  operator=(const float x)
1009  {
1010  data = _mm512_set1_ps(x);
1011  return *this;
1012  }
1013 
1017  DEAL_II_ALWAYS_INLINE
1018  float &operator[](const unsigned int comp)
1019  {
1020  AssertIndexRange(comp, 16);
1021  return *(reinterpret_cast<float *>(&data) + comp);
1022  }
1023 
1027  DEAL_II_ALWAYS_INLINE
1028  const float &operator[](const unsigned int comp) const
1029  {
1030  AssertIndexRange(comp, 16);
1031  return *(reinterpret_cast<const float *>(&data) + comp);
1032  }
1033 
1037  DEAL_II_ALWAYS_INLINE
1038  VectorizedArray &
1039  operator+=(const VectorizedArray &vec)
1040  {
1041  // if the compiler supports vector arithmetics, we can simply use +=
1042  // operator on the given data type. this allows the compiler to combine
1043  // additions with multiplication (fused multiply-add) if those
1044  // instructions are available. Otherwise, we need to use the built-in
1045  // intrinsic command for __m512d
1046 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1047  data += vec.data;
1048 # else
1049  data = _mm512_add_ps(data, vec.data);
1050 # endif
1051  return *this;
1052  }
1053 
1057  DEAL_II_ALWAYS_INLINE
1058  VectorizedArray &
1059  operator-=(const VectorizedArray &vec)
1060  {
1061 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1062  data -= vec.data;
1063 # else
1064  data = _mm512_sub_ps(data, vec.data);
1065 # endif
1066  return *this;
1067  }
1071  DEAL_II_ALWAYS_INLINE
1072  VectorizedArray &
1073  operator*=(const VectorizedArray &vec)
1074  {
1075 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1076  data *= vec.data;
1077 # else
1078  data = _mm512_mul_ps(data, vec.data);
1079 # endif
1080  return *this;
1081  }
1082 
1086  DEAL_II_ALWAYS_INLINE
1087  VectorizedArray &
1088  operator/=(const VectorizedArray &vec)
1089  {
1090 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1091  data /= vec.data;
1092 # else
1093  data = _mm512_div_ps(data, vec.data);
1094 # endif
1095  return *this;
1096  }
1097 
1103  DEAL_II_ALWAYS_INLINE
1104  void
1105  load(const float *ptr)
1106  {
1107  data = _mm512_loadu_ps(ptr);
1108  }
1109 
1116  DEAL_II_ALWAYS_INLINE
1117  void
1118  store(float *ptr) const
1119  {
1120  _mm512_storeu_ps(ptr, data);
1121  }
1122 
1126  DEAL_II_ALWAYS_INLINE
1127  void
1128  streaming_store(float *ptr) const
1129  {
1130  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1131  ExcMessage("Memory not aligned"));
1132  _mm512_stream_ps(ptr, data);
1133  }
1134 
1147  DEAL_II_ALWAYS_INLINE
1148  void
1149  gather(const float *base_ptr, const unsigned int *offsets)
1150  {
1151  // unfortunately, there does not appear to be a 512 bit integer load, so
1152  // do it by some reinterpret casts here. this is allowed because the Intel
1153  // API allows aliasing between different vector types.
1154  const __m512 index_val =
1155  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1156  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1157  data = _mm512_i32gather_ps(index, base_ptr, 4);
1158  }
1159 
1172  DEAL_II_ALWAYS_INLINE
1173  void
1174  scatter(const unsigned int *offsets, float *base_ptr) const
1175  {
1176  for (unsigned int i = 0; i < 16; ++i)
1177  for (unsigned int j = i + 1; j < 16; ++j)
1178  Assert(offsets[i] != offsets[j],
1179  ExcMessage("Result of scatter undefined if two offset elements"
1180  " point to the same position"));
1181 
1182  // unfortunately, there does not appear to be a 512 bit integer load, so
1183  // do it by some reinterpret casts here. this is allowed because the Intel
1184  // API allows aliasing between different vector types.
1185  const __m512 index_val =
1186  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1187  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1188  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1189  }
1190 
1195  __m512 data;
1196 
1197 private:
1202  DEAL_II_ALWAYS_INLINE
 1203  VectorizedArray
 1204  get_sqrt() const
1205  {
1206  VectorizedArray res;
1207  res.data = _mm512_sqrt_ps(data);
1208  return res;
1209  }
1210 
1215  DEAL_II_ALWAYS_INLINE
 1216  VectorizedArray
 1217  get_abs() const
1218  {
1219  // to compute the absolute value, perform bitwise andnot with -0. This
1220  // will leave all value and exponent bits unchanged but force the sign
1221  // value to +. Since there is no andnot for AVX512, we interpret the data
1222  // as 32 bit integers and do the andnot on those types (note that andnot
1223  // is a bitwise operation so the data type does not matter)
1224  __m512 mask = _mm512_set1_ps(-0.f);
1225  VectorizedArray res;
1226  res.data = reinterpret_cast<__m512>(
1227  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1228  reinterpret_cast<__m512i>(data)));
1229  return res;
1230  }
1231 
1236  DEAL_II_ALWAYS_INLINE
 1237  VectorizedArray
 1238  get_max(const VectorizedArray &other) const
1239  {
1240  VectorizedArray res;
1241  res.data = _mm512_max_ps(data, other.data);
1242  return res;
1243  }
1244 
1249  DEAL_II_ALWAYS_INLINE
 1250  VectorizedArray
 1251  get_min(const VectorizedArray &other) const
1252  {
1253  VectorizedArray res;
1254  res.data = _mm512_min_ps(data, other.data);
1255  return res;
1256  }
1257 
 1261  template <typename Number2>
 1262  friend VectorizedArray<Number2>
 1263  std::sqrt(const VectorizedArray<Number2> &);
 1264  template <typename Number2>
 1265  friend VectorizedArray<Number2>
 1266  std::abs(const VectorizedArray<Number2> &);
 1267  template <typename Number2>
 1268  friend VectorizedArray<Number2>
 1269  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
 1270  template <typename Number2>
 1271  friend VectorizedArray<Number2>
 1272  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1273 };
1274 
1275 
1276 
1280 template <>
1281 inline void
1282 vectorized_load_and_transpose(const unsigned int n_entries,
1283  const float * in,
1284  const unsigned int * offsets,
 1285  VectorizedArray<float> * out)
 1286 {
1287  const unsigned int n_chunks = n_entries / 4;
1288  for (unsigned int outer = 0; outer < 16; outer += 8)
1289  {
1290  for (unsigned int i = 0; i < n_chunks; ++i)
1291  {
1292  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0 + outer]);
1293  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1 + outer]);
1294  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2 + outer]);
1295  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3 + outer]);
1296  __m128 u4 = _mm_loadu_ps(in + 4 * i + offsets[4 + outer]);
1297  __m128 u5 = _mm_loadu_ps(in + 4 * i + offsets[5 + outer]);
1298  __m128 u6 = _mm_loadu_ps(in + 4 * i + offsets[6 + outer]);
1299  __m128 u7 = _mm_loadu_ps(in + 4 * i + offsets[7 + outer]);
1300  // To avoid warnings about uninitialized variables, need to initialize
1301  // one variable with zero before using it.
1302  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
1303  t0 = _mm256_insertf128_ps(t3, u0, 0);
1304  t0 = _mm256_insertf128_ps(t0, u4, 1);
1305  t1 = _mm256_insertf128_ps(t3, u1, 0);
1306  t1 = _mm256_insertf128_ps(t1, u5, 1);
1307  t2 = _mm256_insertf128_ps(t3, u2, 0);
1308  t2 = _mm256_insertf128_ps(t2, u6, 1);
1309  t3 = _mm256_insertf128_ps(t3, u3, 0);
1310  t3 = _mm256_insertf128_ps(t3, u7, 1);
1311  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
1312  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
1313  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
1314  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
1315  *reinterpret_cast<__m256 *>(
1316  reinterpret_cast<float *>(&out[4 * i + 0].data) + outer) =
1317  _mm256_shuffle_ps(v0, v2, 0x88);
1318  *reinterpret_cast<__m256 *>(
1319  reinterpret_cast<float *>(&out[4 * i + 1].data) + outer) =
1320  _mm256_shuffle_ps(v0, v2, 0xdd);
1321  *reinterpret_cast<__m256 *>(
1322  reinterpret_cast<float *>(&out[4 * i + 2].data) + outer) =
1323  _mm256_shuffle_ps(v1, v3, 0x88);
1324  *reinterpret_cast<__m256 *>(
1325  reinterpret_cast<float *>(&out[4 * i + 3].data) + outer) =
1326  _mm256_shuffle_ps(v1, v3, 0xdd);
1327  }
1328  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1329  for (unsigned int v = 0; v < 8; ++v)
1330  out[i][v + outer] = in[offsets[v + outer] + i];
1331  }
1332 }
1333 
1334 
1335 
1339 template <>
1340 inline void
1341 vectorized_transpose_and_store(const bool add_into,
1342  const unsigned int n_entries,
1343  const VectorizedArray<float> *in,
1344  const unsigned int * offsets,
1345  float * out)
1346 {
1347  const unsigned int n_chunks = n_entries / 4;
1348  for (unsigned int outer = 0; outer < 16; outer += 8)
1349  {
1350  for (unsigned int i = 0; i < n_chunks; ++i)
1351  {
1352  __m256 u0 = *reinterpret_cast<const __m256 *>(
1353  reinterpret_cast<const float *>(&in[4 * i + 0].data) + outer);
1354  __m256 u1 = *reinterpret_cast<const __m256 *>(
1355  reinterpret_cast<const float *>(&in[4 * i + 1].data) + outer);
1356  __m256 u2 = *reinterpret_cast<const __m256 *>(
1357  reinterpret_cast<const float *>(&in[4 * i + 2].data) + outer);
1358  __m256 u3 = *reinterpret_cast<const __m256 *>(
1359  reinterpret_cast<const float *>(&in[4 * i + 3].data) + outer);
1360  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
1361  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
1362  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
1363  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
1364  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
1365  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
1366  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
1367  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
1368  __m128 res0 = _mm256_extractf128_ps(u0, 0);
1369  __m128 res4 = _mm256_extractf128_ps(u0, 1);
1370  __m128 res1 = _mm256_extractf128_ps(u1, 0);
1371  __m128 res5 = _mm256_extractf128_ps(u1, 1);
1372  __m128 res2 = _mm256_extractf128_ps(u2, 0);
1373  __m128 res6 = _mm256_extractf128_ps(u2, 1);
1374  __m128 res3 = _mm256_extractf128_ps(u3, 0);
1375  __m128 res7 = _mm256_extractf128_ps(u3, 1);
1376 
1377  // Cannot use the same store instructions in both paths of the 'if'
1378  // because the compiler cannot know that there is no aliasing between
1379  // pointers
1380  if (add_into)
1381  {
1382  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0 + outer]),
1383  res0);
1384  _mm_storeu_ps(out + 4 * i + offsets[0 + outer], res0);
1385  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1 + outer]),
1386  res1);
1387  _mm_storeu_ps(out + 4 * i + offsets[1 + outer], res1);
1388  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2 + outer]),
1389  res2);
1390  _mm_storeu_ps(out + 4 * i + offsets[2 + outer], res2);
1391  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3 + outer]),
1392  res3);
1393  _mm_storeu_ps(out + 4 * i + offsets[3 + outer], res3);
1394  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4 + outer]),
1395  res4);
1396  _mm_storeu_ps(out + 4 * i + offsets[4 + outer], res4);
1397  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5 + outer]),
1398  res5);
1399  _mm_storeu_ps(out + 4 * i + offsets[5 + outer], res5);
1400  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6 + outer]),
1401  res6);
1402  _mm_storeu_ps(out + 4 * i + offsets[6 + outer], res6);
1403  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7 + outer]),
1404  res7);
1405  _mm_storeu_ps(out + 4 * i + offsets[7 + outer], res7);
1406  }
1407  else
1408  {
1409  _mm_storeu_ps(out + 4 * i + offsets[0 + outer], res0);
1410  _mm_storeu_ps(out + 4 * i + offsets[1 + outer], res1);
1411  _mm_storeu_ps(out + 4 * i + offsets[2 + outer], res2);
1412  _mm_storeu_ps(out + 4 * i + offsets[3 + outer], res3);
1413  _mm_storeu_ps(out + 4 * i + offsets[4 + outer], res4);
1414  _mm_storeu_ps(out + 4 * i + offsets[5 + outer], res5);
1415  _mm_storeu_ps(out + 4 * i + offsets[6 + outer], res6);
1416  _mm_storeu_ps(out + 4 * i + offsets[7 + outer], res7);
1417  }
1418  }
1419  if (add_into)
1420  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1421  for (unsigned int v = 0; v < 8; ++v)
1422  out[offsets[v + outer] + i] += in[i][v + outer];
1423  else
1424  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1425  for (unsigned int v = 0; v < 8; ++v)
1426  out[offsets[v + outer] + i] = in[i][v + outer];
1427  }
1428 }
1429 
1430 
1431 
1432 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
1433 
1437 template <>
1438 class VectorizedArray<double>
1439 {
1440 public:
1444  static const unsigned int n_array_elements = 4;
1445 
1449  DEAL_II_ALWAYS_INLINE
1450  VectorizedArray &
1451  operator=(const double x)
1452  {
1453  data = _mm256_set1_pd(x);
1454  return *this;
1455  }
1456 
1460  DEAL_II_ALWAYS_INLINE
1461  double &operator[](const unsigned int comp)
1462  {
1463  AssertIndexRange(comp, 4);
1464  return *(reinterpret_cast<double *>(&data) + comp);
1465  }
1466 
1470  DEAL_II_ALWAYS_INLINE
1471  const double &operator[](const unsigned int comp) const
1472  {
1473  AssertIndexRange(comp, 4);
1474  return *(reinterpret_cast<const double *>(&data) + comp);
1475  }
1476 
1480  DEAL_II_ALWAYS_INLINE
1481  VectorizedArray &
1482  operator+=(const VectorizedArray &vec)
1483  {
1484  // if the compiler supports vector arithmetics, we can simply use +=
1485  // operator on the given data type. this allows the compiler to combine
1486  // additions with multiplication (fused multiply-add) if those
1487  // instructions are available. Otherwise, we need to use the built-in
1488  // intrinsic command for __m256d
1489 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1490  data += vec.data;
1491 # else
1492  data = _mm256_add_pd(data, vec.data);
1493 # endif
1494  return *this;
1495  }
1496 
1500  DEAL_II_ALWAYS_INLINE
1501  VectorizedArray &
1502  operator-=(const VectorizedArray &vec)
1503  {
1504 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1505  data -= vec.data;
1506 # else
1507  data = _mm256_sub_pd(data, vec.data);
1508 # endif
1509  return *this;
1510  }
1514  DEAL_II_ALWAYS_INLINE
1515  VectorizedArray &
1516  operator*=(const VectorizedArray &vec)
1517  {
1518 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1519  data *= vec.data;
1520 # else
1521  data = _mm256_mul_pd(data, vec.data);
1522 # endif
1523  return *this;
1524  }
1525 
1529  DEAL_II_ALWAYS_INLINE
1530  VectorizedArray &
1531  operator/=(const VectorizedArray &vec)
1532  {
1533 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1534  data /= vec.data;
1535 # else
1536  data = _mm256_div_pd(data, vec.data);
1537 # endif
1538  return *this;
1539  }
1540 
1546  DEAL_II_ALWAYS_INLINE
1547  void
1548  load(const double *ptr)
1549  {
1550  data = _mm256_loadu_pd(ptr);
1551  }
1552 
1559  DEAL_II_ALWAYS_INLINE
1560  void
1561  store(double *ptr) const
1562  {
1563  _mm256_storeu_pd(ptr, data);
1564  }
1565 
1569  DEAL_II_ALWAYS_INLINE
1570  void
1571  streaming_store(double *ptr) const
1572  {
1573  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
1574  ExcMessage("Memory not aligned"));
1575  _mm256_stream_pd(ptr, data);
1576  }
1577 
1590  DEAL_II_ALWAYS_INLINE
1591  void
1592  gather(const double *base_ptr, const unsigned int *offsets)
1593  {
1594 # ifdef __AVX2__
1595  // unfortunately, there does not appear to be a 128 bit integer load, so
1596  // do it by some reinterpret casts here. this is allowed because the Intel
1597  // API allows aliasing between different vector types.
1598  const __m128 index_val =
1599  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
1600  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
1601  data = _mm256_i32gather_pd(base_ptr, index, 8);
1602 # else
1603  for (unsigned int i = 0; i < 4; ++i)
1604  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1605 # endif
1606  }
1607 
1620  DEAL_II_ALWAYS_INLINE
1621  void
1622  scatter(const unsigned int *offsets, double *base_ptr) const
1623  {
1624  // no scatter operation in AVX/AVX2
1625  for (unsigned int i = 0; i < 4; ++i)
1626  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1627  }
1628 
1633  __m256d data;
1634 
1635 private:
1640  DEAL_II_ALWAYS_INLINE
 1641  VectorizedArray
 1642  get_sqrt() const
1643  {
1644  VectorizedArray res;
1645  res.data = _mm256_sqrt_pd(data);
1646  return res;
1647  }
1648 
1653  DEAL_II_ALWAYS_INLINE
 1654  VectorizedArray
 1655  get_abs() const
1656  {
1657  // to compute the absolute value, perform bitwise andnot with -0. This
1658  // will leave all value and exponent bits unchanged but force the sign
1659  // value to +.
1660  __m256d mask = _mm256_set1_pd(-0.);
1661  VectorizedArray res;
1662  res.data = _mm256_andnot_pd(mask, data);
1663  return res;
1664  }
1665 
1670  DEAL_II_ALWAYS_INLINE
 1671  VectorizedArray
 1672  get_max(const VectorizedArray &other) const
1673  {
1674  VectorizedArray res;
1675  res.data = _mm256_max_pd(data, other.data);
1676  return res;
1677  }
1678 
1683  DEAL_II_ALWAYS_INLINE
 1684  VectorizedArray
 1685  get_min(const VectorizedArray &other) const
1686  {
1687  VectorizedArray res;
1688  res.data = _mm256_min_pd(data, other.data);
1689  return res;
1690  }
1691 
 1695  template <typename Number2>
 1696  friend VectorizedArray<Number2>
 1697  std::sqrt(const VectorizedArray<Number2> &);
 1698  template <typename Number2>
 1699  friend VectorizedArray<Number2>
 1700  std::abs(const VectorizedArray<Number2> &);
 1701  template <typename Number2>
 1702  friend VectorizedArray<Number2>
 1703  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
 1704  template <typename Number2>
 1705  friend VectorizedArray<Number2>
 1706  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1707 };
1708 
1709 
1710 
1714 template <>
1715 inline void
1716 vectorized_load_and_transpose(const unsigned int n_entries,
1717  const double * in,
1718  const unsigned int * offsets,
 1719  VectorizedArray<double> * out)
 1720 {
1721  const unsigned int n_chunks = n_entries / 4;
1722  const double * in0 = in + offsets[0];
1723  const double * in1 = in + offsets[1];
1724  const double * in2 = in + offsets[2];
1725  const double * in3 = in + offsets[3];
1726 
1727  for (unsigned int i = 0; i < n_chunks; ++i)
1728  {
1729  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
1730  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
1731  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
1732  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
1733  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
1734  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
1735  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
1736  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
1737  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
1738  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
1739  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
1740  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
1741  }
1742  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1743  for (unsigned int v = 0; v < 4; ++v)
1744  out[i][v] = in[offsets[v] + i];
1745 }
1746 
1747 
1748 
1752 template <>
1753 inline void
1754 vectorized_transpose_and_store(const bool add_into,
1755  const unsigned int n_entries,
1756  const VectorizedArray<double> *in,
1757  const unsigned int * offsets,
1758  double * out)
1759 {
1760  const unsigned int n_chunks = n_entries / 4;
1761  double * out0 = out + offsets[0];
1762  double * out1 = out + offsets[1];
1763  double * out2 = out + offsets[2];
1764  double * out3 = out + offsets[3];
1765  for (unsigned int i = 0; i < n_chunks; ++i)
1766  {
1767  __m256d u0 = in[4 * i + 0].data;
1768  __m256d u1 = in[4 * i + 1].data;
1769  __m256d u2 = in[4 * i + 2].data;
1770  __m256d u3 = in[4 * i + 3].data;
1771  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
1772  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
1773  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
1774  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
1775  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
1776  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
1777  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
1778  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
1779 
1780  // Cannot use the same store instructions in both paths of the 'if'
1781  // because the compiler cannot know that there is no aliasing between
1782  // pointers
1783  if (add_into)
1784  {
1785  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
1786  _mm256_storeu_pd(out0 + 4 * i, res0);
1787  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
1788  _mm256_storeu_pd(out1 + 4 * i, res1);
1789  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
1790  _mm256_storeu_pd(out2 + 4 * i, res2);
1791  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
1792  _mm256_storeu_pd(out3 + 4 * i, res3);
1793  }
1794  else
1795  {
1796  _mm256_storeu_pd(out0 + 4 * i, res0);
1797  _mm256_storeu_pd(out1 + 4 * i, res1);
1798  _mm256_storeu_pd(out2 + 4 * i, res2);
1799  _mm256_storeu_pd(out3 + 4 * i, res3);
1800  }
1801  }
1802  if (add_into)
1803  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1804  for (unsigned int v = 0; v < 4; ++v)
1805  out[offsets[v] + i] += in[i][v];
1806  else
1807  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1808  for (unsigned int v = 0; v < 4; ++v)
1809  out[offsets[v] + i] = in[i][v];
1810 }
1811 
1812 
1813 
1817 template <>
1818 class VectorizedArray<float>
1819 {
1820 public:
1824  static const unsigned int n_array_elements = 8;
1825 
1829  DEAL_II_ALWAYS_INLINE
1830  VectorizedArray &
1831  operator=(const float x)
1832  {
1833  data = _mm256_set1_ps(x);
1834  return *this;
1835  }
1836 
1840  DEAL_II_ALWAYS_INLINE
1841  float &operator[](const unsigned int comp)
1842  {
1843  AssertIndexRange(comp, 8);
1844  return *(reinterpret_cast<float *>(&data) + comp);
1845  }
1846 
1850  DEAL_II_ALWAYS_INLINE
1851  const float &operator[](const unsigned int comp) const
1852  {
1853  AssertIndexRange(comp, 8);
1854  return *(reinterpret_cast<const float *>(&data) + comp);
1855  }
1856 
1860  DEAL_II_ALWAYS_INLINE
1861  VectorizedArray &
1862  operator+=(const VectorizedArray &vec)
1863  {
1864  // if the compiler supports vector arithmetics, we can simply use +=
1865  // operator on the given data type. this allows the compiler to combine
1866  // additions with multiplication (fused multiply-add) if those
1867  // instructions are available. Otherwise, we need to use the built-in
1868  // intrinsic command for __m256d
1869 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1870  data += vec.data;
1871 # else
1872  data = _mm256_add_ps(data, vec.data);
1873 # endif
1874  return *this;
1875  }
1876 
1880  DEAL_II_ALWAYS_INLINE
1881  VectorizedArray &
1882  operator-=(const VectorizedArray &vec)
1883  {
1884 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1885  data -= vec.data;
1886 # else
1887  data = _mm256_sub_ps(data, vec.data);
1888 # endif
1889  return *this;
1890  }
1894  DEAL_II_ALWAYS_INLINE
1895  VectorizedArray &
1896  operator*=(const VectorizedArray &vec)
1897  {
1898 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1899  data *= vec.data;
1900 # else
1901  data = _mm256_mul_ps(data, vec.data);
1902 # endif
1903  return *this;
1904  }
1905 
1909  DEAL_II_ALWAYS_INLINE
1910  VectorizedArray &
1911  operator/=(const VectorizedArray &vec)
1912  {
1913 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1914  data /= vec.data;
1915 # else
1916  data = _mm256_div_ps(data, vec.data);
1917 # endif
1918  return *this;
1919  }
1920 
1926  DEAL_II_ALWAYS_INLINE
1927  void
1928  load(const float *ptr)
1929  {
1930  data = _mm256_loadu_ps(ptr);
1931  }
1932 
1939  DEAL_II_ALWAYS_INLINE
1940  void
1941  store(float *ptr) const
1942  {
1943  _mm256_storeu_ps(ptr, data);
1944  }
1945 
1949  DEAL_II_ALWAYS_INLINE
1950  void
1951  streaming_store(float *ptr) const
1952  {
1953  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
1954  ExcMessage("Memory not aligned"));
1955  _mm256_stream_ps(ptr, data);
1956  }
1957 
1970  DEAL_II_ALWAYS_INLINE
1971  void
1972  gather(const float *base_ptr, const unsigned int *offsets)
1973  {
1974 # ifdef __AVX2__
1975  // unfortunately, there does not appear to be a 256 bit integer load, so
1976  // do it by some reinterpret casts here. this is allowed because the Intel
1977  // API allows aliasing between different vector types.
1978  const __m256 index_val =
1979  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1980  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1981  data = _mm256_i32gather_ps(base_ptr, index, 4);
1982 # else
1983  for (unsigned int i = 0; i < 8; ++i)
1984  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
1985 # endif
1986  }
1987 
2000  DEAL_II_ALWAYS_INLINE
2001  void
2002  scatter(const unsigned int *offsets, float *base_ptr) const
2003  {
2004  // no scatter operation in AVX/AVX2
2005  for (unsigned int i = 0; i < 8; ++i)
2006  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2007  }
2008 
2013  __m256 data;
2014 
2015 private:
2020  DEAL_II_ALWAYS_INLINE
 2021  VectorizedArray
 2022  get_sqrt() const
2023  {
2024  VectorizedArray res;
2025  res.data = _mm256_sqrt_ps(data);
2026  return res;
2027  }
2028 
2033  DEAL_II_ALWAYS_INLINE
 2034  VectorizedArray
 2035  get_abs() const
2036  {
2037  // to compute the absolute value, perform bitwise andnot with -0. This
2038  // will leave all value and exponent bits unchanged but force the sign
2039  // value to +.
2040  __m256 mask = _mm256_set1_ps(-0.f);
2041  VectorizedArray res;
2042  res.data = _mm256_andnot_ps(mask, data);
2043  return res;
2044  }
2045 
2050  DEAL_II_ALWAYS_INLINE
 2051  VectorizedArray
 2052  get_max(const VectorizedArray &other) const
2053  {
2054  VectorizedArray res;
2055  res.data = _mm256_max_ps(data, other.data);
2056  return res;
2057  }
2058 
2063  DEAL_II_ALWAYS_INLINE
 2064  VectorizedArray
 2065  get_min(const VectorizedArray &other) const
2066  {
2067  VectorizedArray res;
2068  res.data = _mm256_min_ps(data, other.data);
2069  return res;
2070  }
2071 
 2075  template <typename Number2>
 2076  friend VectorizedArray<Number2>
 2077  std::sqrt(const VectorizedArray<Number2> &);
 2078  template <typename Number2>
 2079  friend VectorizedArray<Number2>
 2080  std::abs(const VectorizedArray<Number2> &);
 2081  template <typename Number2>
 2082  friend VectorizedArray<Number2>
 2083  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
 2084  template <typename Number2>
 2085  friend VectorizedArray<Number2>
 2086  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2087 };
2088 
2089 
2090 
2094 template <>
2095 inline void
2096 vectorized_load_and_transpose(const unsigned int n_entries,
2097  const float * in,
2098  const unsigned int * offsets,
 2099  VectorizedArray<float> * out)
 2100 {
2101  const unsigned int n_chunks = n_entries / 4;
2102  for (unsigned int i = 0; i < n_chunks; ++i)
2103  {
2104  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
2105  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
2106  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
2107  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
2108  __m128 u4 = _mm_loadu_ps(in + 4 * i + offsets[4]);
2109  __m128 u5 = _mm_loadu_ps(in + 4 * i + offsets[5]);
2110  __m128 u6 = _mm_loadu_ps(in + 4 * i + offsets[6]);
2111  __m128 u7 = _mm_loadu_ps(in + 4 * i + offsets[7]);
2112  // To avoid warnings about uninitialized variables, need to initialize
2113  // one variable with zero before using it.
2114  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
2115  t0 = _mm256_insertf128_ps(t3, u0, 0);
2116  t0 = _mm256_insertf128_ps(t0, u4, 1);
2117  t1 = _mm256_insertf128_ps(t3, u1, 0);
2118  t1 = _mm256_insertf128_ps(t1, u5, 1);
2119  t2 = _mm256_insertf128_ps(t3, u2, 0);
2120  t2 = _mm256_insertf128_ps(t2, u6, 1);
2121  t3 = _mm256_insertf128_ps(t3, u3, 0);
2122  t3 = _mm256_insertf128_ps(t3, u7, 1);
2123  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2124  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2125  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2126  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2127  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2128  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2129  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2130  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2131  }
2132  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2133  for (unsigned int v = 0; v < 8; ++v)
2134  out[i][v] = in[offsets[v] + i];
2135 }
2136 
2137 
2138 
2142 template <>
2143 inline void
2144 vectorized_transpose_and_store(const bool add_into,
2145  const unsigned int n_entries,
2146  const VectorizedArray<float> *in,
2147  const unsigned int * offsets,
2148  float * out)
2149 {
2150  const unsigned int n_chunks = n_entries / 4;
2151  for (unsigned int i = 0; i < n_chunks; ++i)
2152  {
2153  __m256 u0 = in[4 * i + 0].data;
2154  __m256 u1 = in[4 * i + 1].data;
2155  __m256 u2 = in[4 * i + 2].data;
2156  __m256 u3 = in[4 * i + 3].data;
2157  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
2158  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
2159  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
2160  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
2161  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
2162  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
2163  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
2164  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
2165  __m128 res0 = _mm256_extractf128_ps(u0, 0);
2166  __m128 res4 = _mm256_extractf128_ps(u0, 1);
2167  __m128 res1 = _mm256_extractf128_ps(u1, 0);
2168  __m128 res5 = _mm256_extractf128_ps(u1, 1);
2169  __m128 res2 = _mm256_extractf128_ps(u2, 0);
2170  __m128 res6 = _mm256_extractf128_ps(u2, 1);
2171  __m128 res3 = _mm256_extractf128_ps(u3, 0);
2172  __m128 res7 = _mm256_extractf128_ps(u3, 1);
2173 
2174  // Cannot use the same store instructions in both paths of the 'if'
2175  // because the compiler cannot know that there is no aliasing between
2176  // pointers
2177  if (add_into)
2178  {
2179  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2180  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2181  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2182  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2183  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2184  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2185  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2186  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2187  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2188  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2189  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2190  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2191  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2192  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2193  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2194  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2195  }
2196  else
2197  {
2198  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2199  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2200  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2201  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2202  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2203  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2204  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2205  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2206  }
2207  }
2208  if (add_into)
2209  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2210  for (unsigned int v = 0; v < 8; ++v)
2211  out[offsets[v] + i] += in[i][v];
2212  else
2213  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2214  for (unsigned int v = 0; v < 8; ++v)
2215  out[offsets[v] + i] = in[i][v];
2216 }
2217 
2218 
2219 
2220 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
2221 
2225 template <>
2226 class VectorizedArray<double>
2227 {
2228 public:
2232  static const unsigned int n_array_elements = 2;
2233 
2237  DEAL_II_ALWAYS_INLINE
2238  VectorizedArray &
2239  operator=(const double x)
2240  {
2241  data = _mm_set1_pd(x);
2242  return *this;
2243  }
2244 
2248  DEAL_II_ALWAYS_INLINE
2249  double &operator[](const unsigned int comp)
2250  {
2251  AssertIndexRange(comp, 2);
2252  return *(reinterpret_cast<double *>(&data) + comp);
2253  }
2254 
2258  DEAL_II_ALWAYS_INLINE
2259  const double &operator[](const unsigned int comp) const
2260  {
2261  AssertIndexRange(comp, 2);
2262  return *(reinterpret_cast<const double *>(&data) + comp);
2263  }
2264 
2268  DEAL_II_ALWAYS_INLINE
2269  VectorizedArray &
2270  operator+=(const VectorizedArray &vec)
2271  {
2272 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2273  data += vec.data;
2274 # else
2275  data = _mm_add_pd(data, vec.data);
2276 # endif
2277  return *this;
2278  }
2279 
2283  DEAL_II_ALWAYS_INLINE
2284  VectorizedArray &
2285  operator-=(const VectorizedArray &vec)
2286  {
2287 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2288  data -= vec.data;
2289 # else
2290  data = _mm_sub_pd(data, vec.data);
2291 # endif
2292  return *this;
2293  }
2294 
2298  DEAL_II_ALWAYS_INLINE
2299  VectorizedArray &
2300  operator*=(const VectorizedArray &vec)
2301  {
2302 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2303  data *= vec.data;
2304 # else
2305  data = _mm_mul_pd(data, vec.data);
2306 # endif
2307  return *this;
2308  }
2309 
2313  DEAL_II_ALWAYS_INLINE
2314  VectorizedArray &
2315  operator/=(const VectorizedArray &vec)
2316  {
2317 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2318  data /= vec.data;
2319 # else
2320  data = _mm_div_pd(data, vec.data);
2321 # endif
2322  return *this;
2323  }
2324 
2330  DEAL_II_ALWAYS_INLINE
2331  void
2332  load(const double *ptr)
2333  {
2334  data = _mm_loadu_pd(ptr);
2335  }
2336 
2343  DEAL_II_ALWAYS_INLINE
2344  void
2345  store(double *ptr) const
2346  {
2347  _mm_storeu_pd(ptr, data);
2348  }
2349 
2353  DEAL_II_ALWAYS_INLINE
2354  void
2355  streaming_store(double *ptr) const
2356  {
2357  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2358  ExcMessage("Memory not aligned"));
2359  _mm_stream_pd(ptr, data);
2360  }
2361 
2374  DEAL_II_ALWAYS_INLINE
2375  void
2376  gather(const double *base_ptr, const unsigned int *offsets)
2377  {
2378  for (unsigned int i = 0; i < 2; ++i)
2379  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2380  }
2381 
2394  DEAL_II_ALWAYS_INLINE
2395  void
2396  scatter(const unsigned int *offsets, double *base_ptr) const
2397  {
2398  for (unsigned int i = 0; i < 2; ++i)
2399  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2400  }
2401 
2406  __m128d data;
2407 
2408 private:
2413  DEAL_II_ALWAYS_INLINE
 2414  VectorizedArray
 2415  get_sqrt() const
2416  {
2417  VectorizedArray res;
2418  res.data = _mm_sqrt_pd(data);
2419  return res;
2420  }
2421 
2426  DEAL_II_ALWAYS_INLINE
 2427  VectorizedArray
 2428  get_abs() const
2429  {
2430  // to compute the absolute value, perform
2431  // bitwise andnot with -0. This will leave all
2432  // value and exponent bits unchanged but force
2433  // the sign value to +.
2434  __m128d mask = _mm_set1_pd(-0.);
2435  VectorizedArray res;
2436  res.data = _mm_andnot_pd(mask, data);
2437  return res;
2438  }
2439 
2444  DEAL_II_ALWAYS_INLINE
 2445  VectorizedArray
 2446  get_max(const VectorizedArray &other) const
2447  {
2448  VectorizedArray res;
2449  res.data = _mm_max_pd(data, other.data);
2450  return res;
2451  }
2452 
2457  DEAL_II_ALWAYS_INLINE
 2458  VectorizedArray
 2459  get_min(const VectorizedArray &other) const
2460  {
2461  VectorizedArray res;
2462  res.data = _mm_min_pd(data, other.data);
2463  return res;
2464  }
2465 
 2469  template <typename Number2>
 2470  friend VectorizedArray<Number2>
 2471  std::sqrt(const VectorizedArray<Number2> &);
 2472  template <typename Number2>
 2473  friend VectorizedArray<Number2>
 2474  std::abs(const VectorizedArray<Number2> &);
 2475  template <typename Number2>
 2476  friend VectorizedArray<Number2>
 2477  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
 2478  template <typename Number2>
 2479  friend VectorizedArray<Number2>
 2480  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2481 };
2482 
2483 
2484 
2488 template <>
2489 inline void
2490 vectorized_load_and_transpose(const unsigned int n_entries,
2491  const double * in,
2492  const unsigned int * offsets,
 2493  VectorizedArray<double> * out)
 2494 {
2495  const unsigned int n_chunks = n_entries / 2;
2496  for (unsigned int i = 0; i < n_chunks; ++i)
2497  {
2498  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
2499  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
2500  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
2501  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
2502  }
2503  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2504  for (unsigned int v = 0; v < 2; ++v)
2505  out[i][v] = in[offsets[v] + i];
2506 }
2507 
2508 
2509 
2513 template <>
2514 inline void
2515 vectorized_transpose_and_store(const bool add_into,
2516  const unsigned int n_entries,
2517  const VectorizedArray<double> *in,
2518  const unsigned int * offsets,
2519  double * out)
2520 {
2521  const unsigned int n_chunks = n_entries / 2;
2522  if (add_into)
2523  {
2524  for (unsigned int i = 0; i < n_chunks; ++i)
2525  {
2526  __m128d u0 = in[2 * i + 0].data;
2527  __m128d u1 = in[2 * i + 1].data;
2528  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2529  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2530  _mm_storeu_pd(out + 2 * i + offsets[0],
2531  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
2532  res0));
2533  _mm_storeu_pd(out + 2 * i + offsets[1],
2534  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
2535  res1));
2536  }
2537  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2538  for (unsigned int v = 0; v < 2; ++v)
2539  out[offsets[v] + i] += in[i][v];
2540  }
2541  else
2542  {
2543  for (unsigned int i = 0; i < n_chunks; ++i)
2544  {
2545  __m128d u0 = in[2 * i + 0].data;
2546  __m128d u1 = in[2 * i + 1].data;
2547  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2548  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2549  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
2550  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
2551  }
2552  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2553  for (unsigned int v = 0; v < 2; ++v)
2554  out[offsets[v] + i] = in[i][v];
2555  }
2556 }
2557 
2558 
2559 
2563 template <>
2564 class VectorizedArray<float>
2565 {
2566 public:
2570  static const unsigned int n_array_elements = 4;
2571 
2576  DEAL_II_ALWAYS_INLINE
2577  VectorizedArray &
2578  operator=(const float x)
2579  {
2580  data = _mm_set1_ps(x);
2581  return *this;
2582  }
2583 
2587  DEAL_II_ALWAYS_INLINE
2588  float &operator[](const unsigned int comp)
2589  {
2590  AssertIndexRange(comp, 4);
2591  return *(reinterpret_cast<float *>(&data) + comp);
2592  }
2593 
2597  DEAL_II_ALWAYS_INLINE
2598  const float &operator[](const unsigned int comp) const
2599  {
2600  AssertIndexRange(comp, 4);
2601  return *(reinterpret_cast<const float *>(&data) + comp);
2602  }
2603 
2607  DEAL_II_ALWAYS_INLINE
2608  VectorizedArray &
2609  operator+=(const VectorizedArray &vec)
2610  {
2611 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2612  data += vec.data;
2613 # else
2614  data = _mm_add_ps(data, vec.data);
2615 # endif
2616  return *this;
2617  }
2618 
2622  DEAL_II_ALWAYS_INLINE
2623  VectorizedArray &
2624  operator-=(const VectorizedArray &vec)
2625  {
2626 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2627  data -= vec.data;
2628 # else
2629  data = _mm_sub_ps(data, vec.data);
2630 # endif
2631  return *this;
2632  }
2633 
2637  DEAL_II_ALWAYS_INLINE
2638  VectorizedArray &
2639  operator*=(const VectorizedArray &vec)
2640  {
2641 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2642  data *= vec.data;
2643 # else
2644  data = _mm_mul_ps(data, vec.data);
2645 # endif
2646  return *this;
2647  }
2648 
2652  DEAL_II_ALWAYS_INLINE
2653  VectorizedArray &
2654  operator/=(const VectorizedArray &vec)
2655  {
2656 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2657  data /= vec.data;
2658 # else
2659  data = _mm_div_ps(data, vec.data);
2660 # endif
2661  return *this;
2662  }
2663 
2669  DEAL_II_ALWAYS_INLINE
2670  void
2671  load(const float *ptr)
2672  {
2673  data = _mm_loadu_ps(ptr);
2674  }
2675 
2682  DEAL_II_ALWAYS_INLINE
2683  void
2684  store(float *ptr) const
2685  {
2686  _mm_storeu_ps(ptr, data);
2687  }
2688 
2692  DEAL_II_ALWAYS_INLINE
2693  void
2694  streaming_store(float *ptr) const
2695  {
2696  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2697  ExcMessage("Memory not aligned"));
2698  _mm_stream_ps(ptr, data);
2699  }
2700 
2713  DEAL_II_ALWAYS_INLINE
2714  void
2715  gather(const float *base_ptr, const unsigned int *offsets)
2716  {
2717  for (unsigned int i = 0; i < 4; ++i)
2718  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2719  }
2720 
2733  DEAL_II_ALWAYS_INLINE
2734  void
2735  scatter(const unsigned int *offsets, float *base_ptr) const
2736  {
2737  for (unsigned int i = 0; i < 4; ++i)
2738  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2739  }
2740 
2745  __m128 data;
2746 
2747 private:
2752  DEAL_II_ALWAYS_INLINE
2753  VectorizedArray
2754  get_sqrt() const
2755  {
2756  VectorizedArray res;
2757  res.data = _mm_sqrt_ps(data);
2758  return res;
2759  }
2760 
2765  DEAL_II_ALWAYS_INLINE
2766  VectorizedArray
2767  get_abs() const
2768  {
2769  // to compute the absolute value, perform bitwise andnot with -0. This
2770  // will leave all value and exponent bits unchanged but force the sign
2771  // value to +.
2772  __m128 mask = _mm_set1_ps(-0.f);
2773  VectorizedArray res;
2774  res.data = _mm_andnot_ps(mask, data);
2775  return res;
2776  }
2777 
2782  DEAL_II_ALWAYS_INLINE
2783  VectorizedArray
2784  get_max(const VectorizedArray &other) const
2785  {
2786  VectorizedArray res;
2787  res.data = _mm_max_ps(data, other.data);
2788  return res;
2789  }
2790 
2795  DEAL_II_ALWAYS_INLINE
2796  VectorizedArray
2797  get_min(const VectorizedArray &other) const
2798  {
2799  VectorizedArray res;
2800  res.data = _mm_min_ps(data, other.data);
2801  return res;
2802  }
2803 
2807  template <typename Number2>
2808  friend VectorizedArray<Number2>
2809  std::sqrt(const VectorizedArray<Number2> &);
2810  template <typename Number2>
2811  friend VectorizedArray<Number2>
2812  std::abs(const VectorizedArray<Number2> &);
2813  template <typename Number2>
2814  friend VectorizedArray<Number2>
2815  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2816  template <typename Number2>
2817  friend VectorizedArray<Number2>
2818  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2819 };
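
This specialization packs four floats into one __m128, so each of the operators above issues a single SSE instruction for all four lanes. A minimal sketch of the element-wise interface (array and variable names are illustrative):

   float in[4]  = {1.f, 2.f, 3.f, 4.f};
   float out[4];
   VectorizedArray<float> x, two;
   x.load(in);    // unaligned load of four floats
   two = 2.0f;    // broadcast a scalar into all four lanes
   x *= two;      // one _mm_mul_ps covers all lanes
   x.store(out);  // out now holds {2, 4, 6, 8}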
2820 
2821 
2822 
2826 template <>
2827 inline void
2828 vectorized_load_and_transpose(const unsigned int n_entries,
2829  const float * in,
2830  const unsigned int * offsets,
2831  VectorizedArray<float> * out)
2832 {
2833  const unsigned int n_chunks = n_entries / 4;
2834  for (unsigned int i = 0; i < n_chunks; ++i)
2835  {
2836  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
2837  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
2838  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
2839  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
2840  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2841  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2842  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2843  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2844  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2845  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2846  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2847  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2848  }
2849  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2850  for (unsigned int v = 0; v < 4; ++v)
2851  out[i][v] = in[offsets[v] + i];
2852 }
2853 
2854 
2855 
2859 template <>
2860 inline void
2861 vectorized_transpose_and_store(const bool add_into,
2862  const unsigned int n_entries,
2863  const VectorizedArray<float> *in,
2864  const unsigned int * offsets,
2865  float * out)
2866 {
2867  const unsigned int n_chunks = n_entries / 4;
2868  for (unsigned int i = 0; i < n_chunks; ++i)
2869  {
2870  __m128 u0 = in[4 * i + 0].data;
2871  __m128 u1 = in[4 * i + 1].data;
2872  __m128 u2 = in[4 * i + 2].data;
2873  __m128 u3 = in[4 * i + 3].data;
2874  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2875  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2876  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2877  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2878  u0 = _mm_shuffle_ps(t0, t2, 0x88);
2879  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2880  u2 = _mm_shuffle_ps(t1, t3, 0x88);
2881  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2882 
2883  // Cannot use the same store instructions in both paths of the 'if'
2884  // because the compiler cannot know that there is no aliasing between
2885  // pointers
2886  if (add_into)
2887  {
2888  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
2889  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2890  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
2891  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2892  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
2893  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2894  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
2895  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2896  }
2897  else
2898  {
2899  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2900  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2901  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2902  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2903  }
2904  }
2905  if (add_into)
2906  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2907  for (unsigned int v = 0; v < 4; ++v)
2908  out[offsets[v] + i] += in[i][v];
2909  else
2910  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2911  for (unsigned int v = 0; v < 4; ++v)
2912  out[offsets[v] + i] = in[i][v];
2913 }
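
The shuffle constants 0x44, 0xee, 0x88 and 0xdd implement a 4x4 in-register transpose. In scalar terms, each chunk of the store routine above is equivalent to the following index arithmetic (a sketch only, not library code):

   for (unsigned int j = 0; j < 4; ++j)     // position within the chunk
     for (unsigned int v = 0; v < 4; ++v)   // SIMD lane
       out[offsets[v] + 4 * i + j] += in[4 * i + j][v]; // '=' when add_into is false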
2914 
2915 
2916 
2917 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0 && defined(__SSE2__)
2918 
2919 
2920 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) && \
2921  defined(__VSX__)
2922 
2923 template <>
2924 class VectorizedArray<double>
2925 {
2926 public:
2930  static const unsigned int n_array_elements = 2;
2931 
2935  DEAL_II_ALWAYS_INLINE
2936  VectorizedArray &
2937  operator=(const double x)
2938  {
2939  data = vec_splats(x);
2940  return *this;
2941  }
2942 
2946  DEAL_II_ALWAYS_INLINE
2947  double &operator[](const unsigned int comp)
2948  {
2949  AssertIndexRange(comp, 2);
2950  return *(reinterpret_cast<double *>(&data) + comp);
2951  }
2952 
2956  DEAL_II_ALWAYS_INLINE
2957  const double &operator[](const unsigned int comp) const
2958  {
2959  AssertIndexRange(comp, 2);
2960  return *(reinterpret_cast<const double *>(&data) + comp);
2961  }
2962 
2966  DEAL_II_ALWAYS_INLINE
2967  VectorizedArray &
2968  operator+=(const VectorizedArray &vec)
2969  {
2970  data = vec_add(data, vec.data);
2971  return *this;
2972  }
2973 
2977  DEAL_II_ALWAYS_INLINE
2978  VectorizedArray &
2979  operator-=(const VectorizedArray &vec)
2980  {
2981  data = vec_sub(data, vec.data);
2982  return *this;
2983  }
2984 
2988  DEAL_II_ALWAYS_INLINE
2989  VectorizedArray &
2990  operator*=(const VectorizedArray &vec)
2991  {
2992  data = vec_mul(data, vec.data);
2993  return *this;
2994  }
2995 
2999  DEAL_II_ALWAYS_INLINE
3000  VectorizedArray &
3001  operator/=(const VectorizedArray &vec)
3002  {
3003  data = vec_div(data, vec.data);
3004  return *this;
3005  }
3006 
3011  DEAL_II_ALWAYS_INLINE
3012  void
3013  load(const double *ptr)
3014  {
3015  data = vec_vsx_ld(0, ptr);
3016  }
3017 
3022  DEAL_II_ALWAYS_INLINE
3023  void
3024  store(double *ptr) const
3025  {
3026  vec_vsx_st(data, 0, ptr);
3027  }
3028 
3031  DEAL_II_ALWAYS_INLINE
3032  void
3033  streaming_store(double *ptr) const
3034  {
3035  store(ptr);
3036  }
3037 
3040  DEAL_II_ALWAYS_INLINE
3041  void
3042  gather(const double *base_ptr, const unsigned int *offsets)
3043  {
3044  for (unsigned int i = 0; i < 2; ++i)
3045  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3046  }
3047 
3050  DEAL_II_ALWAYS_INLINE
3051  void
3052  scatter(const unsigned int *offsets, double *base_ptr) const
3053  {
3054  for (unsigned int i = 0; i < 2; ++i)
3055  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3056  }
3057 
3062  __vector double data;
3063 
3064 private:
3069  DEAL_II_ALWAYS_INLINE
3070  VectorizedArray
3071  get_sqrt() const
3072  {
3073  VectorizedArray res;
3074  res.data = vec_sqrt(data);
3075  return res;
3076  }
3077 
3082  DEAL_II_ALWAYS_INLINE
3083  VectorizedArray
3084  get_abs() const
3085  {
3086  VectorizedArray res;
3087  res.data = vec_abs(data);
3088  return res;
3089  }
3090 
3095  DEAL_II_ALWAYS_INLINE
3096  VectorizedArray
3097  get_max(const VectorizedArray &other) const
3098  {
3099  VectorizedArray res;
3100  res.data = vec_max(data, other.data);
3101  return res;
3102  }
3103 
3108  DEAL_II_ALWAYS_INLINE
3109  VectorizedArray
3110  get_min(const VectorizedArray &other) const
3111  {
3112  VectorizedArray res;
3113  res.data = vec_min(data, other.data);
3114  return res;
3115  }
3116 
3120  template <typename Number2>
3121  friend VectorizedArray<Number2>
3122  std::sqrt(const VectorizedArray<Number2> &);
3123  template <typename Number2>
3124  friend VectorizedArray<Number2>
3125  std::abs(const VectorizedArray<Number2> &);
3126  template <typename Number2>
3127  friend VectorizedArray<Number2>
3128  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
3129  template <typename Number2>
3130  friend VectorizedArray<Number2>
3131  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
3132 };
3133 
3134 
3135 
3136 template <>
3137 class VectorizedArray<float>
3138 {
3139 public:
3143  static const unsigned int n_array_elements = 4;
3144 
3148  DEAL_II_ALWAYS_INLINE
3149  VectorizedArray &
3150  operator=(const float x)
3151  {
3152  data = vec_splats(x);
3153  return *this;
3154  }
3155 
3159  DEAL_II_ALWAYS_INLINE
3160  float &operator[](const unsigned int comp)
3161  {
3162  AssertIndexRange(comp, 4);
3163  return *(reinterpret_cast<float *>(&data) + comp);
3164  }
3165 
3169  DEAL_II_ALWAYS_INLINE
3170  const float &operator[](const unsigned int comp) const
3171  {
3172  AssertIndexRange(comp, 4);
3173  return *(reinterpret_cast<const float *>(&data) + comp);
3174  }
3175 
3179  DEAL_II_ALWAYS_INLINE
3180  VectorizedArray &
3181  operator+=(const VectorizedArray &vec)
3182  {
3183  data = vec_add(data, vec.data);
3184  return *this;
3185  }
3186 
3190  DEAL_II_ALWAYS_INLINE
3191  VectorizedArray &
3192  operator-=(const VectorizedArray &vec)
3193  {
3194  data = vec_sub(data, vec.data);
3195  return *this;
3196  }
3197 
3201  DEAL_II_ALWAYS_INLINE
3202  VectorizedArray &
3203  operator*=(const VectorizedArray &vec)
3204  {
3205  data = vec_mul(data, vec.data);
3206  return *this;
3207  }
3208 
3212  DEAL_II_ALWAYS_INLINE
3213  VectorizedArray &
3214  operator/=(const VectorizedArray &vec)
3215  {
3216  data = vec_div(data, vec.data);
3217  return *this;
3218  }
3219 
3224  DEAL_II_ALWAYS_INLINE
3225  void
3226  load(const float *ptr)
3227  {
3228  data = vec_vsx_ld(0, ptr);
3229  }
3230 
3235  DEAL_II_ALWAYS_INLINE
3236  void
3237  store(float *ptr) const
3238  {
3239  vec_vsx_st(data, 0, ptr);
3240  }
3241 
3244  DEAL_II_ALWAYS_INLINE
3245  void
3246  streaming_store(float *ptr) const
3247  {
3248  store(ptr);
3249  }
3250 
3253  DEAL_II_ALWAYS_INLINE
3254  void
3255  gather(const float *base_ptr, const unsigned int *offsets)
3256  {
3257  for (unsigned int i = 0; i < 4; ++i)
3258  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3259  }
3260 
3263  DEAL_II_ALWAYS_INLINE
3264  void
3265  scatter(const unsigned int *offsets, float *base_ptr) const
3266  {
3267  for (unsigned int i = 0; i < 4; ++i)
3268  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3269  }
3270 
3275  __vector float data;
3276 
3277 private:
3282  DEAL_II_ALWAYS_INLINE
3283  VectorizedArray
3284  get_sqrt() const
3285  {
3286  VectorizedArray res;
3287  res.data = vec_sqrt(data);
3288  return res;
3289  }
3290 
3295  DEAL_II_ALWAYS_INLINE
3296  VectorizedArray
3297  get_abs() const
3298  {
3299  VectorizedArray res;
3300  res.data = vec_abs(data);
3301  return res;
3302  }
3303 
3308  DEAL_II_ALWAYS_INLINE
3309  VectorizedArray
3310  get_max(const VectorizedArray &other) const
3311  {
3312  VectorizedArray res;
3313  res.data = vec_max(data, other.data);
3314  return res;
3315  }
3316 
3321  DEAL_II_ALWAYS_INLINE
3322  VectorizedArray
3323  get_min(const VectorizedArray &other) const
3324  {
3325  VectorizedArray res;
3326  res.data = vec_min(data, other.data);
3327  return res;
3328  }
3329 
3333  template <typename Number2>
3334  friend VectorizedArray<Number2>
3335  std::sqrt(const VectorizedArray<Number2> &);
3336  template <typename Number2>
3337  friend VectorizedArray<Number2>
3338  std::abs(const VectorizedArray<Number2> &);
3339  template <typename Number2>
3340  friend VectorizedArray<Number2>
3341  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
3342  template <typename Number2>
3343  friend VectorizedArray<Number2>
3344  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
3345 };
3346 
3347 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) &&
3348  // defined(__VSX__)
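
Independent of which of the specializations above is compiled in, user code only needs n_array_elements and operator[] to remain layout-agnostic. A small sketch, assuming a hypothetical helper name:

   template <typename Number>
   Number sum_of_lanes(const VectorizedArray<Number> &v)
   {
     Number sum = Number();
     for (unsigned int i = 0; i < VectorizedArray<Number>::n_array_elements; ++i)
       sum += v[i]; // operator[] exposes the individual lanes
     return sum;
   }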
3349 
3350 
3351 
3357 template <typename Number>
3358 inline DEAL_II_ALWAYS_INLINE bool
3359 operator==(const VectorizedArray<Number> &lhs,
3360  const VectorizedArray<Number> &rhs)
3361 {
3362  for (unsigned int i = 0; i < VectorizedArray<Number>::n_array_elements; ++i)
3363  if (lhs[i] != rhs[i])
3364  return false;
3365 
3366  return true;
3367 }
3368 
3369 
3375 template <typename Number>
3376 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3377  operator+(const VectorizedArray<Number> &u, const VectorizedArray<Number> &v)
3378 {
3379  VectorizedArray<Number> tmp = u;
3380  return tmp += v;
3381 }
3382 
3388 template <typename Number>
3389 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3390  operator-(const VectorizedArray<Number> &u, const VectorizedArray<Number> &v)
3391 {
3392  VectorizedArray<Number> tmp = u;
3393  return tmp -= v;
3394 }
3395 
3401 template <typename Number>
3402 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3403  operator*(const VectorizedArray<Number> &u, const VectorizedArray<Number> &v)
3404 {
3405  VectorizedArray<Number> tmp = u;
3406  return tmp *= v;
3407 }
3408 
3414 template <typename Number>
3415 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3416  operator/(const VectorizedArray<Number> &u, const VectorizedArray<Number> &v)
3417 {
3418  VectorizedArray<Number> tmp = u;
3419  return tmp /= v;
3420 }
3421 
3428 template <typename Number>
3429 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3430  operator+(const Number &u, const VectorizedArray<Number> &v)
3431 {
3432  VectorizedArray<Number> tmp;
3433  tmp = u;
3434  return tmp += v;
3435 }
3436 
3445 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3446  operator+(const double u, const VectorizedArray<float> &v)
3447 {
3448  VectorizedArray<float> tmp;
3449  tmp = u;
3450  return tmp += v;
3451 }
3452 
3459 template <typename Number>
3460 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3461  operator+(const VectorizedArray<Number> &v, const Number &u)
3462 {
3463  return u + v;
3464 }
3465 
3474 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3475  operator+(const VectorizedArray<float> &v, const double u)
3476 {
3477  return u + v;
3478 }
3479 
3486 template <typename Number>
3487 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3488  operator-(const Number &u, const VectorizedArray<Number> &v)
3489 {
3490  VectorizedArray<Number> tmp;
3491  tmp = u;
3492  return tmp -= v;
3493 }
3494 
3503 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3504  operator-(const double u, const VectorizedArray<float> &v)
3505 {
3506  VectorizedArray<float> tmp;
3507  tmp = float(u);
3508  return tmp -= v;
3509 }
3510 
3517 template <typename Number>
3518 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3519  operator-(const VectorizedArray<Number> &v, const Number &u)
3520 {
3521  VectorizedArray<Number> tmp;
3522  tmp = u;
3523  return v - tmp;
3524 }
3525 
3534 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3535  operator-(const VectorizedArray<float> &v, const double u)
3536 {
3537  VectorizedArray<float> tmp;
3538  tmp = float(u);
3539  return v - tmp;
3540 }
3541 
3548 template <typename Number>
3549 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3550  operator*(const Number &u, const VectorizedArray<Number> &v)
3551 {
3552  VectorizedArray<Number> tmp;
3553  tmp = u;
3554  return tmp *= v;
3555 }
3556 
3565 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3566  operator*(const double u, const VectorizedArray<float> &v)
3567 {
3568  VectorizedArray<float> tmp;
3569  tmp = float(u);
3570  return tmp *= v;
3571 }
3572 
3579 template <typename Number>
3580 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3581  operator*(const VectorizedArray<Number> &v, const Number &u)
3582 {
3583  return u * v;
3584 }
3585 
3594 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3595  operator*(const VectorizedArray<float> &v, const double u)
3596 {
3597  return u * v;
3598 }
3599 
3606 template <typename Number>
3607 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3608  operator/(const Number &u, const VectorizedArray<Number> &v)
3609 {
3610  VectorizedArray<Number> tmp;
3611  tmp = u;
3612  return tmp /= v;
3613 }
3614 
3623 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3624  operator/(const double u, const VectorizedArray<float> &v)
3625 {
3626  VectorizedArray<float> tmp;
3627  tmp = float(u);
3628  return tmp /= v;
3629 }
3630 
3637 template <typename Number>
3638 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3639  operator/(const VectorizedArray<Number> &v, const Number &u)
3640 {
3641  VectorizedArray<Number> tmp;
3642  tmp = u;
3643  return v / tmp;
3644 }
3645 
3654 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3655  operator/(const VectorizedArray<float> &v, const double u)
3656 {
3657  VectorizedArray<float> tmp;
3658  tmp = float(u);
3659  return v / tmp;
3660 }
3661 
3667 template <typename Number>
3668 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3669  operator+(const VectorizedArray<Number> &u)
3670 {
3671  return u;
3672 }
3673 
3679 template <typename Number>
3680 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3681  operator-(const VectorizedArray<Number> &u)
3682 {
3683  // to get a negative sign, subtract the input from zero (could also
3684  // multiply by -1, but this one is slightly simpler)
3685  return VectorizedArray<Number>() - u;
3686 }
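
Taken together, these overloads let scalar constants appear directly in vectorized expressions, where they are broadcast to all lanes before the operation is applied. A brief sketch (variable names are illustrative):

   VectorizedArray<double> v;
   v = 3.0;                                          // broadcast assignment
   const VectorizedArray<double> w = 2.0 * v + 1.0;  // each lane holds 7.0
   const VectorizedArray<double> u = -w / 2.0;       // unary minus, then lane-wise division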
3687 
3688 
3689 DEAL_II_NAMESPACE_CLOSE
3690 
3691 
3698 namespace std
3699 {
3707  template <typename Number>
3708  inline ::VectorizedArray<Number>
3709  sin(const ::VectorizedArray<Number> &x)
3710  {
3711  // put values in an array and later read in that array with an unaligned
3712  // read. This should save some instructions as compared to directly
3713  // setting the individual elements and also circumvents a compiler
3714  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
3715  // from April 2014, topic "matrix_free/step-48 Test").
3716  Number values[::VectorizedArray<Number>::n_array_elements];
3717  for (unsigned int i = 0;
3718  i < ::VectorizedArray<Number>::n_array_elements;
3719  ++i)
3720  values[i] = std::sin(x[i]);
3721  ::VectorizedArray<Number> out;
3722  out.load(&values[0]);
3723  return out;
3724  }
3725 
3726 
3727 
3735  template <typename Number>
3736  inline ::VectorizedArray<Number>
3737  cos(const ::VectorizedArray<Number> &x)
3738  {
3739  Number values[::VectorizedArray<Number>::n_array_elements];
3740  for (unsigned int i = 0;
3741  i < ::VectorizedArray<Number>::n_array_elements;
3742  ++i)
3743  values[i] = std::cos(x[i]);
3744  ::VectorizedArray<Number> out;
3745  out.load(&values[0]);
3746  return out;
3747  }
3748 
3749 
3750 
3758  template <typename Number>
3759  inline ::VectorizedArray<Number>
3760  tan(const ::VectorizedArray<Number> &x)
3761  {
3762  Number values[::VectorizedArray<Number>::n_array_elements];
3763  for (unsigned int i = 0;
3764  i < ::VectorizedArray<Number>::n_array_elements;
3765  ++i)
3766  values[i] = std::tan(x[i]);
3767  ::VectorizedArray<Number> out;
3768  out.load(&values[0]);
3769  return out;
3770  }
3771 
3772 
3773 
3781  template <typename Number>
3782  inline ::VectorizedArray<Number>
3783  exp(const ::VectorizedArray<Number> &x)
3784  {
3785  Number values[::VectorizedArray<Number>::n_array_elements];
3786  for (unsigned int i = 0;
3787  i < ::VectorizedArray<Number>::n_array_elements;
3788  ++i)
3789  values[i] = std::exp(x[i]);
3790  ::VectorizedArray<Number> out;
3791  out.load(&values[0]);
3792  return out;
3793  }
3794 
3795 
3796 
3804  template <typename Number>
3805  inline ::VectorizedArray<Number>
3806  log(const ::VectorizedArray<Number> &x)
3807  {
3808  Number values[::VectorizedArray<Number>::n_array_elements];
3809  for (unsigned int i = 0;
3810  i < ::VectorizedArray<Number>::n_array_elements;
3811  ++i)
3812  values[i] = std::log(x[i]);
3813  ::VectorizedArray<Number> out;
3814  out.load(&values[0]);
3815  return out;
3816  }
3817 
3818 
3819 
3827  template <typename Number>
3828  inline ::VectorizedArray<Number>
3829  sqrt(const ::VectorizedArray<Number> &x)
3830  {
3831  return x.get_sqrt();
3832  }
3833 
3834 
3835 
3843  template <typename Number>
3844  inline ::VectorizedArray<Number>
3845  pow(const ::VectorizedArray<Number> &x, const Number p)
3846  {
3847  Number values[::VectorizedArray<Number>::n_array_elements];
3848  for (unsigned int i = 0;
3849  i < ::VectorizedArray<Number>::n_array_elements;
3850  ++i)
3851  values[i] = std::pow(x[i], p);
3852  ::VectorizedArray<Number> out;
3853  out.load(&values[0]);
3854  return out;
3855  }
3856 
3857 
3858 
3866  template <typename Number>
3867  inline ::VectorizedArray<Number>
3868  abs(const ::VectorizedArray<Number> &x)
3869  {
3870  return x.get_abs();
3871  }
3872 
3873 
3874 
3882  template <typename Number>
3883  inline ::VectorizedArray<Number>
3884  max(const ::VectorizedArray<Number> &x,
3885  const ::VectorizedArray<Number> &y)
3886  {
3887  return x.get_max(y);
3888  }
3889 
3890 
3891 
3899  template <typename Number>
3900  inline ::VectorizedArray<Number>
3901  min(const ::VectorizedArray<Number> &x,
3902  const ::VectorizedArray<Number> &y)
3903  {
3904  return x.get_min(y);
3905  }
3906 
3907 } // namespace std
3908 
3909 #endif
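
The overloads in namespace std above apply the usual elementary functions lane by lane, so generic code templated on the number type works unchanged for VectorizedArray. A minimal sketch (outside the deal.II namespace the type would be spelled dealii::VectorizedArray; names below are illustrative):

   VectorizedArray<double> x;
   x = 0.25;
   const VectorizedArray<double> y = std::sin(x) + std::pow(x, 2.0); // evaluated per lane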