Reference documentation for deal.II version 8.5.1
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii__vectorization_h
18 #define dealii__vectorization_h
19 
20 #include <deal.II/base/config.h>
21 #include <deal.II/base/exceptions.h>
22 #include <deal.II/base/template_constraints.h>
23 
24 #include <cmath>
25 
26 // Note:
27 // The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
28 // according to the following scheme
29 // #ifdef __AVX512F__
30 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
31 // #elif defined (__AVX__)
32 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
33 // #elif defined (__SSE2__)
34 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
35 // #else
36 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
37 // #endif
38 // In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
39 // 'check_01_cpu_features.cmake', ensures that these features are not only
40 // present in the compilation unit but also working properly.
41 
42 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 // AVX, AVX-512
43 #include <immintrin.h>
44 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1 // SSE2
45 #include <emmintrin.h>
46 #endif
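
// Illustrative note (not part of the original header): the practical effect of
// the vectorization level is the number of lanes bundled in one
// VectorizedArray. A sketch of the mapping, using only the macro constructed
// above:
//
// @code
// #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3      // AVX-512
//   // VectorizedArray<double> has 8 lanes, VectorizedArray<float> has 16
// #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 2    // AVX
//   // VectorizedArray<double> has 4 lanes, VectorizedArray<float> has 8
// #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1    // SSE2
//   // VectorizedArray<double> has 2 lanes, VectorizedArray<float> has 4
// #else                                              // no vectorization
//   // VectorizedArray<Number> wraps a single scalar
// #endif
// @endcode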
47 
48 
49 DEAL_II_NAMESPACE_OPEN
50 
51 
52 namespace internal
53 {
64  template <typename T>
65  struct NumberType<VectorizedArray<T> >
66  {
67  static VectorizedArray<T> value (const T &t)
68  {
69  VectorizedArray<T> tmp;
70  tmp=t;
71  return tmp;
72  }
73  };
74 }
75 
76 
77 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
78 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
79 
80 template<typename Number>
81 struct EnableIfScalar<VectorizedArray<Number> >
82 {
83  typedef VectorizedArray<typename EnableIfScalar<Number>::type> type;
84 };
85 
86 
87 
88 #ifndef DEAL_II_WITH_CXX11
89 // Specify the types for the implemented multiplications explicitly
90 
91 template <typename Number>
92 struct ProductType<Number, VectorizedArray<Number> >
93 {
94  typedef VectorizedArray<Number> type;
95 };
96 
97 template <typename Number>
98 struct ProductType<VectorizedArray<Number>, Number>
99 {
100  typedef VectorizedArray<Number> type;
101 };
102 
103 // In contrast to scalar types for which the product of a float and a double
104 // variable would be a double variable, the implemented type here really is
105 // VectorizedArray<float>. Since VectorizedArray<double> is only half as
106 // wide as VectorizedArray<float>, we would have to throw away half of the
107 // vector otherwise.
108 template<>
109 struct ProductType<double, VectorizedArray<float> >
110 {
111  typedef VectorizedArray<float> type;
112 };
113 
114 template<>
115 struct ProductType<VectorizedArray<float>, double>
116 {
117  typedef VectorizedArray<float> type;
118 };
119 #endif
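
// For example (illustrative sketch of the trait in use): mixing a double
// scalar with a float vector keeps the wider vector type instead of promoting
// to double,
//
// @code
//   ProductType<double, VectorizedArray<float> >::type product;
//   // 'product' is a VectorizedArray<float>, preserving all float lanes
//   // rather than discarding half of them
// @endcode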
120 
121 
122 
173 template <typename Number>
174 class VectorizedArray
175 {
176 public:
180  static const unsigned int n_array_elements = 1;
181 
182  // POD means that there should be no user-defined constructors, destructors
183  // and copy functions (the standard is somewhat relaxed in C++2011, though).
184 
188  DEAL_II_ALWAYS_INLINE
189  VectorizedArray &
190  operator = (const Number scalar)
191  {
192  data = scalar;
193  return *this;
194  }
195 
199  DEAL_II_ALWAYS_INLINE
200  Number &
201  operator [] (const unsigned int comp)
202  {
203  (void)comp;
204  AssertIndexRange (comp, 1);
205  return data;
206  }
207 
211  DEAL_II_ALWAYS_INLINE
212  const Number &
213  operator [] (const unsigned int comp) const
214  {
215  (void)comp;
216  AssertIndexRange (comp, 1);
217  return data;
218  }
219 
223  DEAL_II_ALWAYS_INLINE
224  VectorizedArray &
225  operator += (const VectorizedArray &vec)
226  {
227  data+=vec.data;
228  return *this;
229  }
230 
234  DEAL_II_ALWAYS_INLINE
235  VectorizedArray &
236  operator -= (const VectorizedArray &vec)
237  {
238  data-=vec.data;
239  return *this;
240  }
241 
245  DEAL_II_ALWAYS_INLINE
246  VectorizedArray &
247  operator *= (const VectorizedArray &vec)
248  {
249  data*=vec.data;
250  return *this;
251  }
252 
256  DEAL_II_ALWAYS_INLINE
257  VectorizedArray &
258  operator /= (const VectorizedArray &vec)
259  {
260  data/=vec.data;
261  return *this;
262  }
263 
270  DEAL_II_ALWAYS_INLINE
271  void load (const Number *ptr)
272  {
273  data = *ptr;
274  }
275 
282  DEAL_II_ALWAYS_INLINE
283  void store (Number *ptr) const
284  {
285  *ptr = data;
286  }
287 
300  DEAL_II_ALWAYS_INLINE
301  void gather (const Number *base_ptr,
302  const unsigned int *offsets)
303  {
304  data = base_ptr[offsets[0]];
305  }
306 
319  DEAL_II_ALWAYS_INLINE
320  void scatter (const unsigned int *offsets,
321  Number *base_ptr) const
322  {
323  base_ptr[offsets[0]] = data;
324  }
325 
330  Number data;
331 
332 private:
337  DEAL_II_ALWAYS_INLINE
338  VectorizedArray
339  get_sqrt () const
340  {
341  VectorizedArray res;
342  res.data = std::sqrt(data);
343  return res;
344  }
345 
350  DEAL_II_ALWAYS_INLINE
351  VectorizedArray
352  get_abs () const
353  {
354  VectorizedArray res;
355  res.data = std::fabs(data);
356  return res;
357  }
358 
363  DEAL_II_ALWAYS_INLINE
364  VectorizedArray
365  get_max (const VectorizedArray &other) const
366  {
367  VectorizedArray res;
368  res.data = std::max (data, other.data);
369  return res;
370  }
371 
376  DEAL_II_ALWAYS_INLINE
377  VectorizedArray
378  get_min (const VectorizedArray &other) const
379  {
380  VectorizedArray res;
381  res.data = std::min (data, other.data);
382  return res;
383  }
384 
388  template <typename Number2> friend VectorizedArray<Number2>
389  std::sqrt (const VectorizedArray<Number2> &);
390  template <typename Number2> friend VectorizedArray<Number2>
391  std::abs (const VectorizedArray<Number2> &);
392  template <typename Number2> friend VectorizedArray<Number2>
393  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
394  template <typename Number2> friend VectorizedArray<Number2>
395  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
396 };
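
// Usage sketch (illustrative, not part of the original header): all
// specializations below provide the same interface as this fallback class, so
// generic code written once runs with whatever vector width was detected at
// configure time. Here 'ptr' is assumed to point to at least
// n_array_elements doubles:
//
// @code
//   VectorizedArray<double> a, b;
//   a = 2.0;                 // broadcast the scalar to all lanes
//   b.load (ptr);            // read n_array_elements contiguous values
//   a += b;                  // lane-wise addition
//   a.store (ptr);           // write all lanes back
//   const double a0 = a[0];  // access a single lane
// @endcode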
397 
398 
399 
406 template <typename Number>
407 inline DEAL_II_ALWAYS_INLINE
408 VectorizedArray<Number>
409 make_vectorized_array (const Number &u)
410 {
411  VectorizedArray<Number> result;
412  result = u;
413  return result;
414 }
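
// For example (illustrative):
//
// @code
//   const VectorizedArray<float> pi = make_vectorized_array (3.14159f);
//   // pi[v] == 3.14159f for every v < VectorizedArray<float>::n_array_elements
// @endcode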
415 
416 
417 
443 template <typename Number>
444 inline
445 void
446 vectorized_load_and_transpose(const unsigned int n_entries,
447  const Number *in,
448  const unsigned int *offsets,
449  VectorizedArray<Number> *out)
450 {
451  for (unsigned int i=0; i<n_entries; ++i)
452  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
453  out[i][v] = in[offsets[v]+i];
454 }
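
// Worked example (illustrative): with n_entries==3, two lanes
// (n_array_elements==2), and offsets=={0,10}, the generic loop above produces
//
//   out[0] = {in[0], in[10]}
//   out[1] = {in[1], in[11]}
//   out[2] = {in[2], in[12]}
//
// i.e., it gathers one strided scalar array per lane and transposes the data
// into vectorized (struct-of-arrays) form.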
455 
456 
457 
496 template <typename Number>
497 inline
498 void
499 vectorized_transpose_and_store(const bool add_into,
500  const unsigned int n_entries,
501  const VectorizedArray<Number> *in,
502  const unsigned int *offsets,
503  Number *out)
504 {
505  if (add_into)
506  for (unsigned int i=0; i<n_entries; ++i)
507  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
508  out[offsets[v]+i] += in[i][v];
509  else
510  for (unsigned int i=0; i<n_entries; ++i)
511  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
512  out[offsets[v]+i] = in[i][v];
513 }
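
// Usage sketch (illustrative): this function inverts
// vectorized_load_and_transpose, so a round trip with add_into==false leaves
// the scalar data unchanged:
//
// @code
//   double *data = ...;               // scalar storage, assumed non-aliasing
//   VectorizedArray<double> tmp[3];
//   vectorized_load_and_transpose (3, data, offsets, tmp);
//   // ... operate lane-wise on tmp[0], tmp[1], tmp[2] ...
//   vectorized_transpose_and_store (false, 3, tmp, offsets, data);
// @endcode
//
// With add_into==true the results are accumulated into, rather than assigned
// to, the entries of the output array.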
514 
515 
516 
517 // for safety, also check that __AVX512F__ is defined in case the user manually
518 // set some conflicting compile flags which prevent compilation
519 
520 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
521 
525 template <>
526 class VectorizedArray<double>
527 {
528 public:
532  static const unsigned int n_array_elements = 8;
533 
537  DEAL_II_ALWAYS_INLINE
538  VectorizedArray &
539  operator = (const double x)
540  {
541  data = _mm512_set1_pd(x);
542  return *this;
543  }
544 
548  DEAL_II_ALWAYS_INLINE
549  double &
550  operator [] (const unsigned int comp)
551  {
552  AssertIndexRange (comp, 8);
553  return *(reinterpret_cast<double *>(&data)+comp);
554  }
555 
559  DEAL_II_ALWAYS_INLINE
560  const double &
561  operator [] (const unsigned int comp) const
562  {
563  AssertIndexRange (comp, 8);
564  return *(reinterpret_cast<const double *>(&data)+comp);
565  }
566 
570  DEAL_II_ALWAYS_INLINE
571  VectorizedArray &
572  operator += (const VectorizedArray &vec)
573  {
574  // if the compiler supports vector arithmetics, we can simply use +=
575  // operator on the given data type. this allows the compiler to combine
576  // additions with multiplication (fused multiply-add) if those
577  // instructions are available. Otherwise, we need to use the built-in
578  // intrinsic command for __m512d
579 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
580  data += vec.data;
581 #else
582  data = _mm512_add_pd(data,vec.data);
583 #endif
584  return *this;
585  }
586 
590  DEAL_II_ALWAYS_INLINE
591  VectorizedArray &
592  operator -= (const VectorizedArray &vec)
593  {
594 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
595  data -= vec.data;
596 #else
597  data = _mm512_sub_pd(data,vec.data);
598 #endif
599  return *this;
600  }
604  DEAL_II_ALWAYS_INLINE
605  VectorizedArray &
606  operator *= (const VectorizedArray &vec)
607  {
608 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
609  data *= vec.data;
610 #else
611  data = _mm512_mul_pd(data,vec.data);
612 #endif
613  return *this;
614  }
615 
619  DEAL_II_ALWAYS_INLINE
620  VectorizedArray &
621  operator /= (const VectorizedArray &vec)
622  {
623 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
624  data /= vec.data;
625 #else
626  data = _mm512_div_pd(data,vec.data);
627 #endif
628  return *this;
629  }
630 
636  DEAL_II_ALWAYS_INLINE
637  void load (const double *ptr)
638  {
639  data = _mm512_loadu_pd (ptr);
640  }
641 
648  DEAL_II_ALWAYS_INLINE
649  void store (double *ptr) const
650  {
651  _mm512_storeu_pd (ptr, data);
652  }
653 
666  DEAL_II_ALWAYS_INLINE
667  void gather (const double *base_ptr,
668  const unsigned int *offsets)
669  {
670  // unfortunately, there does not appear to be a 256 bit integer load, so
671  // do it by some reinterpret casts here. this is allowed because the Intel
672  // API allows aliasing between different vector types.
673  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
674  const __m256i index = *((__m256i *)(&index_val));
675  data = _mm512_i32gather_pd(index, base_ptr, 8);
676  }
677 
690  DEAL_II_ALWAYS_INLINE
691  void scatter (const unsigned int *offsets,
692  double *base_ptr) const
693  {
694  for (unsigned int i=0; i<8; ++i)
695  for (unsigned int j=i+1; j<8; ++j)
696  Assert(offsets[i] != offsets[j],
697  ExcMessage("Result of scatter undefined if two offset elements"
698  " point to the same position"));
699 
700  // unfortunately, there does not appear to be a 256 bit integer load, so
701  // do it by some reinterpret casts here. this is allowed because the Intel
702  // API allows aliasing between different vector types.
703  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
704  const __m256i index = *((__m256i *)(&index_val));
705  _mm512_i32scatter_pd(base_ptr, index, data, 8);
706  }
707 
712  __m512d data;
713 
714 private:
719  DEAL_II_ALWAYS_INLINE
720  VectorizedArray
721  get_sqrt () const
722  {
723  VectorizedArray res;
724  res.data = _mm512_sqrt_pd(data);
725  return res;
726  }
727 
732  DEAL_II_ALWAYS_INLINE
733  VectorizedArray
734  get_abs () const
735  {
736  // to compute the absolute value, perform bitwise andnot with -0. This
737  // will leave all value and exponent bits unchanged but force the sign
738  // value to +. Since there is no andnot for AVX512, we interpret the data
739  // as 64 bit integers and do the andnot on those types (note that andnot
740  // is a bitwise operation so the data type does not matter)
741  __m512d mask = _mm512_set1_pd (-0.);
742  VectorizedArray res;
743  res.data = (__m512d)_mm512_andnot_epi64 ((__m512i)mask, (__m512i)data);
744  return res;
745  }
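
// (Illustrative aside: -0. has only the sign bit set, so the andnot above
// clears exactly the sign bit of every lane, i.e., it computes |x| lane-wise.)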
746 
751  DEAL_II_ALWAYS_INLINE
752  VectorizedArray
753  get_max (const VectorizedArray &other) const
754  {
755  VectorizedArray res;
756  res.data = _mm512_max_pd (data, other.data);
757  return res;
758  }
759 
764  DEAL_II_ALWAYS_INLINE
765  VectorizedArray
766  get_min (const VectorizedArray &other) const
767  {
768  VectorizedArray res;
769  res.data = _mm512_min_pd (data, other.data);
770  return res;
771  }
772 
776  template <typename Number2> friend VectorizedArray<Number2>
777  std::sqrt (const VectorizedArray<Number2> &);
778  template <typename Number2> friend VectorizedArray<Number2>
779  std::abs (const VectorizedArray<Number2> &);
780  template <typename Number2> friend VectorizedArray<Number2>
781  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
782  template <typename Number2> friend VectorizedArray<Number2>
783  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
784 };
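
// Usage sketch (illustrative): gather/scatter complement load/store when the
// data is not contiguous. Assuming base_ptr points into an array of doubles
// and offsets holds 8 pairwise distinct indices,
//
// @code
//   VectorizedArray<double> v;
//   v.gather (base_ptr, offsets);    // v[i] = base_ptr[offsets[i]]
//   v *= v;                          // lane-wise square
//   v.scatter (offsets, base_ptr);   // base_ptr[offsets[i]] = v[i]
// @endcode
//
// As asserted above, scatter is only well defined if the offsets are pairwise
// distinct.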
785 
786 
787 
791 template <>
792 inline
793 void
794 vectorized_load_and_transpose(const unsigned int n_entries,
795  const double *in,
796  const unsigned int *offsets,
797  VectorizedArray<double> *out)
798 {
799  const unsigned int n_chunks = n_entries/4;
800  for (unsigned int outer=0; outer<8; outer += 4)
801  {
802  const double *in0 = in + offsets[0+outer];
803  const double *in1 = in + offsets[1+outer];
804  const double *in2 = in + offsets[2+outer];
805  const double *in3 = in + offsets[3+outer];
806 
807  for (unsigned int i=0; i<n_chunks; ++i)
808  {
809  __m256d u0 = _mm256_loadu_pd(in0+4*i);
810  __m256d u1 = _mm256_loadu_pd(in1+4*i);
811  __m256d u2 = _mm256_loadu_pd(in2+4*i);
812  __m256d u3 = _mm256_loadu_pd(in3+4*i);
813  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
814  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
815  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
816  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
817  *(__m256d *)((double *)(&out[4*i+0].data)+outer) = _mm256_unpacklo_pd (t0, t1);
818  *(__m256d *)((double *)(&out[4*i+1].data)+outer) = _mm256_unpackhi_pd (t0, t1);
819  *(__m256d *)((double *)(&out[4*i+2].data)+outer) = _mm256_unpacklo_pd (t2, t3);
820  *(__m256d *)((double *)(&out[4*i+3].data)+outer) = _mm256_unpackhi_pd (t2, t3);
821  }
822  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
823  for (unsigned int v=0; v<4; ++v)
824  out[i][outer+v] = in[offsets[v+outer]+i];
825  }
826 }
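
// Note on the intrinsic sequence above (illustrative): each loop iteration
// transposes one 4x4 block of doubles. Writing the four loaded rows as
//
//   u0 = (a0 a1 a2 a3),  u1 = (b0 b1 b2 b3),
//   u2 = (c0 c1 c2 c3),  u3 = (d0 d1 d2 d3),
//
// _mm256_permute2f128_pd recombines 128-bit halves, e.g. t0 = (a0 a1 c0 c1)
// and t1 = (b0 b1 d0 d1), and the unpacklo/unpackhi steps then interleave
// pairs so that the stores receive the columns (a0 b0 c0 d0), (a1 b1 c1 d1),
// (a2 b2 c2 d2), and (a3 b3 c3 d3).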
827 
828 
829 
833 template <>
834 inline
835 void
836 vectorized_transpose_and_store(const bool add_into,
837  const unsigned int n_entries,
838  const VectorizedArray<double> *in,
839  const unsigned int *offsets,
840  double *out)
841 {
842  const unsigned int n_chunks = n_entries/4;
843  // do not do a full 8x8 transpose because the code would be too long and
844  // would most likely not pay off. rather, do the transposition on smaller
845  // chunks of the vectorized array, i.e., on __m256d halves
846  for (unsigned int outer=0; outer<8; outer += 4)
847  {
848  double *out0 = out + offsets[0+outer];
849  double *out1 = out + offsets[1+outer];
850  double *out2 = out + offsets[2+outer];
851  double *out3 = out + offsets[3+outer];
852  for (unsigned int i=0; i<n_chunks; ++i)
853  {
854  __m256d u0 = *(const __m256d *)((const double *)(&in[4*i+0].data)+outer);
855  __m256d u1 = *(const __m256d *)((const double *)(&in[4*i+1].data)+outer);
856  __m256d u2 = *(const __m256d *)((const double *)(&in[4*i+2].data)+outer);
857  __m256d u3 = *(const __m256d *)((const double *)(&in[4*i+3].data)+outer);
858  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
859  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
860  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
861  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
862  __m256d res0 = _mm256_unpacklo_pd (t0, t1);
863  __m256d res1 = _mm256_unpackhi_pd (t0, t1);
864  __m256d res2 = _mm256_unpacklo_pd (t2, t3);
865  __m256d res3 = _mm256_unpackhi_pd (t2, t3);
866 
867  // Cannot use the same store instructions in both paths of the 'if'
868  // because the compiler cannot know that there is no aliasing between
869  // pointers
870  if (add_into)
871  {
872  res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
873  _mm256_storeu_pd(out0+4*i, res0);
874  res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
875  _mm256_storeu_pd(out1+4*i, res1);
876  res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
877  _mm256_storeu_pd(out2+4*i, res2);
878  res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
879  _mm256_storeu_pd(out3+4*i, res3);
880  }
881  else
882  {
883  _mm256_storeu_pd(out0+4*i, res0);
884  _mm256_storeu_pd(out1+4*i, res1);
885  _mm256_storeu_pd(out2+4*i, res2);
886  _mm256_storeu_pd(out3+4*i, res3);
887  }
888  }
889  if (add_into)
890  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
891  for (unsigned int v=0; v<4; ++v)
892  out[offsets[v+outer]+i] += in[i][v+outer];
893  else
894  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
895  for (unsigned int v=0; v<4; ++v)
896  out[offsets[v+outer]+i] = in[i][v+outer];
897  }
898 }
899 
900 
901 
905 template<>
906 class VectorizedArray<float>
907 {
908 public:
912  static const unsigned int n_array_elements = 16;
913 
917  DEAL_II_ALWAYS_INLINE
918  VectorizedArray &
919  operator = (const float x)
920  {
921  data = _mm512_set1_ps(x);
922  return *this;
923  }
924 
928  DEAL_II_ALWAYS_INLINE
929  float &
930  operator [] (const unsigned int comp)
931  {
932  AssertIndexRange (comp, 16);
933  return *(reinterpret_cast<float *>(&data)+comp);
934  }
935 
939  DEAL_II_ALWAYS_INLINE
940  const float &
941  operator [] (const unsigned int comp) const
942  {
943  AssertIndexRange (comp, 16);
944  return *(reinterpret_cast<const float *>(&data)+comp);
945  }
946 
950  DEAL_II_ALWAYS_INLINE
951  VectorizedArray &
952  operator += (const VectorizedArray &vec)
953  {
954  // if the compiler supports vector arithmetics, we can simply use +=
955  // operator on the given data type. this allows the compiler to combine
956  // additions with multiplication (fused multiply-add) if those
957  // instructions are available. Otherwise, we need to use the built-in
958  // intrinsic command for __m512
959 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
960  data += vec.data;
961 #else
962  data = _mm512_add_ps(data,vec.data);
963 #endif
964  return *this;
965  }
966 
970  DEAL_II_ALWAYS_INLINE
971  VectorizedArray &
972  operator -= (const VectorizedArray &vec)
973  {
974 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
975  data -= vec.data;
976 #else
977  data = _mm512_sub_ps(data,vec.data);
978 #endif
979  return *this;
980  }
984  DEAL_II_ALWAYS_INLINE
985  VectorizedArray &
986  operator *= (const VectorizedArray &vec)
987  {
988 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
989  data *= vec.data;
990 #else
991  data = _mm512_mul_ps(data,vec.data);
992 #endif
993  return *this;
994  }
995 
999  DEAL_II_ALWAYS_INLINE
1000  VectorizedArray &
1001  operator /= (const VectorizedArray &vec)
1002  {
1003 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1004  data /= vec.data;
1005 #else
1006  data = _mm512_div_ps(data,vec.data);
1007 #endif
1008  return *this;
1009  }
1010 
1016  DEAL_II_ALWAYS_INLINE
1017  void load (const float *ptr)
1018  {
1019  data = _mm512_loadu_ps (ptr);
1020  }
1021 
1028  DEAL_II_ALWAYS_INLINE
1029  void store (float *ptr) const
1030  {
1031  _mm512_storeu_ps (ptr, data);
1032  }
1033 
1046  DEAL_II_ALWAYS_INLINE
1047  void gather (const float *base_ptr,
1048  const unsigned int *offsets)
1049  {
1050  // unfortunately, there does not appear to be a 512 bit integer load, so
1051  // do it by some reinterpret casts here. this is allowed because the Intel
1052  // API allows aliasing between different vector types.
1053  const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
1054  const __m512i index = *((__m512i *)(&index_val));
1055  data = _mm512_i32gather_ps(index, base_ptr, 4);
1056  }
1057 
1070  DEAL_II_ALWAYS_INLINE
1071  void scatter (const unsigned int *offsets,
1072  float *base_ptr) const
1073  {
1074  for (unsigned int i=0; i<16; ++i)
1075  for (unsigned int j=i+1; j<16; ++j)
1076  Assert(offsets[i] != offsets[j],
1077  ExcMessage("Result of scatter undefined if two offset elements"
1078  " point to the same position"));
1079 
1080  // unfortunately, there does not appear to be a 512 bit integer load, so
1081  // do it by some reinterpret casts here. this is allowed because the Intel
1082  // API allows aliasing between different vector types.
1083  const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
1084  const __m512i index = *((__m512i *)(&index_val));
1085  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1086  }
1087 
1092  __m512 data;
1093 
1094 private:
1095 
1100  DEAL_II_ALWAYS_INLINE
1101  VectorizedArray
1102  get_sqrt () const
1103  {
1104  VectorizedArray res;
1105  res.data = _mm512_sqrt_ps(data);
1106  return res;
1107  }
1108 
1113  DEAL_II_ALWAYS_INLINE
1114  VectorizedArray
1115  get_abs () const
1116  {
1117  // to compute the absolute value, perform bitwise andnot with -0. This
1118  // will leave all value and exponent bits unchanged but force the sign
1119  // value to +. Since there is no andnot for AVX512, we interpret the data
1120  // as 32 bit integers and do the andnot on those types (note that andnot
1121  // is a bitwise operation so the data type does not matter)
1122  __m512 mask = _mm512_set1_ps (-0.f);
1123  VectorizedArray res;
1124  res.data = (__m512)_mm512_andnot_epi32 ((__m512i)mask, (__m512i)data);
1125  return res;
1126  }
1127 
1132  DEAL_II_ALWAYS_INLINE
1133  VectorizedArray
1134  get_max (const VectorizedArray &other) const
1135  {
1136  VectorizedArray res;
1137  res.data = _mm512_max_ps (data, other.data);
1138  return res;
1139  }
1140 
1145  DEAL_II_ALWAYS_INLINE
1146  VectorizedArray
1147  get_min (const VectorizedArray &other) const
1148  {
1149  VectorizedArray res;
1150  res.data = _mm512_min_ps (data, other.data);
1151  return res;
1152  }
1153 
1157  template <typename Number2> friend VectorizedArray<Number2>
1158  std::sqrt (const VectorizedArray<Number2> &);
1159  template <typename Number2> friend VectorizedArray<Number2>
1160  std::abs (const VectorizedArray<Number2> &);
1161  template <typename Number2> friend VectorizedArray<Number2>
1162  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1163  template <typename Number2> friend VectorizedArray<Number2>
1164  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1165 };
1166 
1167 
1168 
1172 template <>
1173 inline
1174 void
1175 vectorized_load_and_transpose(const unsigned int n_entries,
1176  const float *in,
1177  const unsigned int *offsets,
1178  VectorizedArray<float> *out)
1179 {
1180  const unsigned int n_chunks = n_entries/4;
1181  for (unsigned int outer = 0; outer<16; outer += 8)
1182  {
1183  for (unsigned int i=0; i<n_chunks; ++i)
1184  {
1185  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0+outer]);
1186  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1+outer]);
1187  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2+outer]);
1188  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3+outer]);
1189  __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4+outer]);
1190  __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5+outer]);
1191  __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6+outer]);
1192  __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7+outer]);
1193  // To avoid warnings about uninitialized variables, need to initialize
1194  // one variable with zero before using it.
1195  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
1196  t0 = _mm256_insertf128_ps (t3, u0, 0);
1197  t0 = _mm256_insertf128_ps (t0, u4, 1);
1198  t1 = _mm256_insertf128_ps (t3, u1, 0);
1199  t1 = _mm256_insertf128_ps (t1, u5, 1);
1200  t2 = _mm256_insertf128_ps (t3, u2, 0);
1201  t2 = _mm256_insertf128_ps (t2, u6, 1);
1202  t3 = _mm256_insertf128_ps (t3, u3, 0);
1203  t3 = _mm256_insertf128_ps (t3, u7, 1);
1204  __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
1205  __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
1206  __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
1207  __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
1208  *(__m256 *)((float *)(&out[4*i+0].data)+outer) = _mm256_shuffle_ps (v0, v2, 0x88);
1209  *(__m256 *)((float *)(&out[4*i+1].data)+outer) = _mm256_shuffle_ps (v0, v2, 0xdd);
1210  *(__m256 *)((float *)(&out[4*i+2].data)+outer) = _mm256_shuffle_ps (v1, v3, 0x88);
1211  *(__m256 *)((float *)(&out[4*i+3].data)+outer) = _mm256_shuffle_ps (v1, v3, 0xdd);
1212  }
1213  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1214  for (unsigned int v=0; v<8; ++v)
1215  out[i][v+outer] = in[offsets[v+outer]+i];
1216  }
1217 }
1218 
1219 
1220 
1224 template <>
1225 inline
1226 void
1227 vectorized_transpose_and_store(const bool add_into,
1228  const unsigned int n_entries,
1229  const VectorizedArray<float> *in,
1230  const unsigned int *offsets,
1231  float *out)
1232 {
1233  const unsigned int n_chunks = n_entries/4;
1234  for (unsigned int outer = 0; outer<16; outer += 8)
1235  {
1236  for (unsigned int i=0; i<n_chunks; ++i)
1237  {
1238  __m256 u0 = *(const __m256 *)((const float *)(&in[4*i+0].data)+outer);
1239  __m256 u1 = *(const __m256 *)((const float *)(&in[4*i+1].data)+outer);
1240  __m256 u2 = *(const __m256 *)((const float *)(&in[4*i+2].data)+outer);
1241  __m256 u3 = *(const __m256 *)((const float *)(&in[4*i+3].data)+outer);
1242  __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
1243  __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
1244  __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
1245  __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
1246  u0 = _mm256_shuffle_ps (t0, t2, 0x88);
1247  u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
1248  u2 = _mm256_shuffle_ps (t1, t3, 0x88);
1249  u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
1250  __m128 res0 = _mm256_extractf128_ps (u0, 0);
1251  __m128 res4 = _mm256_extractf128_ps (u0, 1);
1252  __m128 res1 = _mm256_extractf128_ps (u1, 0);
1253  __m128 res5 = _mm256_extractf128_ps (u1, 1);
1254  __m128 res2 = _mm256_extractf128_ps (u2, 0);
1255  __m128 res6 = _mm256_extractf128_ps (u2, 1);
1256  __m128 res3 = _mm256_extractf128_ps (u3, 0);
1257  __m128 res7 = _mm256_extractf128_ps (u3, 1);
1258 
1259  // Cannot use the same store instructions in both paths of the 'if'
1260  // because the compiler cannot know that there is no aliasing between
1261  // pointers
1262  if (add_into)
1263  {
1264  res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0+outer]), res0);
1265  _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
1266  res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1+outer]), res1);
1267  _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
1268  res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2+outer]), res2);
1269  _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
1270  res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3+outer]), res3);
1271  _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
1272  res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4+outer]), res4);
1273  _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
1274  res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5+outer]), res5);
1275  _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
1276  res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6+outer]), res6);
1277  _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
1278  res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7+outer]), res7);
1279  _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
1280  }
1281  else
1282  {
1283  _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
1284  _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
1285  _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
1286  _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
1287  _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
1288  _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
1289  _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
1290  _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
1291  }
1292  }
1293  if (add_into)
1294  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1295  for (unsigned int v=0; v<8; ++v)
1296  out[offsets[v+outer]+i] += in[i][v+outer];
1297  else
1298  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1299  for (unsigned int v=0; v<8; ++v)
1300  out[offsets[v+outer]+i] = in[i][v+outer];
1301  }
1302 }
1303 
1304 
1305 
1306 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
1307 
1311 template <>
1312 class VectorizedArray<double>
1313 {
1314 public:
1318  static const unsigned int n_array_elements = 4;
1319 
1323  DEAL_II_ALWAYS_INLINE
1324  VectorizedArray &
1325  operator = (const double x)
1326  {
1327  data = _mm256_set1_pd(x);
1328  return *this;
1329  }
1330 
1334  DEAL_II_ALWAYS_INLINE
1335  double &
1336  operator [] (const unsigned int comp)
1337  {
1338  AssertIndexRange (comp, 4);
1339  return *(reinterpret_cast<double *>(&data)+comp);
1340  }
1341 
1345  DEAL_II_ALWAYS_INLINE
1346  const double &
1347  operator [] (const unsigned int comp) const
1348  {
1349  AssertIndexRange (comp, 4);
1350  return *(reinterpret_cast<const double *>(&data)+comp);
1351  }
1352 
1356  DEAL_II_ALWAYS_INLINE
1357  VectorizedArray &
1358  operator += (const VectorizedArray &vec)
1359  {
1360  // if the compiler supports vector arithmetics, we can simply use +=
1361  // operator on the given data type. this allows the compiler to combine
1362  // additions with multiplication (fused multiply-add) if those
1363  // instructions are available. Otherwise, we need to use the built-in
1364  // intrinsic command for __m256d
1365 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1366  data += vec.data;
1367 #else
1368  data = _mm256_add_pd(data,vec.data);
1369 #endif
1370  return *this;
1371  }
1372 
1376  DEAL_II_ALWAYS_INLINE
1377  VectorizedArray &
1378  operator -= (const VectorizedArray &vec)
1379  {
1380 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1381  data -= vec.data;
1382 #else
1383  data = _mm256_sub_pd(data,vec.data);
1384 #endif
1385  return *this;
1386  }
1390  DEAL_II_ALWAYS_INLINE
1391  VectorizedArray &
1392  operator *= (const VectorizedArray &vec)
1393  {
1394 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1395  data *= vec.data;
1396 #else
1397  data = _mm256_mul_pd(data,vec.data);
1398 #endif
1399  return *this;
1400  }
1401 
1405  DEAL_II_ALWAYS_INLINE
1406  VectorizedArray &
1407  operator /= (const VectorizedArray &vec)
1408  {
1409 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1410  data /= vec.data;
1411 #else
1412  data = _mm256_div_pd(data,vec.data);
1413 #endif
1414  return *this;
1415  }
1416 
1422  DEAL_II_ALWAYS_INLINE
1423  void load (const double *ptr)
1424  {
1425  data = _mm256_loadu_pd (ptr);
1426  }
1427 
1434  DEAL_II_ALWAYS_INLINE
1435  void store (double *ptr) const
1436  {
1437  _mm256_storeu_pd (ptr, data);
1438  }
1439 
1452  DEAL_II_ALWAYS_INLINE
1453  void gather (const double *base_ptr,
1454  const unsigned int *offsets)
1455  {
1456 #ifdef __AVX2__
1457  // unfortunately, there does not appear to be a 128 bit integer load, so
1458  // do it by some reinterpret casts here. this is allowed because the Intel
1459  // API allows aliasing between different vector types.
1460  const __m128 index_val = _mm_loadu_ps((const float *)offsets);
1461  const __m128i index = *((__m128i *)(&index_val));
1462  data = _mm256_i32gather_pd(base_ptr, index, 8);
1463 #else
1464  for (unsigned int i=0; i<4; ++i)
1465  *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
1466 #endif
1467  }
1468 
1481  DEAL_II_ALWAYS_INLINE
1482  void scatter (const unsigned int *offsets,
1483  double *base_ptr) const
1484  {
1485  for (unsigned int i=0; i<4; ++i)
1486  for (unsigned int j=i+1; j<4; ++j)
1487  Assert(offsets[i] != offsets[j],
1488  ExcMessage("Result of scatter undefined if two offset elements"
1489  " point to the same position"));
1490 
1491  // no scatter operation in AVX/AVX2
1492  for (unsigned int i=0; i<4; ++i)
1493  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
1494  }
1495 
1500  __m256d data;
1501 
1502 private:
1507  DEAL_II_ALWAYS_INLINE
1508  VectorizedArray
1509  get_sqrt () const
1510  {
1511  VectorizedArray res;
1512  res.data = _mm256_sqrt_pd(data);
1513  return res;
1514  }
1515 
1520  DEAL_II_ALWAYS_INLINE
1521  VectorizedArray
1522  get_abs () const
1523  {
1524  // to compute the absolute value, perform bitwise andnot with -0. This
1525  // will leave all value and exponent bits unchanged but force the sign
1526  // value to +.
1527  __m256d mask = _mm256_set1_pd (-0.);
1528  VectorizedArray res;
1529  res.data = _mm256_andnot_pd(mask, data);
1530  return res;
1531  }
1532 
1537  DEAL_II_ALWAYS_INLINE
1538  VectorizedArray
1539  get_max (const VectorizedArray &other) const
1540  {
1541  VectorizedArray res;
1542  res.data = _mm256_max_pd (data, other.data);
1543  return res;
1544  }
1545 
1550  DEAL_II_ALWAYS_INLINE
1551  VectorizedArray
1552  get_min (const VectorizedArray &other) const
1553  {
1554  VectorizedArray res;
1555  res.data = _mm256_min_pd (data, other.data);
1556  return res;
1557  }
1558 
1562  template <typename Number2> friend VectorizedArray<Number2>
1563  std::sqrt (const VectorizedArray<Number2> &);
1564  template <typename Number2> friend VectorizedArray<Number2>
1565  std::abs (const VectorizedArray<Number2> &);
1566  template <typename Number2> friend VectorizedArray<Number2>
1567  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1568  template <typename Number2> friend VectorizedArray<Number2>
1569  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1570 };
1571 
1572 
1573 
1577 template <>
1578 inline
1579 void
1580 vectorized_load_and_transpose(const unsigned int n_entries,
1581  const double *in,
1582  const unsigned int *offsets,
1583  VectorizedArray<double> *out)
1584 {
1585  const unsigned int n_chunks = n_entries/4;
1586  const double *in0 = in + offsets[0];
1587  const double *in1 = in + offsets[1];
1588  const double *in2 = in + offsets[2];
1589  const double *in3 = in + offsets[3];
1590 
1591  for (unsigned int i=0; i<n_chunks; ++i)
1592  {
1593  __m256d u0 = _mm256_loadu_pd(in0+4*i);
1594  __m256d u1 = _mm256_loadu_pd(in1+4*i);
1595  __m256d u2 = _mm256_loadu_pd(in2+4*i);
1596  __m256d u3 = _mm256_loadu_pd(in3+4*i);
1597  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
1598  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
1599  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
1600  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
1601  out[4*i+0].data = _mm256_unpacklo_pd (t0, t1);
1602  out[4*i+1].data = _mm256_unpackhi_pd (t0, t1);
1603  out[4*i+2].data = _mm256_unpacklo_pd (t2, t3);
1604  out[4*i+3].data = _mm256_unpackhi_pd (t2, t3);
1605  }
1606  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1607  for (unsigned int v=0; v<4; ++v)
1608  out[i][v] = in[offsets[v]+i];
1609 }
1610 
1611 
1612 
1616 template <>
1617 inline
1618 void
1619 vectorized_transpose_and_store(const bool add_into,
1620  const unsigned int n_entries,
1621  const VectorizedArray<double> *in,
1622  const unsigned int *offsets,
1623  double *out)
1624 {
1625  const unsigned int n_chunks = n_entries/4;
1626  double *out0 = out + offsets[0];
1627  double *out1 = out + offsets[1];
1628  double *out2 = out + offsets[2];
1629  double *out3 = out + offsets[3];
1630  for (unsigned int i=0; i<n_chunks; ++i)
1631  {
1632  __m256d u0 = in[4*i+0].data;
1633  __m256d u1 = in[4*i+1].data;
1634  __m256d u2 = in[4*i+2].data;
1635  __m256d u3 = in[4*i+3].data;
1636  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
1637  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
1638  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
1639  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
1640  __m256d res0 = _mm256_unpacklo_pd (t0, t1);
1641  __m256d res1 = _mm256_unpackhi_pd (t0, t1);
1642  __m256d res2 = _mm256_unpacklo_pd (t2, t3);
1643  __m256d res3 = _mm256_unpackhi_pd (t2, t3);
1644 
1645  // Cannot use the same store instructions in both paths of the 'if'
1646  // because the compiler cannot know that there is no aliasing between
1647  // pointers
1648  if (add_into)
1649  {
1650  res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
1651  _mm256_storeu_pd(out0+4*i, res0);
1652  res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
1653  _mm256_storeu_pd(out1+4*i, res1);
1654  res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
1655  _mm256_storeu_pd(out2+4*i, res2);
1656  res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
1657  _mm256_storeu_pd(out3+4*i, res3);
1658  }
1659  else
1660  {
1661  _mm256_storeu_pd(out0+4*i, res0);
1662  _mm256_storeu_pd(out1+4*i, res1);
1663  _mm256_storeu_pd(out2+4*i, res2);
1664  _mm256_storeu_pd(out3+4*i, res3);
1665  }
1666  }
1667  if (add_into)
1668  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1669  for (unsigned int v=0; v<4; ++v)
1670  out[offsets[v]+i] += in[i][v];
1671  else
1672  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1673  for (unsigned int v=0; v<4; ++v)
1674  out[offsets[v]+i] = in[i][v];
1675 }
1676 
1677 
1678 
1682 template<>
1683 class VectorizedArray<float>
1684 {
1685 public:
1689  static const unsigned int n_array_elements = 8;
1690 
1694  DEAL_II_ALWAYS_INLINE
1695  VectorizedArray &
1696  operator = (const float x)
1697  {
1698  data = _mm256_set1_ps(x);
1699  return *this;
1700  }
1701 
1705  DEAL_II_ALWAYS_INLINE
1706  float &
1707  operator [] (const unsigned int comp)
1708  {
1709  AssertIndexRange (comp, 8);
1710  return *(reinterpret_cast<float *>(&data)+comp);
1711  }
1712 
1716  DEAL_II_ALWAYS_INLINE
1717  const float &
1718  operator [] (const unsigned int comp) const
1719  {
1720  AssertIndexRange (comp, 8);
1721  return *(reinterpret_cast<const float *>(&data)+comp);
1722  }
1723 
1727  DEAL_II_ALWAYS_INLINE
1728  VectorizedArray &
1729  operator += (const VectorizedArray &vec)
1730  {
1731  // if the compiler supports vector arithmetics, we can simply use +=
1732  // operator on the given data type. this allows the compiler to combine
1733  // additions with multiplication (fused multiply-add) if those
1734  // instructions are available. Otherwise, we need to use the built-in
1735  // intrinsic command for __m256
1736 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1737  data += vec.data;
1738 #else
1739  data = _mm256_add_ps(data,vec.data);
1740 #endif
1741  return *this;
1742  }
1743 
1747  DEAL_II_ALWAYS_INLINE
1748  VectorizedArray &
1749  operator -= (const VectorizedArray &vec)
1750  {
1751 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1752  data -= vec.data;
1753 #else
1754  data = _mm256_sub_ps(data,vec.data);
1755 #endif
1756  return *this;
1757  }
1761  DEAL_II_ALWAYS_INLINE
1762  VectorizedArray &
1763  operator *= (const VectorizedArray &vec)
1764  {
1765 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1766  data *= vec.data;
1767 #else
1768  data = _mm256_mul_ps(data,vec.data);
1769 #endif
1770  return *this;
1771  }
1772 
1776  DEAL_II_ALWAYS_INLINE
1777  VectorizedArray &
1778  operator /= (const VectorizedArray &vec)
1779  {
1780 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1781  data /= vec.data;
1782 #else
1783  data = _mm256_div_ps(data,vec.data);
1784 #endif
1785  return *this;
1786  }
1787 
1793  DEAL_II_ALWAYS_INLINE
1794  void load (const float *ptr)
1795  {
1796  data = _mm256_loadu_ps (ptr);
1797  }
1798 
1805  DEAL_II_ALWAYS_INLINE
1806  void store (float *ptr) const
1807  {
1808  _mm256_storeu_ps (ptr, data);
1809  }
1810 
1823  DEAL_II_ALWAYS_INLINE
1824  void gather (const float *base_ptr,
1825  const unsigned int *offsets)
1826  {
1827 #ifdef __AVX2__
1828  // unfortunately, there does not appear to be a 256 bit integer load, so
1829  // do it by some reinterpret casts here. this is allowed because the Intel
1830  // API allows aliasing between different vector types.
1831  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
1832  const __m256i index = *((__m256i *)(&index_val));
1833  data = _mm256_i32gather_ps(base_ptr, index, 4);
1834 #else
1835  for (unsigned int i=0; i<8; ++i)
1836  *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
1837 #endif
1838  }
1839 
1852  DEAL_II_ALWAYS_INLINE
1853  void scatter (const unsigned int *offsets,
1854  float *base_ptr) const
1855  {
1856  for (unsigned int i=0; i<8; ++i)
1857  for (unsigned int j=i+1; j<8; ++j)
1858  Assert(offsets[i] != offsets[j],
1859  ExcMessage("Result of scatter undefined if two offset elements"
1860  " point to the same position"));
1861 
1862  // no scatter operation in AVX/AVX2
1863  for (unsigned int i=0; i<8; ++i)
1864  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
1865  }
1866 
1871  __m256 data;
1872 
1873 private:
1874 
1879  DEAL_II_ALWAYS_INLINE
1880  VectorizedArray
1881  get_sqrt () const
1882  {
1883  VectorizedArray res;
1884  res.data = _mm256_sqrt_ps(data);
1885  return res;
1886  }
1887 
1892  DEAL_II_ALWAYS_INLINE
1893  VectorizedArray
1894  get_abs () const
1895  {
1896  // to compute the absolute value, perform bitwise andnot with -0. This
1897  // will leave all value and exponent bits unchanged but force the sign
1898  // value to +.
1899  __m256 mask = _mm256_set1_ps (-0.f);
1900  VectorizedArray res;
1901  res.data = _mm256_andnot_ps(mask, data);
1902  return res;
1903  }
1904 
1909  DEAL_II_ALWAYS_INLINE
1910  VectorizedArray
1911  get_max (const VectorizedArray &other) const
1912  {
1913  VectorizedArray res;
1914  res.data = _mm256_max_ps (data, other.data);
1915  return res;
1916  }
1917 
1922  DEAL_II_ALWAYS_INLINE
1923  VectorizedArray
1924  get_min (const VectorizedArray &other) const
1925  {
1926  VectorizedArray res;
1927  res.data = _mm256_min_ps (data, other.data);
1928  return res;
1929  }
1930 
1934  template <typename Number2> friend VectorizedArray<Number2>
1935  std::sqrt (const VectorizedArray<Number2> &);
1936  template <typename Number2> friend VectorizedArray<Number2>
1937  std::abs (const VectorizedArray<Number2> &);
1938  template <typename Number2> friend VectorizedArray<Number2>
1939  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1940  template <typename Number2> friend VectorizedArray<Number2>
1941  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1942 };
1943 
1944 
1945 
1949 template <>
1950 inline
1951 void
1952 vectorized_load_and_transpose(const unsigned int n_entries,
1953  const float *in,
1954  const unsigned int *offsets,
1955  VectorizedArray<float> *out)
1956 {
1957  const unsigned int n_chunks = n_entries/4;
1958  for (unsigned int i=0; i<n_chunks; ++i)
1959  {
1960  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
1961  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
1962  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
1963  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
1964  __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4]);
1965  __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5]);
1966  __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6]);
1967  __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7]);
1968  // To avoid warnings about uninitialized variables, need to initialize
1969  // one variable with zero before using it.
1970  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
1971  t0 = _mm256_insertf128_ps (t3, u0, 0);
1972  t0 = _mm256_insertf128_ps (t0, u4, 1);
1973  t1 = _mm256_insertf128_ps (t3, u1, 0);
1974  t1 = _mm256_insertf128_ps (t1, u5, 1);
1975  t2 = _mm256_insertf128_ps (t3, u2, 0);
1976  t2 = _mm256_insertf128_ps (t2, u6, 1);
1977  t3 = _mm256_insertf128_ps (t3, u3, 0);
1978  t3 = _mm256_insertf128_ps (t3, u7, 1);
1979  __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
1980  __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
1981  __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
1982  __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
1983  out[4*i+0].data = _mm256_shuffle_ps (v0, v2, 0x88);
1984  out[4*i+1].data = _mm256_shuffle_ps (v0, v2, 0xdd);
1985  out[4*i+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
1986  out[4*i+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
1987  }
1988  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1989  for (unsigned int v=0; v<8; ++v)
1990  out[i][v] = in[offsets[v]+i];
1991 }
1992 
1993 
1994 
1998 template <>
1999 inline
2000 void
2001 vectorized_transpose_and_store(const bool add_into,
2002  const unsigned int n_entries,
2003  const VectorizedArray<float> *in,
2004  const unsigned int *offsets,
2005  float *out)
2006 {
2007  const unsigned int n_chunks = n_entries/4;
2008  for (unsigned int i=0; i<n_chunks; ++i)
2009  {
2010  __m256 u0 = in[4*i+0].data;
2011  __m256 u1 = in[4*i+1].data;
2012  __m256 u2 = in[4*i+2].data;
2013  __m256 u3 = in[4*i+3].data;
2014  __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
2015  __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
2016  __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
2017  __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
2018  u0 = _mm256_shuffle_ps (t0, t2, 0x88);
2019  u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
2020  u2 = _mm256_shuffle_ps (t1, t3, 0x88);
2021  u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
2022  __m128 res0 = _mm256_extractf128_ps (u0, 0);
2023  __m128 res4 = _mm256_extractf128_ps (u0, 1);
2024  __m128 res1 = _mm256_extractf128_ps (u1, 0);
2025  __m128 res5 = _mm256_extractf128_ps (u1, 1);
2026  __m128 res2 = _mm256_extractf128_ps (u2, 0);
2027  __m128 res6 = _mm256_extractf128_ps (u2, 1);
2028  __m128 res3 = _mm256_extractf128_ps (u3, 0);
2029  __m128 res7 = _mm256_extractf128_ps (u3, 1);
2030 
2031  // Cannot use the same store instructions in both paths of the 'if'
2032  // because the compiler cannot know that there is no aliasing between
2033  // pointers
2034  if (add_into)
2035  {
2036  res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), res0);
2037  _mm_storeu_ps(out+4*i+offsets[0], res0);
2038  res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), res1);
2039  _mm_storeu_ps(out+4*i+offsets[1], res1);
2040  res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), res2);
2041  _mm_storeu_ps(out+4*i+offsets[2], res2);
2042  res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), res3);
2043  _mm_storeu_ps(out+4*i+offsets[3], res3);
2044  res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4]), res4);
2045  _mm_storeu_ps(out+4*i+offsets[4], res4);
2046  res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5]), res5);
2047  _mm_storeu_ps(out+4*i+offsets[5], res5);
2048  res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6]), res6);
2049  _mm_storeu_ps(out+4*i+offsets[6], res6);
2050  res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7]), res7);
2051  _mm_storeu_ps(out+4*i+offsets[7], res7);
2052  }
2053  else
2054  {
2055  _mm_storeu_ps(out+4*i+offsets[0], res0);
2056  _mm_storeu_ps(out+4*i+offsets[1], res1);
2057  _mm_storeu_ps(out+4*i+offsets[2], res2);
2058  _mm_storeu_ps(out+4*i+offsets[3], res3);
2059  _mm_storeu_ps(out+4*i+offsets[4], res4);
2060  _mm_storeu_ps(out+4*i+offsets[5], res5);
2061  _mm_storeu_ps(out+4*i+offsets[6], res6);
2062  _mm_storeu_ps(out+4*i+offsets[7], res7);
2063  }
2064  }
2065  if (add_into)
2066  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2067  for (unsigned int v=0; v<8; ++v)
2068  out[offsets[v]+i] += in[i][v];
2069  else
2070  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2071  for (unsigned int v=0; v<8; ++v)
2072  out[offsets[v]+i] = in[i][v];
2073 }
2074 
2075 
2076 
2077 // for safety, also check that __SSE2__ is defined in case the user manually
2078 // set some conflicting compile flags which prevent compilation
2079 
2080 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
2081 
2085 template <>
2086 class VectorizedArray<double>
2087 {
2088 public:
2092  static const unsigned int n_array_elements = 2;
2093 
2097  DEAL_II_ALWAYS_INLINE
2098  VectorizedArray &
2099  operator = (const double x)
2100  {
2101  data = _mm_set1_pd(x);
2102  return *this;
2103  }
2104 
2108  DEAL_II_ALWAYS_INLINE
2109  double &
2110  operator [] (const unsigned int comp)
2111  {
2112  AssertIndexRange (comp, 2);
2113  return *(reinterpret_cast<double *>(&data)+comp);
2114  }
2115 
2119  DEAL_II_ALWAYS_INLINE
2120  const double &
2121  operator [] (const unsigned int comp) const
2122  {
2123  AssertIndexRange (comp, 2);
2124  return *(reinterpret_cast<const double *>(&data)+comp);
2125  }
2126 
2130  DEAL_II_ALWAYS_INLINE
2131  VectorizedArray &
2132  operator += (const VectorizedArray &vec)
2133  {
2134 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2135  data += vec.data;
2136 #else
2137  data = _mm_add_pd(data,vec.data);
2138 #endif
2139  return *this;
2140  }
2141 
2145  DEAL_II_ALWAYS_INLINE
2146  VectorizedArray &
2147  operator -= (const VectorizedArray &vec)
2148  {
2149 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2150  data -= vec.data;
2151 #else
2152  data = _mm_sub_pd(data,vec.data);
2153 #endif
2154  return *this;
2155  }
2156 
2160  DEAL_II_ALWAYS_INLINE
2161  VectorizedArray &
2162  operator *= (const VectorizedArray &vec)
2163  {
2164 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2165  data *= vec.data;
2166 #else
2167  data = _mm_mul_pd(data,vec.data);
2168 #endif
2169  return *this;
2170  }
2171 
2175  DEAL_II_ALWAYS_INLINE
2176  VectorizedArray &
2177  operator /= (const VectorizedArray &vec)
2178  {
2179 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2180  data /= vec.data;
2181 #else
2182  data = _mm_div_pd(data,vec.data);
2183 #endif
2184  return *this;
2185  }
2186 
2192  DEAL_II_ALWAYS_INLINE
2193  void load (const double *ptr)
2194  {
2195  data = _mm_loadu_pd (ptr);
2196  }
2197 
2204  DEAL_II_ALWAYS_INLINE
2205  void store (double *ptr) const
2206  {
2207  _mm_storeu_pd (ptr, data);
2208  }
2209 
2222  DEAL_II_ALWAYS_INLINE
2223  void gather (const double *base_ptr,
2224  const unsigned int *offsets)
2225  {
2226  for (unsigned int i=0; i<2; ++i)
2227  *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
2228  }
2229 
2242  DEAL_II_ALWAYS_INLINE
2243  void scatter (const unsigned int *offsets,
2244  double *base_ptr) const
2245  {
2246  Assert(offsets[0] != offsets[1],
2247  ExcMessage("Result of scatter undefined if two offset elements"
2248  " point to the same position"));
2249 
2250  for (unsigned int i=0; i<2; ++i)
2251  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
2252  }
2253 
2258  __m128d data;
2259 
2260 private:
2265  DEAL_II_ALWAYS_INLINE
2266  VectorizedArray
2267  get_sqrt () const
2268  {
2269  VectorizedArray res;
2270  res.data = _mm_sqrt_pd(data);
2271  return res;
2272  }
2273 
2278  DEAL_II_ALWAYS_INLINE
2279  VectorizedArray
2280  get_abs () const
2281  {
2282  // to compute the absolute value, perform
2283  // bitwise andnot with -0. This will leave all
2284  // value and exponent bits unchanged but force
2285  // the sign value to +.
2286  __m128d mask = _mm_set1_pd (-0.);
2287  VectorizedArray res;
2288  res.data = _mm_andnot_pd(mask, data);
2289  return res;
2290  }
2291 
2296  DEAL_II_ALWAYS_INLINE
2297  VectorizedArray
2298  get_max (const VectorizedArray &other) const
2299  {
2300  VectorizedArray res;
2301  res.data = _mm_max_pd (data, other.data);
2302  return res;
2303  }
2304 
2309  DEAL_II_ALWAYS_INLINE
2310  VectorizedArray
2311  get_min (const VectorizedArray &other) const
2312  {
2313  VectorizedArray res;
2314  res.data = _mm_min_pd (data, other.data);
2315  return res;
2316  }
2317 
2321  template <typename Number2> friend VectorizedArray<Number2>
2322  std::sqrt (const VectorizedArray<Number2> &);
2323  template <typename Number2> friend VectorizedArray<Number2>
2324  std::abs (const VectorizedArray<Number2> &);
2325  template <typename Number2> friend VectorizedArray<Number2>
2326  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2327  template <typename Number2> friend VectorizedArray<Number2>
2328  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2329 };
2330 
2331 
2332 
2336 template <>
2337 inline
2338 void vectorized_load_and_transpose(const unsigned int n_entries,
2339  const double *in,
2340  const unsigned int *offsets,
2341  VectorizedArray<double> *out)
2342 {
2343  const unsigned int n_chunks = n_entries/2;
2344  for (unsigned int i=0; i<n_chunks; ++i)
2345  {
2346  __m128d u0 = _mm_loadu_pd(in+2*i+offsets[0]);
2347  __m128d u1 = _mm_loadu_pd(in+2*i+offsets[1]);
2348  out[2*i+0].data = _mm_unpacklo_pd (u0, u1);
2349  out[2*i+1].data = _mm_unpackhi_pd (u0, u1);
2350  }
2351  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2352  for (unsigned int v=0; v<2; ++v)
2353  out[i][v] = in[offsets[v]+i];
2354 }
2355 
2356 
2357 
2361 template <>
2362 inline
2363 void
2364 vectorized_transpose_and_store(const bool add_into,
2365  const unsigned int n_entries,
2366  const VectorizedArray<double> *in,
2367  const unsigned int *offsets,
2368  double *out)
2369 {
2370  const unsigned int n_chunks = n_entries/2;
2371  if (add_into)
2372  {
2373  for (unsigned int i=0; i<n_chunks; ++i)
2374  {
2375  __m128d u0 = in[2*i+0].data;
2376  __m128d u1 = in[2*i+1].data;
2377  __m128d res0 = _mm_unpacklo_pd (u0, u1);
2378  __m128d res1 = _mm_unpackhi_pd (u0, u1);
2379  _mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0));
2380  _mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1));
2381  }
2382  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2383  for (unsigned int v=0; v<2; ++v)
2384  out[offsets[v]+i] += in[i][v];
2385  }
2386  else
2387  {
2388  for (unsigned int i=0; i<n_chunks; ++i)
2389  {
2390  __m128d u0 = in[2*i+0].data;
2391  __m128d u1 = in[2*i+1].data;
2392  __m128d res0 = _mm_unpacklo_pd (u0, u1);
2393  __m128d res1 = _mm_unpackhi_pd (u0, u1);
2394  _mm_storeu_pd(out+2*i+offsets[0], res0);
2395  _mm_storeu_pd(out+2*i+offsets[1], res1);
2396  }
2397  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2398  for (unsigned int v=0; v<2; ++v)
2399  out[offsets[v]+i] = in[i][v];
2400  }
2401 }
2402 
2403 
2404 
2408 template <>
2409 class VectorizedArray<float>
2410 {
2411 public:
2415  static const unsigned int n_array_elements = 4;
2416 
2421  DEAL_II_ALWAYS_INLINE
2422  VectorizedArray &
2423  operator = (const float x)
2424  {
2425  data = _mm_set1_ps(x);
2426  return *this;
2427  }
2428 
2432  DEAL_II_ALWAYS_INLINE
2433  float &
2434  operator [] (const unsigned int comp)
2435  {
2436  AssertIndexRange (comp, 4);
2437  return *(reinterpret_cast<float *>(&data)+comp);
2438  }
2439 
2443  DEAL_II_ALWAYS_INLINE
2444  const float &
2445  operator [] (const unsigned int comp) const
2446  {
2447  AssertIndexRange (comp, 4);
2448  return *(reinterpret_cast<const float *>(&data)+comp);
2449  }
2450 
2454  DEAL_II_ALWAYS_INLINE
2455  VectorizedArray &
2456  operator += (const VectorizedArray &vec)
2457  {
2458 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2459  data += vec.data;
2460 #else
2461  data = _mm_add_ps(data,vec.data);
2462 #endif
2463  return *this;
2464  }
2465 
2469  DEAL_II_ALWAYS_INLINE
2470  VectorizedArray &
2471  operator -= (const VectorizedArray &vec)
2472  {
2473 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2474  data -= vec.data;
2475 #else
2476  data = _mm_sub_ps(data,vec.data);
2477 #endif
2478  return *this;
2479  }
2480 
2484  DEAL_II_ALWAYS_INLINE
2485  VectorizedArray &
2486  operator *= (const VectorizedArray &vec)
2487  {
2488 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2489  data *= vec.data;
2490 #else
2491  data = _mm_mul_ps(data,vec.data);
2492 #endif
2493  return *this;
2494  }
2495 
2499  DEAL_II_ALWAYS_INLINE
2500  VectorizedArray &
2501  operator /= (const VectorizedArray &vec)
2502  {
2503 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2504  data /= vec.data;
2505 #else
2506  data = _mm_div_ps(data,vec.data);
2507 #endif
2508  return *this;
2509  }
2510 
2516  DEAL_II_ALWAYS_INLINE
2517  void load (const float *ptr)
2518  {
2519  data = _mm_loadu_ps (ptr);
2520  }
2521 
2528  DEAL_II_ALWAYS_INLINE
2529  void store (float *ptr) const
2530  {
2531  _mm_storeu_ps (ptr, data);
2532  }
2533 
2546  DEAL_II_ALWAYS_INLINE
2547  void gather (const float *base_ptr,
2548  const unsigned int *offsets)
2549  {
2550  for (unsigned int i=0; i<4; ++i)
2551  *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
2552  }
2553 
2566  DEAL_II_ALWAYS_INLINE
2567  void scatter (const unsigned int *offsets,
2568  float *base_ptr) const
2569  {
2570  for (unsigned int i=0; i<4; ++i)
2571  for (unsigned int j=i+1; j<4; ++j)
2572  Assert(offsets[i] != offsets[j],
2573  ExcMessage("Result of scatter undefined if two offset elements"
2574  " point to the same position"));
2575 
2576  for (unsigned int i=0; i<4; ++i)
2577  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
2578  }
2579 
2584  __m128 data;
2585 
2586 private:
2591  DEAL_II_ALWAYS_INLINE
2592  VectorizedArray
2593  get_sqrt () const
2594  {
2595  VectorizedArray res;
2596  res.data = _mm_sqrt_ps(data);
2597  return res;
2598  }
2599 
2604  DEAL_II_ALWAYS_INLINE
2605  VectorizedArray
2606  get_abs () const
2607  {
2608  // to compute the absolute value, perform bitwise andnot with -0. This
2609  // will leave all value and exponent bits unchanged but force the sign
2610  // value to +.
2611  __m128 mask = _mm_set1_ps (-0.f);
2612  VectorizedArray res;
2613  res.data = _mm_andnot_ps(mask, data);
2614  return res;
2615  }
2616 
2621  DEAL_II_ALWAYS_INLINE
2622  VectorizedArray
2623  get_max (const VectorizedArray &other) const
2624  {
2625  VectorizedArray res;
2626  res.data = _mm_max_ps (data, other.data);
2627  return res;
2628  }
2629 
2634  DEAL_II_ALWAYS_INLINE
2635  VectorizedArray
2636  get_min (const VectorizedArray &other) const
2637  {
2638  VectorizedArray res;
2639  res.data = _mm_min_ps (data, other.data);
2640  return res;
2641  }
2642 
2646  template <typename Number2> friend VectorizedArray<Number2>
2647  std::sqrt (const VectorizedArray<Number2> &);
2648  template <typename Number2> friend VectorizedArray<Number2>
2649  std::abs (const VectorizedArray<Number2> &);
2650  template <typename Number2> friend VectorizedArray<Number2>
2651  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2652  template <typename Number2> friend VectorizedArray<Number2>
2653  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2654 };
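
As an illustration of how this SSE2 specialization is typically used, here is a minimal sketch (not part of the library source; the function name example_saxpy is made up, and it assumes an SSE2 build where n_array_elements == 4 and an array length divisible by 4):

#include <deal.II/base/vectorization.h>

// compute y += a*x, four floats per iteration
void example_saxpy (const float a, const unsigned int n,
                    const float *x, float *y)
{
  dealii::VectorizedArray<float> va, vx, vy;
  va = a;                 // broadcast the scalar into all four lanes
  for (unsigned int i=0; i<n; i+=4)
    {
      vx.load (x+i);      // unaligned read of four contiguous floats
      vy.load (y+i);
      vy += va * vx;      // one vector multiply and one vector add
      vy.store (y+i);     // unaligned write back
    }
}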
2655 
2656 
2657 
2661 template <>
2662 inline
2663 void vectorized_load_and_transpose(const unsigned int n_entries,
2664  const float *in,
2665  const unsigned int *offsets,
2666  VectorizedArray<float> *out)
2667 {
2668  const unsigned int n_chunks = n_entries/4;
2669  for (unsigned int i=0; i<n_chunks; ++i)
2670  {
2671  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
2672  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
2673  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
2674  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
2675  __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44);
2676  __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee);
2677  __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44);
2678  __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee);
2679  out[4*i+0].data = _mm_shuffle_ps (v0, v2, 0x88);
2680  out[4*i+1].data = _mm_shuffle_ps (v0, v2, 0xdd);
2681  out[4*i+2].data = _mm_shuffle_ps (v1, v3, 0x88);
2682  out[4*i+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
2683  }
2684  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2685  for (unsigned int v=0; v<4; ++v)
2686  out[i][v] = in[offsets[v]+i];
2687 }
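
To make the effect of the transposition concrete, here is a small sketch (editorial; the names are made up). Four data streams of length 8 lie back-to-back in one array and are gathered into vectorized form so that out[i][v] == in[offsets[v]+i]:

#include <deal.II/base/vectorization.h>

void transpose_example ()
{
  float in[32];
  for (unsigned int i=0; i<32; ++i)
    in[i] = i;                          // stream v occupies in[8*v ... 8*v+7]

  const unsigned int offsets[4] = {0, 8, 16, 24};
  dealii::VectorizedArray<float> out[8];
  dealii::vectorized_load_and_transpose (8, in, offsets, out);
  // e.g. out[2][3] == in[24+2] == 26.f
}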
2688 
2689 
2690 
2694 template <>
2695 inline
2696 void
2697 vectorized_transpose_and_store(const bool add_into,
2698  const unsigned int n_entries,
2699  const VectorizedArray<float> *in,
2700  const unsigned int *offsets,
2701  float *out)
2702 {
2703  const unsigned int n_chunks = n_entries/4;
2704  for (unsigned int i=0; i<n_chunks; ++i)
2705  {
2706  __m128 u0 = in[4*i+0].data;
2707  __m128 u1 = in[4*i+1].data;
2708  __m128 u2 = in[4*i+2].data;
2709  __m128 u3 = in[4*i+3].data;
2710  __m128 t0 = _mm_shuffle_ps (u0, u1, 0x44);
2711  __m128 t1 = _mm_shuffle_ps (u0, u1, 0xee);
2712  __m128 t2 = _mm_shuffle_ps (u2, u3, 0x44);
2713  __m128 t3 = _mm_shuffle_ps (u2, u3, 0xee);
2714  u0 = _mm_shuffle_ps (t0, t2, 0x88);
2715  u1 = _mm_shuffle_ps (t0, t2, 0xdd);
2716  u2 = _mm_shuffle_ps (t1, t3, 0x88);
2717  u3 = _mm_shuffle_ps (t1, t3, 0xdd);
2718 
2719  // Cannot use the same store instructions in both paths of the 'if'
2720  // because the compiler cannot know that there is no aliasing between
2721  // pointers
2722  if (add_into)
2723  {
2724  u0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), u0);
2725  _mm_storeu_ps(out+4*i+offsets[0], u0);
2726  u1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), u1);
2727  _mm_storeu_ps(out+4*i+offsets[1], u1);
2728  u2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), u2);
2729  _mm_storeu_ps(out+4*i+offsets[2], u2);
2730  u3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), u3);
2731  _mm_storeu_ps(out+4*i+offsets[3], u3);
2732  }
2733  else
2734  {
2735  _mm_storeu_ps(out+4*i+offsets[0], u0);
2736  _mm_storeu_ps(out+4*i+offsets[1], u1);
2737  _mm_storeu_ps(out+4*i+offsets[2], u2);
2738  _mm_storeu_ps(out+4*i+offsets[3], u3);
2739  }
2740  }
2741  if (add_into)
2742  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2743  for (unsigned int v=0; v<4; ++v)
2744  out[offsets[v]+i] += in[i][v];
2745  else
2746  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2747  for (unsigned int v=0; v<4; ++v)
2748  out[offsets[v]+i] = in[i][v];
2749 }
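
Continuing the sketch above, vectorized_transpose_and_store is the inverse operation; with add_into == true it accumulates into the output array instead of overwriting it (editorial example; store_example is a made-up name):

#include <deal.II/base/vectorization.h>

void store_example (const dealii::VectorizedArray<float> *data,
                    const unsigned int *offsets)
{
  float result[32] = {};
  // overwrite: result[offsets[v]+i] = data[i][v]
  dealii::vectorized_transpose_and_store (false, 8, data, offsets, result);
  // accumulate: result[offsets[v]+i] += data[i][v]
  dealii::vectorized_transpose_and_store (true, 8, data, offsets, result);
}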
2750 
2751 
2752 
2753 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
2754 
2755 
2761 template <typename Number>
2762 inline DEAL_II_ALWAYS_INLINE
2763 VectorizedArray<Number>
2764 operator + (const VectorizedArray<Number> &u,
2765  const VectorizedArray<Number> &v)
2766 {
2767  VectorizedArray<Number> tmp = u;
2768  return tmp+=v;
2769 }
2770 
2776 template <typename Number>
2777 inline DEAL_II_ALWAYS_INLINE
2778 VectorizedArray<Number>
2779 operator - (const VectorizedArray<Number> &u,
2780  const VectorizedArray<Number> &v)
2781 {
2782  VectorizedArray<Number> tmp = u;
2783  return tmp-=v;
2784 }
2785 
2791 template <typename Number>
2792 inline DEAL_II_ALWAYS_INLINE
2793 VectorizedArray<Number>
2794 operator * (const VectorizedArray<Number> &u,
2795  const VectorizedArray<Number> &v)
2796 {
2797  VectorizedArray<Number> tmp = u;
2798  return tmp*=v;
2799 }
2800 
2806 template <typename Number>
2807 inline DEAL_II_ALWAYS_INLINE
2808 VectorizedArray<Number>
2809 operator / (const VectorizedArray<Number> &u,
2810  const VectorizedArray<Number> &v)
2811 {
2812  VectorizedArray<Number> tmp = u;
2813  return tmp/=v;
2814 }
2815 
2822 template <typename Number>
2823 inline DEAL_II_ALWAYS_INLINE
2824 VectorizedArray<Number>
2825 operator + (const Number &u,
2826  const VectorizedArray<Number> &v)
2827 {
2828  VectorizedArray<Number> tmp;
2829  tmp = u;
2830  return tmp+=v;
2831 }
2832 
2841 inline DEAL_II_ALWAYS_INLINE
2842 VectorizedArray<float>
2843 operator + (const double &u,
2844  const VectorizedArray<float> &v)
2845 {
2846  VectorizedArray<float> tmp;
2847  tmp = u;
2848  return tmp+=v;
2849 }
2850 
2857 template <typename Number>
2858 inline DEAL_II_ALWAYS_INLINE
2859 VectorizedArray<Number>
2860 operator + (const VectorizedArray<Number> &v,
2861  const Number &u)
2862 {
2863  return u + v;
2864 }
2865 
2874 inline DEAL_II_ALWAYS_INLINE
2875 VectorizedArray<float>
2876 operator + (const VectorizedArray<float> &v,
2877  const double &u)
2878 {
2879  return u + v;
2880 }
2881 
2888 template <typename Number>
2889 inline DEAL_II_ALWAYS_INLINE
2890 VectorizedArray<Number>
2891 operator - (const Number &u,
2892  const VectorizedArray<Number> &v)
2893 {
2894  VectorizedArray<Number> tmp;
2895  tmp = u;
2896  return tmp-=v;
2897 }
2898 
2907 inline DEAL_II_ALWAYS_INLINE
2908 VectorizedArray<float>
2909 operator - (const double &u,
2910  const VectorizedArray<float> &v)
2911 {
2912  VectorizedArray<float> tmp;
2913  tmp = float(u);
2914  return tmp-=v;
2915 }
2916 
2923 template <typename Number>
2924 inline DEAL_II_ALWAYS_INLINE
2925 VectorizedArray<Number>
2926 operator - (const VectorizedArray<Number> &v,
2927  const Number &u)
2928 {
2929  VectorizedArray<Number> tmp;
2930  tmp = u;
2931  return v-tmp;
2932 }
2933 
2942 inline DEAL_II_ALWAYS_INLINE
2943 VectorizedArray<float>
2944 operator - (const VectorizedArray<float> &v,
2945  const double &u)
2946 {
2947  VectorizedArray<float> tmp;
2948  tmp = float(u);
2949  return v-tmp;
2950 }
2951 
2958 template <typename Number>
2959 inline DEAL_II_ALWAYS_INLINE
2960 VectorizedArray<Number>
2961 operator * (const Number &u,
2962  const VectorizedArray<Number> &v)
2963 {
2964  VectorizedArray<Number> tmp;
2965  tmp = u;
2966  return tmp*=v;
2967 }
2968 
2977 inline DEAL_II_ALWAYS_INLINE
2978 VectorizedArray<float>
2979 operator * (const double &u,
2980  const VectorizedArray<float> &v)
2981 {
2982  VectorizedArray<float> tmp;
2983  tmp = float(u);
2984  return tmp*=v;
2985 }
2986 
2993 template <typename Number>
2994 inline DEAL_II_ALWAYS_INLINE
2995 VectorizedArray<Number>
2996 operator * (const VectorizedArray<Number> &v,
2997  const Number &u)
2998 {
2999  return u * v;
3000 }
3001 
3010 inline DEAL_II_ALWAYS_INLINE
3011 VectorizedArray<float>
3012 operator * (const VectorizedArray<float> &v,
3013  const double &u)
3014 {
3015  return u * v;
3016 }
3017 
3024 template <typename Number>
3025 inline DEAL_II_ALWAYS_INLINE
3026 VectorizedArray<Number>
3027 operator / (const Number &u,
3028  const VectorizedArray<Number> &v)
3029 {
3030  VectorizedArray<Number> tmp;
3031  tmp = u;
3032  return tmp/=v;
3033 }
3034 
3043 inline DEAL_II_ALWAYS_INLINE
3044 VectorizedArray<float>
3045 operator / (const double &u,
3046  const VectorizedArray<float> &v)
3047 {
3048  VectorizedArray<float> tmp;
3049  tmp = float(u);
3050  return tmp/=v;
3051 }
3052 
3059 template <typename Number>
3060 inline DEAL_II_ALWAYS_INLINE
3061 VectorizedArray<Number>
3062 operator / (const VectorizedArray<Number> &v,
3063  const Number &u)
3064 {
3065  VectorizedArray<Number> tmp;
3066  tmp = u;
3067  return v/tmp;
3068 }
3069 
3078 inline DEAL_II_ALWAYS_INLINE
3079 VectorizedArray<float>
3080 operator / (const VectorizedArray<float> &v,
3081  const double &u)
3082 {
3083  VectorizedArray<float> tmp;
3084  tmp = float(u);
3085  return v/tmp;
3086 }
3087 
3093 template <typename Number>
3094 inline DEAL_II_ALWAYS_INLINE
3095 VectorizedArray<Number>
3096 operator + (const VectorizedArray<Number> &u)
3097 {
3098  return u;
3099 }
3100 
3106 template <typename Number>
3107 inline DEAL_II_ALWAYS_INLINE
3108 VectorizedArray<Number>
3109 operator - (const VectorizedArray<Number> &u)
3110 {
3111  // to get a negative sign, subtract the input from zero (could also
3112  // multiply by -1, but this one is slightly simpler)
3113  return VectorizedArray<Number>()-u;
3114 }
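
A short sketch of these overloads in action (editorial, not library code). Note that combining a double scalar with a VectorizedArray<float> deliberately yields a VectorizedArray<float>: the scalar is narrowed to float, since widening the array would halve the number of lanes:

#include <deal.II/base/vectorization.h>

void operator_example ()
{
  dealii::VectorizedArray<float>  vf;
  dealii::VectorizedArray<double> vd;
  vf = 1.0f;
  vd = 2.0;

  dealii::VectorizedArray<double> a = 3.0 + vd;   // scalar broadcast, then add
  dealii::VectorizedArray<float>  b = 3.0 * vf;   // double narrowed to float
  dealii::VectorizedArray<float>  c = -vf / 2.0;  // unary minus, then division
}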
3115 
3116 
3117 DEAL_II_NAMESPACE_CLOSE
3118 
3119 
3126 namespace std
3127 {
3135  template <typename Number>
3136  inline
3137  ::VectorizedArray<Number>
3138  sin (const ::VectorizedArray<Number> &x)
3139  {
3140  // put values in an array and later read in that array with an unaligned
3141  // read. This should save some instructions as compared to directly
3142  // setting the individual elements and also circumvents a compiler
3143  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
3144  // from April 2014, topic "matrix_free/step-48 Test").
3145  Number values[::VectorizedArray<Number>::n_array_elements];
3146  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3147  values[i] = std::sin(x[i]);
3148  ::VectorizedArray<Number> out;
3149  out.load(&values[0]);
3150  return out;
3151  }
3152 
3153 
3154 
3162  template <typename Number>
3163  inline
3164  ::VectorizedArray<Number>
3165  cos (const ::VectorizedArray<Number> &x)
3166  {
3167  Number values[::VectorizedArray<Number>::n_array_elements];
3168  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3169  values[i] = std::cos(x[i]);
3170  ::VectorizedArray<Number> out;
3171  out.load(&values[0]);
3172  return out;
3173  }
3174 
3175 
3176 
3184  template <typename Number>
3185  inline
3186  ::VectorizedArray<Number>
3187  tan (const ::VectorizedArray<Number> &x)
3188  {
3189  Number values[::VectorizedArray<Number>::n_array_elements];
3190  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3191  values[i] = std::tan(x[i]);
3192  ::VectorizedArray<Number> out;
3193  out.load(&values[0]);
3194  return out;
3195  }
3196 
3197 
3198 
3206  template <typename Number>
3207  inline
3208  ::VectorizedArray<Number>
3209  exp (const ::VectorizedArray<Number> &x)
3210  {
3211  Number values[::VectorizedArray<Number>::n_array_elements];
3212  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3213  values[i] = std::exp(x[i]);
3214  ::VectorizedArray<Number> out;
3215  out.load(&values[0]);
3216  return out;
3217  }
3218 
3219 
3220 
3228  template <typename Number>
3229  inline
3230  ::VectorizedArray<Number>
3231  log (const ::VectorizedArray<Number> &x)
3232  {
3233  Number values[::VectorizedArray<Number>::n_array_elements];
3234  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3235  values[i] = std::log(x[i]);
3236  ::VectorizedArray<Number> out;
3237  out.load(&values[0]);
3238  return out;
3239  }
3240 
3241 
3242 
3250  template <typename Number>
3251  inline
3252  ::VectorizedArray<Number>
3253  sqrt (const ::VectorizedArray<Number> &x)
3254  {
3255  return x.get_sqrt();
3256  }
3257 
3258 
3259 
3267  template <typename Number>
3268  inline
3269  ::VectorizedArray<Number>
3270  pow (const ::VectorizedArray<Number> &x,
3271  const Number p)
3272  {
3273  Number values[::VectorizedArray<Number>::n_array_elements];
3274  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3275  values[i] = std::pow(x[i], p);
3276  ::VectorizedArray<Number> out;
3277  out.load(&values[0]);
3278  return out;
3279  }
3280 
3281 
3282 
3290  template <typename Number>
3291  inline
3292  ::VectorizedArray<Number>
3293  abs (const ::VectorizedArray<Number> &x)
3294  {
3295  return x.get_abs();
3296  }
3297 
3298 
3299 
3307  template <typename Number>
3308  inline
3309  ::VectorizedArray<Number>
3310  max (const ::VectorizedArray<Number> &x,
3311  const ::VectorizedArray<Number> &y)
3312  {
3313  return x.get_max(y);
3314  }
3315 
3316 
3317 
3325  template <typename Number>
3326  inline
3327  ::VectorizedArray<Number>
3328  min (const ::VectorizedArray<Number> &x,
3329  const ::VectorizedArray<Number> &y)
3330  {
3331  return x.get_min(y);
3332  }
3333 
3334 }
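
Usage sketch for these overloads (editorial; math_example is a made-up name). All of them operate lane by lane; in the vectorized specializations, sqrt, abs, max and min additionally map to single SSE/AVX instructions through get_sqrt, get_abs, get_max and get_min above:

#include <deal.II/base/vectorization.h>
#include <cmath>

void math_example ()
{
  dealii::VectorizedArray<double> x;
  x = 0.25;

  dealii::VectorizedArray<double> s = std::sin (x);      // lane-by-lane libm call
  dealii::VectorizedArray<double> r = std::sqrt (x);     // hardware sqrt on all lanes
  dealii::VectorizedArray<double> p = std::pow (x, 2.0); // lane-by-lane std::pow
  dealii::VectorizedArray<double> m = std::max (s, r);   // vectorized maximum
}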
3335 
3336 #endif