Reference documentation for deal.II version 9.0.0
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 #include <deal.II/base/exceptions.h>
22 #include <deal.II/base/template_constraints.h>
23 
24 #include <cmath>
25 
26 // Note:
27 // The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
28 // according to the following scheme
29 // #ifdef __AVX512F__
30 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
31 // #elif defined (__AVX__)
32 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
33 // #elif defined (__SSE2__)
34 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
35 // #else
36 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
37 // #endif
38 // In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
39 // 'check_01_cpu_features.cmake', ensures that these features are not only
40 // present in the compilation unit but also working properly.
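//
// For orientation, the resulting width of VectorizedArray at each level (see
// the n_array_elements members below) is:
//   level 0 (no SIMD) : 1 double,  1 float  per VectorizedArray
//   level 1 (SSE2)    : 2 doubles, 4 floats per VectorizedArray
//   level 2 (AVX)     : 4 doubles, 8 floats per VectorizedArray
//   level 3 (AVX-512) : 8 doubles, 16 floats per VectorizedArray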
41 
42 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && !defined(__AVX__)
43 #error "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
44 #endif
45 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && !defined(__AVX512F__)
46 #error "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
47 #endif
48 
49 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 // AVX, AVX-512
50 #include <immintrin.h>
51 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1 // SSE2
52 #include <emmintrin.h>
53 #endif
54 
55 
56 DEAL_II_NAMESPACE_OPEN
57 
58 
59 namespace internal
60 {
71  template <typename T>
73  {
74  static const VectorizedArray<T> &value (const VectorizedArray<T> &t)
75  {
76  return t;
77  }
78 
79  static VectorizedArray<T> value (const T &t)
80  {
 81  VectorizedArray<T> tmp;
 82  tmp=t;
83  return tmp;
84  }
85  };
86 }
87 
88 
89 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
90 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
91 
92 template <typename Number>
93 struct EnableIfScalar<VectorizedArray<Number> >
94 {
 95  typedef VectorizedArray<Number> type;
 96 };
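// For example, a minimal sketch of what this trait enables (assuming the
// usual Tensor interface):
//
//   Tensor<1,dim,VectorizedArray<double> > grad;
//   grad = grad * make_vectorized_array(2.);
//
// i.e. VectorizedArray<double> takes the place of the scalar number type.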
97 
98 
99 
150 template <typename Number>
151 class VectorizedArray
152 {
153 public:
157  static const unsigned int n_array_elements = 1;
158 
159  // POD means that there should be no user-defined constructors, destructors
 160  // and copy functions (the standard is somewhat relaxed in C++11, though).
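 // In particular, a default-constructed VectorizedArray leaves its entries
 // uninitialized (like a plain double); lanes are typically set through
 // assignment, e.g. (sketch): VectorizedArray<double> x; x = 0.;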
161 
165  DEAL_II_ALWAYS_INLINE
 166  VectorizedArray &
 167  operator = (const Number scalar)
168  {
169  data = scalar;
170  return *this;
171  }
172 
176  DEAL_II_ALWAYS_INLINE
177  Number &
178  operator [] (const unsigned int comp)
179  {
180  (void)comp;
181  AssertIndexRange (comp, 1);
182  return data;
183  }
184 
188  DEAL_II_ALWAYS_INLINE
189  const Number &
190  operator [] (const unsigned int comp) const
191  {
192  (void)comp;
193  AssertIndexRange (comp, 1);
194  return data;
195  }
196 
200  DEAL_II_ALWAYS_INLINE
 201  VectorizedArray &
 202  operator += (const VectorizedArray<Number> &vec)
 203  {
204  data+=vec.data;
205  return *this;
206  }
207 
211  DEAL_II_ALWAYS_INLINE
 212  VectorizedArray &
 213  operator -= (const VectorizedArray<Number> &vec)
 214  {
215  data-=vec.data;
216  return *this;
217  }
218 
222  DEAL_II_ALWAYS_INLINE
 223  VectorizedArray &
 224  operator *= (const VectorizedArray<Number> &vec)
 225  {
226  data*=vec.data;
227  return *this;
228  }
229 
233  DEAL_II_ALWAYS_INLINE
 234  VectorizedArray &
 235  operator /= (const VectorizedArray<Number> &vec)
 236  {
237  data/=vec.data;
238  return *this;
239  }
240 
247  DEAL_II_ALWAYS_INLINE
248  void load (const Number *ptr)
249  {
250  data = *ptr;
251  }
252 
259  DEAL_II_ALWAYS_INLINE
260  void store (Number *ptr) const
261  {
262  *ptr = data;
263  }
264 
309  DEAL_II_ALWAYS_INLINE
310  void streaming_store (Number *ptr) const
311  {
312  *ptr = data;
313  }
314 
327  DEAL_II_ALWAYS_INLINE
328  void gather (const Number *base_ptr,
329  const unsigned int *offsets)
330  {
331  data = base_ptr[offsets[0]];
332  }
333 
346  DEAL_II_ALWAYS_INLINE
347  void scatter (const unsigned int *offsets,
348  Number *base_ptr) const
349  {
350  base_ptr[offsets[0]] = data;
351  }
352 
357  Number data;
358 
359 private:
364  DEAL_II_ALWAYS_INLINE
 365  VectorizedArray
 366  get_sqrt () const
367  {
368  VectorizedArray res;
369  res.data = std::sqrt(data);
370  return res;
371  }
372 
377  DEAL_II_ALWAYS_INLINE
 378  VectorizedArray
 379  get_abs () const
380  {
381  VectorizedArray res;
382  res.data = std::fabs(data);
383  return res;
384  }
385 
390  DEAL_II_ALWAYS_INLINE
 391  VectorizedArray
 392  get_max (const VectorizedArray &other) const
393  {
394  VectorizedArray res;
395  res.data = std::max (data, other.data);
396  return res;
397  }
398 
403  DEAL_II_ALWAYS_INLINE
 404  VectorizedArray
 405  get_min (const VectorizedArray &other) const
406  {
407  VectorizedArray res;
408  res.data = std::min (data, other.data);
409  return res;
410  }
411 
415  template <typename Number2> friend VectorizedArray<Number2>
416  std::sqrt (const VectorizedArray<Number2> &);
417  template <typename Number2> friend VectorizedArray<Number2>
418  std::abs (const VectorizedArray<Number2> &);
419  template <typename Number2> friend VectorizedArray<Number2>
420  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
421  template <typename Number2> friend VectorizedArray<Number2>
422  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
423 };
424 
425 
426 
433 template <typename Number>
434 inline DEAL_II_ALWAYS_INLINE
435 VectorizedArray<Number>
436 make_vectorized_array (const Number &u)
437 {
 438  VectorizedArray<Number> result;
 439  result = u;
440  return result;
441 }
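// A minimal usage sketch of the class and of make_vectorized_array(); 'ptr' is
// a hypothetical double array with at least n_array_elements entries:
//
//   VectorizedArray<double> a, b;
//   a.load(ptr);                      // fill the lanes from memory
//   b = make_vectorized_array(3.);    // broadcast 3.0 to all lanes
//   a += b;                           // element-wise addition
//   a = std::sqrt(a);                 // element-wise sqrt (friend declared above)
//   a.store(ptr);                     // write the lanes back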
442 
443 
444 
470 template <typename Number>
471 inline
472 void
473 vectorized_load_and_transpose(const unsigned int n_entries,
474  const Number *in,
475  const unsigned int *offsets,
 476  VectorizedArray<Number> *out)
477 {
478  for (unsigned int i=0; i<n_entries; ++i)
479  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
480  out[i][v] = in[offsets[v]+i];
481 }
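// As a concrete sketch of the index pattern out[i][v] = in[offsets[v]+i]
// above: with offsets = {0, 100, 200, ...} (one offset per lane), lane v of
// out[0] receives in[offsets[v]], lane v of out[1] receives in[offsets[v]+1],
// and so on, i.e. each lane reads its own contiguous range of the scalar
// input array.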
482 
483 
484 
523 template <typename Number>
524 inline
525 void
526 vectorized_transpose_and_store(const bool add_into,
527  const unsigned int n_entries,
528  const VectorizedArray<Number> *in,
529  const unsigned int *offsets,
530  Number *out)
531 {
532  if (add_into)
533  for (unsigned int i=0; i<n_entries; ++i)
534  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
535  out[offsets[v]+i] += in[i][v];
536  else
537  for (unsigned int i=0; i<n_entries; ++i)
538  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
539  out[offsets[v]+i] = in[i][v];
540 }
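// The two transpose functions are typically used as a pair around vectorized
// work on data stored in scalar arrays; a minimal sketch with hypothetical
// buffers 'scalar_in', 'scalar_out' and an 'offsets' array holding one entry
// per lane:
//
//   const unsigned int n_entries = 4;
//   VectorizedArray<double> tmp[4];
//   vectorized_load_and_transpose(n_entries, scalar_in, offsets, tmp);
//   for (unsigned int i=0; i<n_entries; ++i)
//     tmp[i] *= make_vectorized_array(2.);          // work on all lanes at once
//   vectorized_transpose_and_store(false, n_entries, tmp, offsets, scalar_out);
//
// where the first argument 'false' selects overwriting (rather than adding to)
// the output.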
541 
542 
543 
544 // for safety, also check that __AVX512F__ is defined in case the user manually
545 // set some conflicting compile flags which prevent compilation
546 
547 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
548 
552 template <>
553 class VectorizedArray<double>
554 {
555 public:
559  static const unsigned int n_array_elements = 8;
560 
564  DEAL_II_ALWAYS_INLINE
 565  VectorizedArray &
 566  operator = (const double x)
567  {
568  data = _mm512_set1_pd(x);
569  return *this;
570  }
571 
575  DEAL_II_ALWAYS_INLINE
576  double &
577  operator [] (const unsigned int comp)
578  {
579  AssertIndexRange (comp, 8);
580  return *(reinterpret_cast<double *>(&data)+comp);
581  }
582 
586  DEAL_II_ALWAYS_INLINE
587  const double &
588  operator [] (const unsigned int comp) const
589  {
590  AssertIndexRange (comp, 8);
591  return *(reinterpret_cast<const double *>(&data)+comp);
592  }
593 
597  DEAL_II_ALWAYS_INLINE
 598  VectorizedArray &
 599  operator += (const VectorizedArray &vec)
600  {
 601  // if the compiler supports vector arithmetic, we can simply use the +=
 602  // operator on the given data type. This allows the compiler to combine
 603  // additions with multiplications (fused multiply-add) if those
 604  // instructions are available. Otherwise, we need to use the built-in
 605  // intrinsic for __m512d
606 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
607  data += vec.data;
608 #else
609  data = _mm512_add_pd(data,vec.data);
610 #endif
611  return *this;
612  }
613 
617  DEAL_II_ALWAYS_INLINE
 618  VectorizedArray &
 619  operator -= (const VectorizedArray &vec)
620  {
621 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
622  data -= vec.data;
623 #else
624  data = _mm512_sub_pd(data,vec.data);
625 #endif
626  return *this;
627  }
631  DEAL_II_ALWAYS_INLINE
 632  VectorizedArray &
 633  operator *= (const VectorizedArray &vec)
634  {
635 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
636  data *= vec.data;
637 #else
638  data = _mm512_mul_pd(data,vec.data);
639 #endif
640  return *this;
641  }
642 
646  DEAL_II_ALWAYS_INLINE
 647  VectorizedArray &
 648  operator /= (const VectorizedArray &vec)
649  {
650 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
651  data /= vec.data;
652 #else
653  data = _mm512_div_pd(data,vec.data);
654 #endif
655  return *this;
656  }
657 
663  DEAL_II_ALWAYS_INLINE
664  void load (const double *ptr)
665  {
666  data = _mm512_loadu_pd (ptr);
667  }
668 
675  DEAL_II_ALWAYS_INLINE
676  void store (double *ptr) const
677  {
678  _mm512_storeu_pd (ptr, data);
679  }
680 
684  DEAL_II_ALWAYS_INLINE
685  void streaming_store (double *ptr) const
686  {
687  Assert(reinterpret_cast<std::size_t>(ptr)%64==0, ExcMessage("Memory not aligned"));
688  _mm512_stream_pd(ptr,data);
689  }
690 
703  DEAL_II_ALWAYS_INLINE
704  void gather (const double *base_ptr,
705  const unsigned int *offsets)
706  {
707  // unfortunately, there does not appear to be a 256 bit integer load, so
708  // do it by some reinterpret casts here. this is allowed because the Intel
709  // API allows aliasing between different vector types.
710  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
711  const __m256i index = *((__m256i *)(&index_val));
712  data = _mm512_i32gather_pd(index, base_ptr, 8);
713  }
714 
727  DEAL_II_ALWAYS_INLINE
728  void scatter (const unsigned int *offsets,
729  double *base_ptr) const
730  {
731  for (unsigned int i=0; i<8; ++i)
732  for (unsigned int j=i+1; j<8; ++j)
733  Assert(offsets[i] != offsets[j],
734  ExcMessage("Result of scatter undefined if two offset elements"
735  " point to the same position"));
736 
737  // unfortunately, there does not appear to be a 256 bit integer load, so
738  // do it by some reinterpret casts here. this is allowed because the Intel
739  // API allows aliasing between different vector types.
740  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
741  const __m256i index = *((__m256i *)(&index_val));
742  _mm512_i32scatter_pd(base_ptr, index, data, 8);
743  }
744 
749  __m512d data;
750 
751 private:
756  DEAL_II_ALWAYS_INLINE
 757  VectorizedArray
 758  get_sqrt () const
759  {
760  VectorizedArray res;
761  res.data = _mm512_sqrt_pd(data);
762  return res;
763  }
764 
769  DEAL_II_ALWAYS_INLINE
 770  VectorizedArray
 771  get_abs () const
772  {
773  // to compute the absolute value, perform bitwise andnot with -0. This
774  // will leave all value and exponent bits unchanged but force the sign
775  // value to +. Since there is no andnot for AVX512, we interpret the data
776  // as 64 bit integers and do the andnot on those types (note that andnot
777  // is a bitwise operation so the data type does not matter)
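 // (bit-level sketch: -0. has only the sign bit set, 0x8000000000000000, so
 // andnot(-0., x) computes ~0x8000000000000000 & x and clears exactly the
 // sign bit of every lane)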
778  __m512d mask = _mm512_set1_pd (-0.);
779  VectorizedArray res;
780  res.data = (__m512d)_mm512_andnot_epi64 ((__m512i)mask, (__m512i)data);
781  return res;
782  }
783 
788  DEAL_II_ALWAYS_INLINE
 789  VectorizedArray
 790  get_max (const VectorizedArray &other) const
791  {
792  VectorizedArray res;
793  res.data = _mm512_max_pd (data, other.data);
794  return res;
795  }
796 
801  DEAL_II_ALWAYS_INLINE
 802  VectorizedArray
 803  get_min (const VectorizedArray &other) const
804  {
805  VectorizedArray res;
806  res.data = _mm512_min_pd (data, other.data);
807  return res;
808  }
809 
813  template <typename Number2> friend VectorizedArray<Number2>
814  std::sqrt (const VectorizedArray<Number2> &);
815  template <typename Number2> friend VectorizedArray<Number2>
816  std::abs (const VectorizedArray<Number2> &);
817  template <typename Number2> friend VectorizedArray<Number2>
818  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
819  template <typename Number2> friend VectorizedArray<Number2>
820  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
821 };
822 
823 
824 
828 template <>
829 inline
830 void
831 vectorized_load_and_transpose(const unsigned int n_entries,
832  const double *in,
833  const unsigned int *offsets,
 834  VectorizedArray<double> *out)
835 {
836  const unsigned int n_chunks = n_entries/4;
837  for (unsigned int outer=0; outer<8; outer += 4)
838  {
839  const double *in0 = in + offsets[0+outer];
840  const double *in1 = in + offsets[1+outer];
841  const double *in2 = in + offsets[2+outer];
842  const double *in3 = in + offsets[3+outer];
843 
844  for (unsigned int i=0; i<n_chunks; ++i)
845  {
846  __m256d u0 = _mm256_loadu_pd(in0+4*i);
847  __m256d u1 = _mm256_loadu_pd(in1+4*i);
848  __m256d u2 = _mm256_loadu_pd(in2+4*i);
849  __m256d u3 = _mm256_loadu_pd(in3+4*i);
850  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
851  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
852  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
853  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
854  *(__m256d *)((double *)(&out[4*i+0].data)+outer) = _mm256_unpacklo_pd (t0, t1);
855  *(__m256d *)((double *)(&out[4*i+1].data)+outer) = _mm256_unpackhi_pd (t0, t1);
856  *(__m256d *)((double *)(&out[4*i+2].data)+outer) = _mm256_unpacklo_pd (t2, t3);
857  *(__m256d *)((double *)(&out[4*i+3].data)+outer) = _mm256_unpackhi_pd (t2, t3);
858  }
859  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
860  for (unsigned int v=0; v<4; ++v)
861  out[i][outer+v] = in[offsets[v+outer]+i];
862  }
863 }
864 
865 
866 
870 template <>
871 inline
872 void
873 vectorized_transpose_and_store(const bool add_into,
874  const unsigned int n_entries,
875  const VectorizedArray<double> *in,
876  const unsigned int *offsets,
877  double *out)
878 {
879  const unsigned int n_chunks = n_entries/4;
 880  // do not do a full transpose because the code would be too long and most
 881  // likely would not pay off. Rather, do the transposition on the half-width
 882  // vectorized type __m256d
883  for (unsigned int outer=0; outer<8; outer += 4)
884  {
885  double *out0 = out + offsets[0+outer];
886  double *out1 = out + offsets[1+outer];
887  double *out2 = out + offsets[2+outer];
888  double *out3 = out + offsets[3+outer];
889  for (unsigned int i=0; i<n_chunks; ++i)
890  {
891  __m256d u0 = *(const __m256d *)((const double *)(&in[4*i+0].data)+outer);
892  __m256d u1 = *(const __m256d *)((const double *)(&in[4*i+1].data)+outer);
893  __m256d u2 = *(const __m256d *)((const double *)(&in[4*i+2].data)+outer);
894  __m256d u3 = *(const __m256d *)((const double *)(&in[4*i+3].data)+outer);
895  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
896  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
897  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
898  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
899  __m256d res0 = _mm256_unpacklo_pd (t0, t1);
900  __m256d res1 = _mm256_unpackhi_pd (t0, t1);
901  __m256d res2 = _mm256_unpacklo_pd (t2, t3);
902  __m256d res3 = _mm256_unpackhi_pd (t2, t3);
903 
904  // Cannot use the same store instructions in both paths of the 'if'
905  // because the compiler cannot know that there is no aliasing between
906  // pointers
907  if (add_into)
908  {
909  res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
910  _mm256_storeu_pd(out0+4*i, res0);
911  res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
912  _mm256_storeu_pd(out1+4*i, res1);
913  res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
914  _mm256_storeu_pd(out2+4*i, res2);
915  res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
916  _mm256_storeu_pd(out3+4*i, res3);
917  }
918  else
919  {
920  _mm256_storeu_pd(out0+4*i, res0);
921  _mm256_storeu_pd(out1+4*i, res1);
922  _mm256_storeu_pd(out2+4*i, res2);
923  _mm256_storeu_pd(out3+4*i, res3);
924  }
925  }
926  if (add_into)
927  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
928  for (unsigned int v=0; v<4; ++v)
929  out[offsets[v+outer]+i] += in[i][v+outer];
930  else
931  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
932  for (unsigned int v=0; v<4; ++v)
933  out[offsets[v+outer]+i] = in[i][v+outer];
934  }
935 }
936 
937 
938 
942 template <>
943 class VectorizedArray<float>
944 {
945 public:
949  static const unsigned int n_array_elements = 16;
950 
954  DEAL_II_ALWAYS_INLINE
 955  VectorizedArray &
 956  operator = (const float x)
957  {
958  data = _mm512_set1_ps(x);
959  return *this;
960  }
961 
965  DEAL_II_ALWAYS_INLINE
966  float &
967  operator [] (const unsigned int comp)
968  {
969  AssertIndexRange (comp, 16);
970  return *(reinterpret_cast<float *>(&data)+comp);
971  }
972 
976  DEAL_II_ALWAYS_INLINE
977  const float &
978  operator [] (const unsigned int comp) const
979  {
980  AssertIndexRange (comp, 16);
981  return *(reinterpret_cast<const float *>(&data)+comp);
982  }
983 
987  DEAL_II_ALWAYS_INLINE
 988  VectorizedArray &
 989  operator += (const VectorizedArray &vec)
990  {
 991  // if the compiler supports vector arithmetic, we can simply use the +=
 992  // operator on the given data type. This allows the compiler to combine
 993  // additions with multiplications (fused multiply-add) if those
 994  // instructions are available. Otherwise, we need to use the built-in
 995  // intrinsic for __m512
996 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
997  data += vec.data;
998 #else
999  data = _mm512_add_ps(data,vec.data);
1000 #endif
1001  return *this;
1002  }
1003 
1007  DEAL_II_ALWAYS_INLINE
1008  VectorizedArray &
1009  operator -= (const VectorizedArray &vec)
1010  {
1011 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1012  data -= vec.data;
1013 #else
1014  data = _mm512_sub_ps(data,vec.data);
1015 #endif
1016  return *this;
1017  }
1021  DEAL_II_ALWAYS_INLINE
1022  VectorizedArray &
1023  operator *= (const VectorizedArray &vec)
1024  {
1025 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1026  data *= vec.data;
1027 #else
1028  data = _mm512_mul_ps(data,vec.data);
1029 #endif
1030  return *this;
1031  }
1032 
1036  DEAL_II_ALWAYS_INLINE
1037  VectorizedArray &
1038  operator /= (const VectorizedArray &vec)
1039  {
1040 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1041  data /= vec.data;
1042 #else
1043  data = _mm512_div_ps(data,vec.data);
1044 #endif
1045  return *this;
1046  }
1047 
1053  DEAL_II_ALWAYS_INLINE
1054  void load (const float *ptr)
1055  {
1056  data = _mm512_loadu_ps (ptr);
1057  }
1058 
1065  DEAL_II_ALWAYS_INLINE
1066  void store (float *ptr) const
1067  {
1068  _mm512_storeu_ps (ptr, data);
1069  }
1070 
1074  DEAL_II_ALWAYS_INLINE
1075  void streaming_store (float *ptr) const
1076  {
1077  Assert(reinterpret_cast<std::size_t>(ptr)%64==0, ExcMessage("Memory not aligned"));
1078  _mm512_stream_ps(ptr,data);
1079  }
1080 
1093  DEAL_II_ALWAYS_INLINE
1094  void gather (const float *base_ptr,
1095  const unsigned int *offsets)
1096  {
1097  // unfortunately, there does not appear to be a 512 bit integer load, so
1098  // do it by some reinterpret casts here. this is allowed because the Intel
1099  // API allows aliasing between different vector types.
1100  const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
1101  const __m512i index = *((__m512i *)(&index_val));
1102  data = _mm512_i32gather_ps(index, base_ptr, 4);
1103  }
1104 
1117  DEAL_II_ALWAYS_INLINE
1118  void scatter (const unsigned int *offsets,
1119  float *base_ptr) const
1120  {
1121  for (unsigned int i=0; i<16; ++i)
1122  for (unsigned int j=i+1; j<16; ++j)
1123  Assert(offsets[i] != offsets[j],
1124  ExcMessage("Result of scatter undefined if two offset elements"
1125  " point to the same position"));
1126 
1127  // unfortunately, there does not appear to be a 512 bit integer load, so
1128  // do it by some reinterpret casts here. this is allowed because the Intel
1129  // API allows aliasing between different vector types.
1130  const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
1131  const __m512i index = *((__m512i *)(&index_val));
1132  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1133  }
1134 
1139  __m512 data;
1140 
1141 private:
1142 
1147  DEAL_II_ALWAYS_INLINE
 1148  VectorizedArray
 1149  get_sqrt () const
1150  {
1151  VectorizedArray res;
1152  res.data = _mm512_sqrt_ps(data);
1153  return res;
1154  }
1155 
1160  DEAL_II_ALWAYS_INLINE
 1161  VectorizedArray
 1162  get_abs () const
1163  {
1164  // to compute the absolute value, perform bitwise andnot with -0. This
1165  // will leave all value and exponent bits unchanged but force the sign
1166  // value to +. Since there is no andnot for AVX512, we interpret the data
1167  // as 32 bit integers and do the andnot on those types (note that andnot
1168  // is a bitwise operation so the data type does not matter)
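 // (bit-level sketch: -0.f has only the sign bit set, 0x80000000, so
 // andnot(-0.f, x) computes ~0x80000000 & x and clears exactly the sign bit
 // of every lane)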
1169  __m512 mask = _mm512_set1_ps (-0.f);
1170  VectorizedArray res;
1171  res.data = (__m512)_mm512_andnot_epi32 ((__m512i)mask, (__m512i)data);
1172  return res;
1173  }
1174 
1179  DEAL_II_ALWAYS_INLINE
 1180  VectorizedArray
 1181  get_max (const VectorizedArray &other) const
1182  {
1183  VectorizedArray res;
1184  res.data = _mm512_max_ps (data, other.data);
1185  return res;
1186  }
1187 
1192  DEAL_II_ALWAYS_INLINE
 1193  VectorizedArray
 1194  get_min (const VectorizedArray &other) const
1195  {
1196  VectorizedArray res;
1197  res.data = _mm512_min_ps (data, other.data);
1198  return res;
1199  }
1200 
1204  template <typename Number2> friend VectorizedArray<Number2>
1205  std::sqrt (const VectorizedArray<Number2> &);
1206  template <typename Number2> friend VectorizedArray<Number2>
1207  std::abs (const VectorizedArray<Number2> &);
1208  template <typename Number2> friend VectorizedArray<Number2>
1209  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1210  template <typename Number2> friend VectorizedArray<Number2>
1211  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1212 };
1213 
1214 
1215 
1219 template <>
1220 inline
1221 void
1222 vectorized_load_and_transpose(const unsigned int n_entries,
1223  const float *in,
1224  const unsigned int *offsets,
 1225  VectorizedArray<float> *out)
1226 {
1227  const unsigned int n_chunks = n_entries/4;
1228  for (unsigned int outer = 0; outer<16; outer += 8)
1229  {
1230  for (unsigned int i=0; i<n_chunks; ++i)
1231  {
1232  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0+outer]);
1233  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1+outer]);
1234  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2+outer]);
1235  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3+outer]);
1236  __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4+outer]);
1237  __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5+outer]);
1238  __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6+outer]);
1239  __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7+outer]);
1240  // To avoid warnings about uninitialized variables, need to initialize
1241  // one variable with zero before using it.
1242  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
1243  t0 = _mm256_insertf128_ps (t3, u0, 0);
1244  t0 = _mm256_insertf128_ps (t0, u4, 1);
1245  t1 = _mm256_insertf128_ps (t3, u1, 0);
1246  t1 = _mm256_insertf128_ps (t1, u5, 1);
1247  t2 = _mm256_insertf128_ps (t3, u2, 0);
1248  t2 = _mm256_insertf128_ps (t2, u6, 1);
1249  t3 = _mm256_insertf128_ps (t3, u3, 0);
1250  t3 = _mm256_insertf128_ps (t3, u7, 1);
1251  __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
1252  __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
1253  __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
1254  __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
1255  *(__m256 *)((float *)(&out[4*i+0].data)+outer) = _mm256_shuffle_ps (v0, v2, 0x88);
1256  *(__m256 *)((float *)(&out[4*i+1].data)+outer) = _mm256_shuffle_ps (v0, v2, 0xdd);
1257  *(__m256 *)((float *)(&out[4*i+2].data)+outer) = _mm256_shuffle_ps (v1, v3, 0x88);
1258  *(__m256 *)((float *)(&out[4*i+3].data)+outer) = _mm256_shuffle_ps (v1, v3, 0xdd);
1259  }
1260  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1261  for (unsigned int v=0; v<8; ++v)
1262  out[i][v+outer] = in[offsets[v+outer]+i];
1263  }
1264 }
1265 
1266 
1267 
1271 template <>
1272 inline
1273 void
1274 vectorized_transpose_and_store(const bool add_into,
1275  const unsigned int n_entries,
1276  const VectorizedArray<float> *in,
1277  const unsigned int *offsets,
1278  float *out)
1279 {
1280  const unsigned int n_chunks = n_entries/4;
1281  for (unsigned int outer = 0; outer<16; outer += 8)
1282  {
1283  for (unsigned int i=0; i<n_chunks; ++i)
1284  {
1285  __m256 u0 = *(const __m256 *)((const float *)(&in[4*i+0].data)+outer);
1286  __m256 u1 = *(const __m256 *)((const float *)(&in[4*i+1].data)+outer);
1287  __m256 u2 = *(const __m256 *)((const float *)(&in[4*i+2].data)+outer);
1288  __m256 u3 = *(const __m256 *)((const float *)(&in[4*i+3].data)+outer);
1289  __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
1290  __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
1291  __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
1292  __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
1293  u0 = _mm256_shuffle_ps (t0, t2, 0x88);
1294  u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
1295  u2 = _mm256_shuffle_ps (t1, t3, 0x88);
1296  u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
1297  __m128 res0 = _mm256_extractf128_ps (u0, 0);
1298  __m128 res4 = _mm256_extractf128_ps (u0, 1);
1299  __m128 res1 = _mm256_extractf128_ps (u1, 0);
1300  __m128 res5 = _mm256_extractf128_ps (u1, 1);
1301  __m128 res2 = _mm256_extractf128_ps (u2, 0);
1302  __m128 res6 = _mm256_extractf128_ps (u2, 1);
1303  __m128 res3 = _mm256_extractf128_ps (u3, 0);
1304  __m128 res7 = _mm256_extractf128_ps (u3, 1);
1305 
1306  // Cannot use the same store instructions in both paths of the 'if'
1307  // because the compiler cannot know that there is no aliasing between
1308  // pointers
1309  if (add_into)
1310  {
1311  res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0+outer]), res0);
1312  _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
1313  res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1+outer]), res1);
1314  _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
1315  res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2+outer]), res2);
1316  _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
1317  res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3+outer]), res3);
1318  _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
1319  res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4+outer]), res4);
1320  _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
1321  res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5+outer]), res5);
1322  _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
1323  res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6+outer]), res6);
1324  _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
1325  res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7+outer]), res7);
1326  _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
1327  }
1328  else
1329  {
1330  _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
1331  _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
1332  _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
1333  _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
1334  _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
1335  _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
1336  _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
1337  _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
1338  }
1339  }
1340  if (add_into)
1341  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1342  for (unsigned int v=0; v<8; ++v)
1343  out[offsets[v+outer]+i] += in[i][v+outer];
1344  else
1345  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1346  for (unsigned int v=0; v<8; ++v)
1347  out[offsets[v+outer]+i] = in[i][v+outer];
1348  }
1349 }
1350 
1351 
1352 
1353 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
1354 
1358 template <>
1359 class VectorizedArray<double>
1360 {
1361 public:
1365  static const unsigned int n_array_elements = 4;
1366 
1370  DEAL_II_ALWAYS_INLINE
1371  VectorizedArray &
1372  operator = (const double x)
1373  {
1374  data = _mm256_set1_pd(x);
1375  return *this;
1376  }
1377 
1381  DEAL_II_ALWAYS_INLINE
1382  double &
1383  operator [] (const unsigned int comp)
1384  {
1385  AssertIndexRange (comp, 4);
1386  return *(reinterpret_cast<double *>(&data)+comp);
1387  }
1388 
1392  DEAL_II_ALWAYS_INLINE
1393  const double &
1394  operator [] (const unsigned int comp) const
1395  {
1396  AssertIndexRange (comp, 4);
1397  return *(reinterpret_cast<const double *>(&data)+comp);
1398  }
1399 
1403  DEAL_II_ALWAYS_INLINE
1404  VectorizedArray &
1405  operator += (const VectorizedArray &vec)
1406  {
 1407  // if the compiler supports vector arithmetic, we can simply use the +=
 1408  // operator on the given data type. This allows the compiler to combine
 1409  // additions with multiplications (fused multiply-add) if those
 1410  // instructions are available. Otherwise, we need to use the built-in
 1411  // intrinsic for __m256d
1412 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1413  data += vec.data;
1414 #else
1415  data = _mm256_add_pd(data,vec.data);
1416 #endif
1417  return *this;
1418  }
1419 
1423  DEAL_II_ALWAYS_INLINE
1424  VectorizedArray &
1425  operator -= (const VectorizedArray &vec)
1426  {
1427 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1428  data -= vec.data;
1429 #else
1430  data = _mm256_sub_pd(data,vec.data);
1431 #endif
1432  return *this;
1433  }
1437  DEAL_II_ALWAYS_INLINE
1438  VectorizedArray &
1439  operator *= (const VectorizedArray &vec)
1440  {
1441 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1442  data *= vec.data;
1443 #else
1444  data = _mm256_mul_pd(data,vec.data);
1445 #endif
1446  return *this;
1447  }
1448 
1452  DEAL_II_ALWAYS_INLINE
1453  VectorizedArray &
1454  operator /= (const VectorizedArray &vec)
1455  {
1456 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1457  data /= vec.data;
1458 #else
1459  data = _mm256_div_pd(data,vec.data);
1460 #endif
1461  return *this;
1462  }
1463 
1469  DEAL_II_ALWAYS_INLINE
1470  void load (const double *ptr)
1471  {
1472  data = _mm256_loadu_pd (ptr);
1473  }
1474 
1481  DEAL_II_ALWAYS_INLINE
1482  void store (double *ptr) const
1483  {
1484  _mm256_storeu_pd (ptr, data);
1485  }
1486 
1490  DEAL_II_ALWAYS_INLINE
1491  void streaming_store (double *ptr) const
1492  {
1493  Assert(reinterpret_cast<std::size_t>(ptr)%32==0, ExcMessage("Memory not aligned"));
1494  _mm256_stream_pd(ptr,data);
1495  }
1496 
1509  DEAL_II_ALWAYS_INLINE
1510  void gather (const double *base_ptr,
1511  const unsigned int *offsets)
1512  {
1513 #ifdef __AVX2__
1514  // unfortunately, there does not appear to be a 128 bit integer load, so
1515  // do it by some reinterpret casts here. this is allowed because the Intel
1516  // API allows aliasing between different vector types.
1517  const __m128 index_val = _mm_loadu_ps((const float *)offsets);
1518  const __m128i index = *((__m128i *)(&index_val));
1519  data = _mm256_i32gather_pd(base_ptr, index, 8);
1520 #else
1521  for (unsigned int i=0; i<4; ++i)
1522  *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
1523 #endif
1524  }
1525 
1538  DEAL_II_ALWAYS_INLINE
1539  void scatter (const unsigned int *offsets,
1540  double *base_ptr) const
1541  {
1542  // no scatter operation in AVX/AVX2
1543  for (unsigned int i=0; i<4; ++i)
1544  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
1545  }
1546 
1551  __m256d data;
1552 
1553 private:
1558  DEAL_II_ALWAYS_INLINE
 1559  VectorizedArray
 1560  get_sqrt () const
1561  {
1562  VectorizedArray res;
1563  res.data = _mm256_sqrt_pd(data);
1564  return res;
1565  }
1566 
1571  DEAL_II_ALWAYS_INLINE
 1572  VectorizedArray
 1573  get_abs () const
1574  {
1575  // to compute the absolute value, perform bitwise andnot with -0. This
1576  // will leave all value and exponent bits unchanged but force the sign
1577  // value to +.
1578  __m256d mask = _mm256_set1_pd (-0.);
1579  VectorizedArray res;
1580  res.data = _mm256_andnot_pd(mask, data);
1581  return res;
1582  }
1583 
1588  DEAL_II_ALWAYS_INLINE
 1589  VectorizedArray
 1590  get_max (const VectorizedArray &other) const
1591  {
1592  VectorizedArray res;
1593  res.data = _mm256_max_pd (data, other.data);
1594  return res;
1595  }
1596 
1601  DEAL_II_ALWAYS_INLINE
 1602  VectorizedArray
 1603  get_min (const VectorizedArray &other) const
1604  {
1605  VectorizedArray res;
1606  res.data = _mm256_min_pd (data, other.data);
1607  return res;
1608  }
1609 
1613  template <typename Number2> friend VectorizedArray<Number2>
1614  std::sqrt (const VectorizedArray<Number2> &);
1615  template <typename Number2> friend VectorizedArray<Number2>
1616  std::abs (const VectorizedArray<Number2> &);
1617  template <typename Number2> friend VectorizedArray<Number2>
1618  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1619  template <typename Number2> friend VectorizedArray<Number2>
1620  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1621 };
1622 
1623 
1624 
1628 template <>
1629 inline
1630 void
1631 vectorized_load_and_transpose(const unsigned int n_entries,
1632  const double *in,
1633  const unsigned int *offsets,
 1634  VectorizedArray<double> *out)
1635 {
1636  const unsigned int n_chunks = n_entries/4;
1637  const double *in0 = in + offsets[0];
1638  const double *in1 = in + offsets[1];
1639  const double *in2 = in + offsets[2];
1640  const double *in3 = in + offsets[3];
1641 
1642  for (unsigned int i=0; i<n_chunks; ++i)
1643  {
1644  __m256d u0 = _mm256_loadu_pd(in0+4*i);
1645  __m256d u1 = _mm256_loadu_pd(in1+4*i);
1646  __m256d u2 = _mm256_loadu_pd(in2+4*i);
1647  __m256d u3 = _mm256_loadu_pd(in3+4*i);
1648  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
1649  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
1650  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
1651  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
1652  out[4*i+0].data = _mm256_unpacklo_pd (t0, t1);
1653  out[4*i+1].data = _mm256_unpackhi_pd (t0, t1);
1654  out[4*i+2].data = _mm256_unpacklo_pd (t2, t3);
1655  out[4*i+3].data = _mm256_unpackhi_pd (t2, t3);
1656  }
1657  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1658  for (unsigned int v=0; v<4; ++v)
1659  out[i][v] = in[offsets[v]+i];
1660 }
1661 
1662 
1663 
1667 template <>
1668 inline
1669 void
1670 vectorized_transpose_and_store(const bool add_into,
1671  const unsigned int n_entries,
1672  const VectorizedArray<double> *in,
1673  const unsigned int *offsets,
1674  double *out)
1675 {
1676  const unsigned int n_chunks = n_entries/4;
1677  double *out0 = out + offsets[0];
1678  double *out1 = out + offsets[1];
1679  double *out2 = out + offsets[2];
1680  double *out3 = out + offsets[3];
1681  for (unsigned int i=0; i<n_chunks; ++i)
1682  {
1683  __m256d u0 = in[4*i+0].data;
1684  __m256d u1 = in[4*i+1].data;
1685  __m256d u2 = in[4*i+2].data;
1686  __m256d u3 = in[4*i+3].data;
1687  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
1688  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
1689  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
1690  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
1691  __m256d res0 = _mm256_unpacklo_pd (t0, t1);
1692  __m256d res1 = _mm256_unpackhi_pd (t0, t1);
1693  __m256d res2 = _mm256_unpacklo_pd (t2, t3);
1694  __m256d res3 = _mm256_unpackhi_pd (t2, t3);
1695 
1696  // Cannot use the same store instructions in both paths of the 'if'
1697  // because the compiler cannot know that there is no aliasing between
1698  // pointers
1699  if (add_into)
1700  {
1701  res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
1702  _mm256_storeu_pd(out0+4*i, res0);
1703  res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
1704  _mm256_storeu_pd(out1+4*i, res1);
1705  res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
1706  _mm256_storeu_pd(out2+4*i, res2);
1707  res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
1708  _mm256_storeu_pd(out3+4*i, res3);
1709  }
1710  else
1711  {
1712  _mm256_storeu_pd(out0+4*i, res0);
1713  _mm256_storeu_pd(out1+4*i, res1);
1714  _mm256_storeu_pd(out2+4*i, res2);
1715  _mm256_storeu_pd(out3+4*i, res3);
1716  }
1717  }
1718  if (add_into)
1719  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1720  for (unsigned int v=0; v<4; ++v)
1721  out[offsets[v]+i] += in[i][v];
1722  else
1723  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1724  for (unsigned int v=0; v<4; ++v)
1725  out[offsets[v]+i] = in[i][v];
1726 }
1727 
1728 
1729 
1733 template <>
1734 class VectorizedArray<float>
1735 {
1736 public:
1740  static const unsigned int n_array_elements = 8;
1741 
1745  DEAL_II_ALWAYS_INLINE
1746  VectorizedArray &
1747  operator = (const float x)
1748  {
1749  data = _mm256_set1_ps(x);
1750  return *this;
1751  }
1752 
1756  DEAL_II_ALWAYS_INLINE
1757  float &
1758  operator [] (const unsigned int comp)
1759  {
1760  AssertIndexRange (comp, 8);
1761  return *(reinterpret_cast<float *>(&data)+comp);
1762  }
1763 
1767  DEAL_II_ALWAYS_INLINE
1768  const float &
1769  operator [] (const unsigned int comp) const
1770  {
1771  AssertIndexRange (comp, 8);
1772  return *(reinterpret_cast<const float *>(&data)+comp);
1773  }
1774 
1778  DEAL_II_ALWAYS_INLINE
1779  VectorizedArray &
1780  operator += (const VectorizedArray &vec)
1781  {
 1782  // if the compiler supports vector arithmetic, we can simply use the +=
 1783  // operator on the given data type. This allows the compiler to combine
 1784  // additions with multiplications (fused multiply-add) if those
 1785  // instructions are available. Otherwise, we need to use the built-in
 1786  // intrinsic for __m256
1787 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1788  data += vec.data;
1789 #else
1790  data = _mm256_add_ps(data,vec.data);
1791 #endif
1792  return *this;
1793  }
1794 
1798  DEAL_II_ALWAYS_INLINE
1799  VectorizedArray &
1800  operator -= (const VectorizedArray &vec)
1801  {
1802 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1803  data -= vec.data;
1804 #else
1805  data = _mm256_sub_ps(data,vec.data);
1806 #endif
1807  return *this;
1808  }
1812  DEAL_II_ALWAYS_INLINE
1813  VectorizedArray &
1814  operator *= (const VectorizedArray &vec)
1815  {
1816 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1817  data *= vec.data;
1818 #else
1819  data = _mm256_mul_ps(data,vec.data);
1820 #endif
1821  return *this;
1822  }
1823 
1827  DEAL_II_ALWAYS_INLINE
1828  VectorizedArray &
1829  operator /= (const VectorizedArray &vec)
1830  {
1831 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1832  data /= vec.data;
1833 #else
1834  data = _mm256_div_ps(data,vec.data);
1835 #endif
1836  return *this;
1837  }
1838 
1844  DEAL_II_ALWAYS_INLINE
1845  void load (const float *ptr)
1846  {
1847  data = _mm256_loadu_ps (ptr);
1848  }
1849 
1856  DEAL_II_ALWAYS_INLINE
1857  void store (float *ptr) const
1858  {
1859  _mm256_storeu_ps (ptr, data);
1860  }
1861 
1865  DEAL_II_ALWAYS_INLINE
1866  void streaming_store (float *ptr) const
1867  {
1868  Assert(reinterpret_cast<std::size_t>(ptr)%32==0, ExcMessage("Memory not aligned"));
1869  _mm256_stream_ps(ptr,data);
1870  }
1871 
1884  DEAL_II_ALWAYS_INLINE
1885  void gather (const float *base_ptr,
1886  const unsigned int *offsets)
1887  {
1888 #ifdef __AVX2__
1889  // unfortunately, there does not appear to be a 256 bit integer load, so
1890  // do it by some reinterpret casts here. this is allowed because the Intel
1891  // API allows aliasing between different vector types.
1892  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
1893  const __m256i index = *((__m256i *)(&index_val));
1894  data = _mm256_i32gather_ps(base_ptr, index, 4);
1895 #else
1896  for (unsigned int i=0; i<8; ++i)
1897  *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
1898 #endif
1899  }
1900 
1913  DEAL_II_ALWAYS_INLINE
1914  void scatter (const unsigned int *offsets,
1915  float *base_ptr) const
1916  {
1917  // no scatter operation in AVX/AVX2
1918  for (unsigned int i=0; i<8; ++i)
1919  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
1920  }
1921 
1926  __m256 data;
1927 
1928 private:
1929 
1934  DEAL_II_ALWAYS_INLINE
 1935  VectorizedArray
 1936  get_sqrt () const
1937  {
1938  VectorizedArray res;
1939  res.data = _mm256_sqrt_ps(data);
1940  return res;
1941  }
1942 
1947  DEAL_II_ALWAYS_INLINE
 1948  VectorizedArray
 1949  get_abs () const
1950  {
1951  // to compute the absolute value, perform bitwise andnot with -0. This
1952  // will leave all value and exponent bits unchanged but force the sign
1953  // value to +.
1954  __m256 mask = _mm256_set1_ps (-0.f);
1955  VectorizedArray res;
1956  res.data = _mm256_andnot_ps(mask, data);
1957  return res;
1958  }
1959 
1964  DEAL_II_ALWAYS_INLINE
 1965  VectorizedArray
 1966  get_max (const VectorizedArray &other) const
1967  {
1968  VectorizedArray res;
1969  res.data = _mm256_max_ps (data, other.data);
1970  return res;
1971  }
1972 
1977  DEAL_II_ALWAYS_INLINE
 1978  VectorizedArray
 1979  get_min (const VectorizedArray &other) const
1980  {
1981  VectorizedArray res;
1982  res.data = _mm256_min_ps (data, other.data);
1983  return res;
1984  }
1985 
1989  template <typename Number2> friend VectorizedArray<Number2>
1990  std::sqrt (const VectorizedArray<Number2> &);
1991  template <typename Number2> friend VectorizedArray<Number2>
1992  std::abs (const VectorizedArray<Number2> &);
1993  template <typename Number2> friend VectorizedArray<Number2>
1994  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1995  template <typename Number2> friend VectorizedArray<Number2>
1996  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1997 };
1998 
1999 
2000 
2004 template <>
2005 inline
2006 void
2007 vectorized_load_and_transpose(const unsigned int n_entries,
2008  const float *in,
2009  const unsigned int *offsets,
 2010  VectorizedArray<float> *out)
2011 {
2012  const unsigned int n_chunks = n_entries/4;
2013  for (unsigned int i=0; i<n_chunks; ++i)
2014  {
2015  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
2016  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
2017  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
2018  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
2019  __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4]);
2020  __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5]);
2021  __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6]);
2022  __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7]);
2023  // To avoid warnings about uninitialized variables, need to initialize
2024  // one variable with zero before using it.
2025  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
2026  t0 = _mm256_insertf128_ps (t3, u0, 0);
2027  t0 = _mm256_insertf128_ps (t0, u4, 1);
2028  t1 = _mm256_insertf128_ps (t3, u1, 0);
2029  t1 = _mm256_insertf128_ps (t1, u5, 1);
2030  t2 = _mm256_insertf128_ps (t3, u2, 0);
2031  t2 = _mm256_insertf128_ps (t2, u6, 1);
2032  t3 = _mm256_insertf128_ps (t3, u3, 0);
2033  t3 = _mm256_insertf128_ps (t3, u7, 1);
2034  __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
2035  __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
2036  __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
2037  __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
2038  out[4*i+0].data = _mm256_shuffle_ps (v0, v2, 0x88);
2039  out[4*i+1].data = _mm256_shuffle_ps (v0, v2, 0xdd);
2040  out[4*i+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
2041  out[4*i+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
2042  }
2043  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2044  for (unsigned int v=0; v<8; ++v)
2045  out[i][v] = in[offsets[v]+i];
2046 }
2047 
2048 
2049 
2053 template <>
2054 inline
2055 void
2056 vectorized_transpose_and_store(const bool add_into,
2057  const unsigned int n_entries,
2058  const VectorizedArray<float> *in,
2059  const unsigned int *offsets,
2060  float *out)
2061 {
2062  const unsigned int n_chunks = n_entries/4;
2063  for (unsigned int i=0; i<n_chunks; ++i)
2064  {
2065  __m256 u0 = in[4*i+0].data;
2066  __m256 u1 = in[4*i+1].data;
2067  __m256 u2 = in[4*i+2].data;
2068  __m256 u3 = in[4*i+3].data;
2069  __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
2070  __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
2071  __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
2072  __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
2073  u0 = _mm256_shuffle_ps (t0, t2, 0x88);
2074  u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
2075  u2 = _mm256_shuffle_ps (t1, t3, 0x88);
2076  u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
2077  __m128 res0 = _mm256_extractf128_ps (u0, 0);
2078  __m128 res4 = _mm256_extractf128_ps (u0, 1);
2079  __m128 res1 = _mm256_extractf128_ps (u1, 0);
2080  __m128 res5 = _mm256_extractf128_ps (u1, 1);
2081  __m128 res2 = _mm256_extractf128_ps (u2, 0);
2082  __m128 res6 = _mm256_extractf128_ps (u2, 1);
2083  __m128 res3 = _mm256_extractf128_ps (u3, 0);
2084  __m128 res7 = _mm256_extractf128_ps (u3, 1);
2085 
2086  // Cannot use the same store instructions in both paths of the 'if'
2087  // because the compiler cannot know that there is no aliasing between
2088  // pointers
2089  if (add_into)
2090  {
2091  res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), res0);
2092  _mm_storeu_ps(out+4*i+offsets[0], res0);
2093  res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), res1);
2094  _mm_storeu_ps(out+4*i+offsets[1], res1);
2095  res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), res2);
2096  _mm_storeu_ps(out+4*i+offsets[2], res2);
2097  res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), res3);
2098  _mm_storeu_ps(out+4*i+offsets[3], res3);
2099  res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4]), res4);
2100  _mm_storeu_ps(out+4*i+offsets[4], res4);
2101  res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5]), res5);
2102  _mm_storeu_ps(out+4*i+offsets[5], res5);
2103  res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6]), res6);
2104  _mm_storeu_ps(out+4*i+offsets[6], res6);
2105  res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7]), res7);
2106  _mm_storeu_ps(out+4*i+offsets[7], res7);
2107  }
2108  else
2109  {
2110  _mm_storeu_ps(out+4*i+offsets[0], res0);
2111  _mm_storeu_ps(out+4*i+offsets[1], res1);
2112  _mm_storeu_ps(out+4*i+offsets[2], res2);
2113  _mm_storeu_ps(out+4*i+offsets[3], res3);
2114  _mm_storeu_ps(out+4*i+offsets[4], res4);
2115  _mm_storeu_ps(out+4*i+offsets[5], res5);
2116  _mm_storeu_ps(out+4*i+offsets[6], res6);
2117  _mm_storeu_ps(out+4*i+offsets[7], res7);
2118  }
2119  }
2120  if (add_into)
2121  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2122  for (unsigned int v=0; v<8; ++v)
2123  out[offsets[v]+i] += in[i][v];
2124  else
2125  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2126  for (unsigned int v=0; v<8; ++v)
2127  out[offsets[v]+i] = in[i][v];
2128 }
2129 
2130 
2131 
2132 // for safety, also check that __SSE2__ is defined in case the user manually
2133 // set some conflicting compile flags which prevent compilation
2134 
2135 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
2136 
2140 template <>
2141 class VectorizedArray<double>
2142 {
2143 public:
2147  static const unsigned int n_array_elements = 2;
2148 
2152  DEAL_II_ALWAYS_INLINE
2153  VectorizedArray &
2154  operator = (const double x)
2155  {
2156  data = _mm_set1_pd(x);
2157  return *this;
2158  }
2159 
2163  DEAL_II_ALWAYS_INLINE
2164  double &
2165  operator [] (const unsigned int comp)
2166  {
2167  AssertIndexRange (comp, 2);
2168  return *(reinterpret_cast<double *>(&data)+comp);
2169  }
2170 
2174  DEAL_II_ALWAYS_INLINE
2175  const double &
2176  operator [] (const unsigned int comp) const
2177  {
2178  AssertIndexRange (comp, 2);
2179  return *(reinterpret_cast<const double *>(&data)+comp);
2180  }
2181 
2185  DEAL_II_ALWAYS_INLINE
2186  VectorizedArray &
 2187  operator += (const VectorizedArray &vec)
 2188  {
2189 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2190  data += vec.data;
2191 #else
2192  data = _mm_add_pd(data,vec.data);
2193 #endif
2194  return *this;
2195  }
2196 
2200  DEAL_II_ALWAYS_INLINE
2201  VectorizedArray &
 2202  operator -= (const VectorizedArray &vec)
 2203  {
2204 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2205  data -= vec.data;
2206 #else
2207  data = _mm_sub_pd(data,vec.data);
2208 #endif
2209  return *this;
2210  }
2211 
2215  DEAL_II_ALWAYS_INLINE
2216  VectorizedArray &
 2217  operator *= (const VectorizedArray &vec)
 2218  {
2219 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2220  data *= vec.data;
2221 #else
2222  data = _mm_mul_pd(data,vec.data);
2223 #endif
2224  return *this;
2225  }
2226 
2230  DEAL_II_ALWAYS_INLINE
2231  VectorizedArray &
 2232  operator /= (const VectorizedArray &vec)
 2233  {
2234 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2235  data /= vec.data;
2236 #else
2237  data = _mm_div_pd(data,vec.data);
2238 #endif
2239  return *this;
2240  }
2241 
2247  DEAL_II_ALWAYS_INLINE
2248  void load (const double *ptr)
2249  {
2250  data = _mm_loadu_pd (ptr);
2251  }
2252 
2259  DEAL_II_ALWAYS_INLINE
2260  void store (double *ptr) const
2261  {
2262  _mm_storeu_pd (ptr, data);
2263  }
2264 
2268  DEAL_II_ALWAYS_INLINE
2269  void streaming_store (double *ptr) const
2270  {
2271  Assert(reinterpret_cast<std::size_t>(ptr)%16==0, ExcMessage("Memory not aligned"));
2272  _mm_stream_pd(ptr,data);
2273  }
2274 
2287  DEAL_II_ALWAYS_INLINE
2288  void gather (const double *base_ptr,
2289  const unsigned int *offsets)
2290  {
2291  for (unsigned int i=0; i<2; ++i)
2292  *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
2293  }
2294 
2307  DEAL_II_ALWAYS_INLINE
2308  void scatter (const unsigned int *offsets,
2309  double *base_ptr) const
2310  {
2311  for (unsigned int i=0; i<2; ++i)
2312  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
2313  }
2314 
2319  __m128d data;
2320 
2321 private:
2326  DEAL_II_ALWAYS_INLINE
 2327  VectorizedArray
 2328  get_sqrt () const
2329  {
2330  VectorizedArray res;
2331  res.data = _mm_sqrt_pd(data);
2332  return res;
2333  }
2334 
2339  DEAL_II_ALWAYS_INLINE
 2340  VectorizedArray
 2341  get_abs () const
2342  {
2343  // to compute the absolute value, perform
2344  // bitwise andnot with -0. This will leave all
2345  // value and exponent bits unchanged but force
2346  // the sign value to +.
2347  __m128d mask = _mm_set1_pd (-0.);
2348  VectorizedArray res;
2349  res.data = _mm_andnot_pd(mask, data);
2350  return res;
2351  }
2352 
2357  DEAL_II_ALWAYS_INLINE
 2358  VectorizedArray
 2359  get_max (const VectorizedArray &other) const
2360  {
2361  VectorizedArray res;
2362  res.data = _mm_max_pd (data, other.data);
2363  return res;
2364  }
2365 
2370  DEAL_II_ALWAYS_INLINE
 2371  VectorizedArray
 2372  get_min (const VectorizedArray &other) const
2373  {
2374  VectorizedArray res;
2375  res.data = _mm_min_pd (data, other.data);
2376  return res;
2377  }
2378 
2382  template <typename Number2> friend VectorizedArray<Number2>
2383  std::sqrt (const VectorizedArray<Number2> &);
2384  template <typename Number2> friend VectorizedArray<Number2>
2385  std::abs (const VectorizedArray<Number2> &);
2386  template <typename Number2> friend VectorizedArray<Number2>
2387  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2388  template <typename Number2> friend VectorizedArray<Number2>
2389  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2390 };
2391 
2392 
2393 
2397 template <>
2398 inline
2399 void vectorized_load_and_transpose(const unsigned int n_entries,
2400  const double *in,
2401  const unsigned int *offsets,
 2402  VectorizedArray<double> *out)
2403 {
2404  const unsigned int n_chunks = n_entries/2;
2405  for (unsigned int i=0; i<n_chunks; ++i)
2406  {
2407  __m128d u0 = _mm_loadu_pd(in+2*i+offsets[0]);
2408  __m128d u1 = _mm_loadu_pd(in+2*i+offsets[1]);
2409  out[2*i+0].data = _mm_unpacklo_pd (u0, u1);
2410  out[2*i+1].data = _mm_unpackhi_pd (u0, u1);
2411  }
2412  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2413  for (unsigned int v=0; v<2; ++v)
2414  out[i][v] = in[offsets[v]+i];
2415 }
2416 
2417 
2418 
2422 template <>
2423 inline
2424 void
2425 vectorized_transpose_and_store(const bool add_into,
2426  const unsigned int n_entries,
2427  const VectorizedArray<double> *in,
2428  const unsigned int *offsets,
2429  double *out)
2430 {
2431  const unsigned int n_chunks = n_entries/2;
2432  if (add_into)
2433  {
2434  for (unsigned int i=0; i<n_chunks; ++i)
2435  {
2436  __m128d u0 = in[2*i+0].data;
2437  __m128d u1 = in[2*i+1].data;
2438  __m128d res0 = _mm_unpacklo_pd (u0, u1);
2439  __m128d res1 = _mm_unpackhi_pd (u0, u1);
2440  _mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0));
2441  _mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1));
2442  }
2443  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2444  for (unsigned int v=0; v<2; ++v)
2445  out[offsets[v]+i] += in[i][v];
2446  }
2447  else
2448  {
2449  for (unsigned int i=0; i<n_chunks; ++i)
2450  {
2451  __m128d u0 = in[2*i+0].data;
2452  __m128d u1 = in[2*i+1].data;
2453  __m128d res0 = _mm_unpacklo_pd (u0, u1);
2454  __m128d res1 = _mm_unpackhi_pd (u0, u1);
2455  _mm_storeu_pd(out+2*i+offsets[0], res0);
2456  _mm_storeu_pd(out+2*i+offsets[1], res1);
2457  }
2458  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2459  for (unsigned int v=0; v<2; ++v)
2460  out[offsets[v]+i] = in[i][v];
2461  }
2462 }
2463 
2464 
2465 
2469 template <>
2470 class VectorizedArray<float>
2471 {
2472 public:
2476  static const unsigned int n_array_elements = 4;
2477 
2482  DEAL_II_ALWAYS_INLINE
2483  VectorizedArray &
2484  operator = (const float x)
2485  {
2486  data = _mm_set1_ps(x);
2487  return *this;
2488  }
2489 
2493  DEAL_II_ALWAYS_INLINE
2494  float &
2495  operator [] (const unsigned int comp)
2496  {
2497  AssertIndexRange (comp, 4);
2498  return *(reinterpret_cast<float *>(&data)+comp);
2499  }
2500 
2504  DEAL_II_ALWAYS_INLINE
2505  const float &
2506  operator [] (const unsigned int comp) const
2507  {
2508  AssertIndexRange (comp, 4);
2509  return *(reinterpret_cast<const float *>(&data)+comp);
2510  }
2511 
2515  DEAL_II_ALWAYS_INLINE
2516  VectorizedArray &
 2517  operator += (const VectorizedArray &vec)
 2518  {
2519 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2520  data += vec.data;
2521 #else
2522  data = _mm_add_ps(data,vec.data);
2523 #endif
2524  return *this;
2525  }
2526 
2530  DEAL_II_ALWAYS_INLINE
2531  VectorizedArray &
 2532  operator -= (const VectorizedArray &vec)
 2533  {
2534 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2535  data -= vec.data;
2536 #else
2537  data = _mm_sub_ps(data,vec.data);
2538 #endif
2539  return *this;
2540  }
2541 
2545  DEAL_II_ALWAYS_INLINE
2546  VectorizedArray &
 2547  operator *= (const VectorizedArray &vec)
 2548  {
2549 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2550  data *= vec.data;
2551 #else
2552  data = _mm_mul_ps(data,vec.data);
2553 #endif
2554  return *this;
2555  }
2556 
2560  DEAL_II_ALWAYS_INLINE
2561  VectorizedArray &
 2562  operator /= (const VectorizedArray &vec)
 2563  {
2564 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2565  data /= vec.data;
2566 #else
2567  data = _mm_div_ps(data,vec.data);
2568 #endif
2569  return *this;
2570  }
2571 
2577  DEAL_II_ALWAYS_INLINE
2578  void load (const float *ptr)
2579  {
2580  data = _mm_loadu_ps (ptr);
2581  }
2582 
2589  DEAL_II_ALWAYS_INLINE
2590  void store (float *ptr) const
2591  {
2592  _mm_storeu_ps (ptr, data);
2593  }
2594 
2598  DEAL_II_ALWAYS_INLINE
2599  void streaming_store (float *ptr) const
2600  {
2601  Assert(reinterpret_cast<std::size_t>(ptr)%16==0, ExcMessage("Memory not aligned"));
2602  _mm_stream_ps(ptr,data);
2603  }
2604 
2617  DEAL_II_ALWAYS_INLINE
2618  void gather (const float *base_ptr,
2619  const unsigned int *offsets)
2620  {
2621  for (unsigned int i=0; i<4; ++i)
2622  *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
2623  }
2624 
2637  DEAL_II_ALWAYS_INLINE
2638  void scatter (const unsigned int *offsets,
2639  float *base_ptr) const
2640  {
2641  for (unsigned int i=0; i<4; ++i)
2642  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
2643  }
2644 
2649  __m128 data;
2650 
2651 private:
2656  DEAL_II_ALWAYS_INLINE
 2657  VectorizedArray
 2658  get_sqrt () const
2659  {
2660  VectorizedArray res;
2661  res.data = _mm_sqrt_ps(data);
2662  return res;
2663  }
2664 
2669  DEAL_II_ALWAYS_INLINE
 2670  VectorizedArray
 2671  get_abs () const
2672  {
2673  // to compute the absolute value, perform bitwise andnot with -0. This
2674  // will leave all value and exponent bits unchanged but force the sign
2675  // value to +.
2676  __m128 mask = _mm_set1_ps (-0.f);
2677  VectorizedArray res;
2678  res.data = _mm_andnot_ps(mask, data);
2679  return res;
2680  }
2681 
2686  DEAL_II_ALWAYS_INLINE
 2687  VectorizedArray
 2688  get_max (const VectorizedArray &other) const
2689  {
2690  VectorizedArray res;
2691  res.data = _mm_max_ps (data, other.data);
2692  return res;
2693  }
2694 
2699  DEAL_II_ALWAYS_INLINE
 2700  VectorizedArray
 2701  get_min (const VectorizedArray &other) const
2702  {
2703  VectorizedArray res;
2704  res.data = _mm_min_ps (data, other.data);
2705  return res;
2706  }
2707 
2711  template <typename Number2> friend VectorizedArray<Number2>
2712  std::sqrt (const VectorizedArray<Number2> &);
2713  template <typename Number2> friend VectorizedArray<Number2>
2714  std::abs (const VectorizedArray<Number2> &);
2715  template <typename Number2> friend VectorizedArray<Number2>
2716  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2717  template <typename Number2> friend VectorizedArray<Number2>
2718  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2719 };
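// A minimal usage sketch of the specialization above, assuming the SSE2 code
// path is the one in use so that a VectorizedArray<float> packs four lanes
// (variable names are only for exposition):
//
//   float a[4] = {1.f, 2.f, 3.f, 4.f};
//   float b[4] = {5.f, 6.f, 7.f, 8.f};
//   float c[4];
//   VectorizedArray<float> va, vb;
//   va.load(a);    // unaligned load of a[0..3] into one __m128
//   vb.load(b);    // unaligned load of b[0..3]
//   va += vb;      // a single _mm_add_ps across all four lanes
//   va.store(c);   // afterwards c[i] == a[i] + b[i] for i = 0..3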
2720 
2721 
2722 
2726 template <>
2727 inline
2728 void vectorized_load_and_transpose(const unsigned int n_entries,
2729  const float *in,
2730  const unsigned int *offsets,
2731  VectorizedArray<float> *out)
2732 {
2733  const unsigned int n_chunks = n_entries/4;
2734  for (unsigned int i=0; i<n_chunks; ++i)
2735  {
2736  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
2737  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
2738  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
2739  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
2740  __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44);
2741  __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee);
2742  __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44);
2743  __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee);
2744  out[4*i+0].data = _mm_shuffle_ps (v0, v2, 0x88);
2745  out[4*i+1].data = _mm_shuffle_ps (v0, v2, 0xdd);
2746  out[4*i+2].data = _mm_shuffle_ps (v1, v3, 0x88);
2747  out[4*i+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
2748  }
2749  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2750  for (unsigned int v=0; v<4; ++v)
2751  out[i][v] = in[offsets[v]+i];
2752 }
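// The net effect of the specialization above is the transposition
// out[i][v] = in[offsets[v] + i] for i < n_entries and v < 4: the main loop
// handles blocks of four entries with _mm_shuffle_ps, the tail loop the
// remainder elementwise. A small sketch with hypothetical data (n_entries = 2):
//
//   // four source vectors of length 2, stored back to back
//   float in[8] = {0.f, 1.f,  10.f, 11.f,  20.f, 21.f,  30.f, 31.f};
//   unsigned int offsets[4] = {0, 2, 4, 6};
//   VectorizedArray<float> out[2];
//   vectorized_load_and_transpose(2, in, offsets, out);
//   // now out[0] holds {0, 10, 20, 30} and out[1] holds {1, 11, 21, 31}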
2753 
2754 
2755 
2759 template <>
2760 inline
2761 void
2762 vectorized_transpose_and_store(const bool add_into,
2763  const unsigned int n_entries,
2764  const VectorizedArray<float> *in,
2765  const unsigned int *offsets,
2766  float *out)
2767 {
2768  const unsigned int n_chunks = n_entries/4;
2769  for (unsigned int i=0; i<n_chunks; ++i)
2770  {
2771  __m128 u0 = in[4*i+0].data;
2772  __m128 u1 = in[4*i+1].data;
2773  __m128 u2 = in[4*i+2].data;
2774  __m128 u3 = in[4*i+3].data;
2775  __m128 t0 = _mm_shuffle_ps (u0, u1, 0x44);
2776  __m128 t1 = _mm_shuffle_ps (u0, u1, 0xee);
2777  __m128 t2 = _mm_shuffle_ps (u2, u3, 0x44);
2778  __m128 t3 = _mm_shuffle_ps (u2, u3, 0xee);
2779  u0 = _mm_shuffle_ps (t0, t2, 0x88);
2780  u1 = _mm_shuffle_ps (t0, t2, 0xdd);
2781  u2 = _mm_shuffle_ps (t1, t3, 0x88);
2782  u3 = _mm_shuffle_ps (t1, t3, 0xdd);
2783 
2784  // Cannot use the same store instructions in both paths of the 'if'
2785  // because the compiler cannot know that there is no aliasing between
2786  // pointers
2787  if (add_into)
2788  {
2789  u0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), u0);
2790  _mm_storeu_ps(out+4*i+offsets[0], u0);
2791  u1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), u1);
2792  _mm_storeu_ps(out+4*i+offsets[1], u1);
2793  u2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), u2);
2794  _mm_storeu_ps(out+4*i+offsets[2], u2);
2795  u3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), u3);
2796  _mm_storeu_ps(out+4*i+offsets[3], u3);
2797  }
2798  else
2799  {
2800  _mm_storeu_ps(out+4*i+offsets[0], u0);
2801  _mm_storeu_ps(out+4*i+offsets[1], u1);
2802  _mm_storeu_ps(out+4*i+offsets[2], u2);
2803  _mm_storeu_ps(out+4*i+offsets[3], u3);
2804  }
2805  }
2806  if (add_into)
2807  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2808  for (unsigned int v=0; v<4; ++v)
2809  out[offsets[v]+i] += in[i][v];
2810  else
2811  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2812  for (unsigned int v=0; v<4; ++v)
2813  out[offsets[v]+i] = in[i][v];
2814 }
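// The specialization above performs the inverse operation: for i < n_entries
// and v < 4 it writes out[offsets[v] + i] = in[i][v], or accumulates into the
// destination when add_into is true. Continuing the illustrative data from
// the sketch after vectorized_load_and_transpose:
//
//   float result[8] = {};   // zero-initialized destination
//   vectorized_transpose_and_store(false, 2, out, offsets, result);
//   // 'result' now holds the same values and layout as 'in' above; passing
//   // add_into = true would instead add in[i][v] onto result[offsets[v] + i]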
2815 
2816 
2817 
2818 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
2819 
2820 
2826 template <typename Number>
2827 inline DEAL_II_ALWAYS_INLINE
2828 bool
2829 operator == (const VectorizedArray<Number> &lhs,
2830  const VectorizedArray<Number> &rhs)
2831 {
2832  for (unsigned int i=0; i<VectorizedArray<Number>::n_array_elements; ++i)
2833  if (lhs[i] != rhs[i])
2834  return false;
2835 
2836  return true;
2837 }
2838 
2839 
2845 template <typename Number>
2846 inline DEAL_II_ALWAYS_INLINE
2847 VectorizedArray<Number>
2848 operator + (const VectorizedArray<Number> &u,
2849  const VectorizedArray<Number> &v)
2850 {
2851  VectorizedArray<Number> tmp = u;
2852  return tmp+=v;
2853 }
2854 
2860 template <typename Number>
2861 inline DEAL_II_ALWAYS_INLINE
2862 VectorizedArray<Number>
2863 operator - (const VectorizedArray<Number> &u,
2864  const VectorizedArray<Number> &v)
2865 {
2866  VectorizedArray<Number> tmp = u;
2867  return tmp-=v;
2868 }
2869 
2875 template <typename Number>
2876 inline DEAL_II_ALWAYS_INLINE
2877 VectorizedArray<Number>
2878 operator * (const VectorizedArray<Number> &u,
2879  const VectorizedArray<Number> &v)
2880 {
2881  VectorizedArray<Number> tmp = u;
2882  return tmp*=v;
2883 }
2884 
2890 template <typename Number>
2891 inline DEAL_II_ALWAYS_INLINE
2892 VectorizedArray<Number>
2893 operator / (const VectorizedArray<Number> &u,
2894  const VectorizedArray<Number> &v)
2895 {
2896  VectorizedArray<Number> tmp = u;
2897  return tmp/=v;
2898 }
2899 
2906 template <typename Number>
2907 inline DEAL_II_ALWAYS_INLINE
2908 VectorizedArray<Number>
2909 operator + (const Number &u,
2910  const VectorizedArray<Number> &v)
2911 {
2912  VectorizedArray<Number> tmp;
2913  tmp = u;
2914  return tmp+=v;
2915 }
2916 
2925 inline DEAL_II_ALWAYS_INLINE
2926 VectorizedArray<float>
2927 operator + (const double &u,
2928  const VectorizedArray<float> &v)
2929 {
2930  VectorizedArray<float> tmp;
2931  tmp = u;
2932  return tmp+=v;
2933 }
2934 
2941 template <typename Number>
2942 inline DEAL_II_ALWAYS_INLINE
2943 VectorizedArray<Number>
2944 operator + (const VectorizedArray<Number> &v,
2945  const Number &u)
2946 {
2947  return u + v;
2948 }
2949 
2958 inline DEAL_II_ALWAYS_INLINE
2959 VectorizedArray<float>
2960 operator + (const VectorizedArray<float> &v,
2961  const double &u)
2962 {
2963  return u + v;
2964 }
2965 
2972 template <typename Number>
2973 inline DEAL_II_ALWAYS_INLINE
2974 VectorizedArray<Number>
2975 operator - (const Number &u,
2976  const VectorizedArray<Number> &v)
2977 {
2978  VectorizedArray<Number> tmp;
2979  tmp = u;
2980  return tmp-=v;
2981 }
2982 
2991 inline DEAL_II_ALWAYS_INLINE
2992 VectorizedArray<float>
2993 operator - (const double &u,
2994  const VectorizedArray<float> &v)
2995 {
2996  VectorizedArray<float> tmp;
2997  tmp = float(u);
2998  return tmp-=v;
2999 }
3000 
3007 template <typename Number>
3008 inline DEAL_II_ALWAYS_INLINE
3009 VectorizedArray<Number>
3010 operator - (const VectorizedArray<Number> &v,
3011  const Number &u)
3012 {
3013  VectorizedArray<Number> tmp;
3014  tmp = u;
3015  return v-tmp;
3016 }
3017 
3026 inline DEAL_II_ALWAYS_INLINE
3027 VectorizedArray<float>
3028 operator - (const VectorizedArray<float> &v,
3029  const double &u)
3030 {
3031  VectorizedArray<float> tmp;
3032  tmp = float(u);
3033  return v-tmp;
3034 }
3035 
3042 template <typename Number>
3043 inline DEAL_II_ALWAYS_INLINE
3044 VectorizedArray<Number>
3045 operator * (const Number &u,
3046  const VectorizedArray<Number> &v)
3047 {
3048  VectorizedArray<Number> tmp;
3049  tmp = u;
3050  return tmp*=v;
3051 }
3052 
3061 inline DEAL_II_ALWAYS_INLINE
3062 VectorizedArray<float>
3063 operator * (const double &u,
3064  const VectorizedArray<float> &v)
3065 {
3066  VectorizedArray<float> tmp;
3067  tmp = float(u);
3068  return tmp*=v;
3069 }
3070 
3077 template <typename Number>
3078 inline DEAL_II_ALWAYS_INLINE
3079 VectorizedArray<Number>
3080 operator * (const VectorizedArray<Number> &v,
3081  const Number &u)
3082 {
3083  return u * v;
3084 }
3085 
3094 inline DEAL_II_ALWAYS_INLINE
3095 VectorizedArray<float>
3096 operator * (const VectorizedArray<float> &v,
3097  const double &u)
3098 {
3099  return u * v;
3100 }
3101 
3108 template <typename Number>
3109 inline DEAL_II_ALWAYS_INLINE
3110 VectorizedArray<Number>
3111 operator / (const Number &u,
3112  const VectorizedArray<Number> &v)
3113 {
3114  VectorizedArray<Number> tmp;
3115  tmp = u;
3116  return tmp/=v;
3117 }
3118 
3127 inline DEAL_II_ALWAYS_INLINE
3128 VectorizedArray<float>
3129 operator / (const double &u,
3130  const VectorizedArray<float> &v)
3131 {
3132  VectorizedArray<float> tmp;
3133  tmp = float(u);
3134  return tmp/=v;
3135 }
3136 
3143 template <typename Number>
3144 inline DEAL_II_ALWAYS_INLINE
3145 VectorizedArray<Number>
3146 operator / (const VectorizedArray<Number> &v,
3147  const Number &u)
3148 {
3149  VectorizedArray<Number> tmp;
3150  tmp = u;
3151  return v/tmp;
3152 }
3153 
3162 inline DEAL_II_ALWAYS_INLINE
3163 VectorizedArray<float>
3164 operator / (const VectorizedArray<float> &v,
3165  const double &u)
3166 {
3167  VectorizedArray<float> tmp;
3168  tmp = float(u);
3169  return v/tmp;
3170 }
3171 
3177 template <typename Number>
3178 inline DEAL_II_ALWAYS_INLINE
3179 VectorizedArray<Number>
3180 operator + (const VectorizedArray<Number> &u)
3181 {
3182  return u;
3183 }
3184 
3190 template <typename Number>
3191 inline DEAL_II_ALWAYS_INLINE
3192 VectorizedArray<Number>
3193 operator - (const VectorizedArray<Number> &u)
3194 {
3195  // to get a negative sign, subtract the input from zero (could also
3196  // multiply by -1, but this one is slightly simpler)
3197  return VectorizedArray<Number>()-u;
3198 }
3199 
3200 
3201 DEAL_II_NAMESPACE_CLOSE
3202 
3203 
3210 namespace std
3211 {
3219  template <typename Number>
3220  inline
3221  ::VectorizedArray<Number>
3222  sin (const ::VectorizedArray<Number> &x)
3223  {
3224  // put values in an array and later read in that array with an unaligned
3225  // read. This should save some instructions as compared to directly
3226  // setting the individual elements and also circumvents a compiler
3227  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
3228  // from April 2014, topic "matrix_free/step-48 Test").
3229  Number values[::VectorizedArray<Number>::n_array_elements];
3230  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3231  values[i] = std::sin(x[i]);
3232  ::VectorizedArray<Number> out;
3233  out.load(&values[0]);
3234  return out;
3235  }
3236 
3237 
3238 
3246  template <typename Number>
3247  inline
3248  ::VectorizedArray<Number>
3249  cos (const ::VectorizedArray<Number> &x)
3250  {
3251  Number values[::VectorizedArray<Number>::n_array_elements];
3252  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3253  values[i] = std::cos(x[i]);
3254  ::VectorizedArray<Number> out;
3255  out.load(&values[0]);
3256  return out;
3257  }
3258 
3259 
3260 
3268  template <typename Number>
3269  inline
3270  ::VectorizedArray<Number>
3271  tan (const ::VectorizedArray<Number> &x)
3272  {
3273  Number values[::VectorizedArray<Number>::n_array_elements];
3274  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3275  values[i] = std::tan(x[i]);
3276  ::VectorizedArray<Number> out;
3277  out.load(&values[0]);
3278  return out;
3279  }
3280 
3281 
3282 
3290  template <typename Number>
3291  inline
3292  ::VectorizedArray<Number>
3293  exp (const ::VectorizedArray<Number> &x)
3294  {
3295  Number values[::VectorizedArray<Number>::n_array_elements];
3296  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3297  values[i] = std::exp(x[i]);
3298  ::VectorizedArray<Number> out;
3299  out.load(&values[0]);
3300  return out;
3301  }
3302 
3303 
3304 
3312  template <typename Number>
3313  inline
3314  ::VectorizedArray<Number>
3315  log (const ::VectorizedArray<Number> &x)
3316  {
3317  Number values[::VectorizedArray<Number>::n_array_elements];
3318  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3319  values[i] = std::log(x[i]);
3320  ::VectorizedArray<Number> out;
3321  out.load(&values[0]);
3322  return out;
3323  }
3324 
3325 
3326 
3334  template <typename Number>
3335  inline
3336  ::VectorizedArray<Number>
3337  sqrt (const ::VectorizedArray<Number> &x)
3338  {
3339  return x.get_sqrt();
3340  }
3341 
3342 
3343 
3351  template <typename Number>
3352  inline
3353  ::VectorizedArray<Number>
3354  pow (const ::VectorizedArray<Number> &x,
3355  const Number p)
3356  {
3357  Number values[::VectorizedArray<Number>::n_array_elements];
3358  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3359  values[i] = std::pow(x[i], p);
3360  ::VectorizedArray<Number> out;
3361  out.load(&values[0]);
3362  return out;
3363  }
3364 
3365 
3366 
3374  template <typename Number>
3375  inline
3376  ::VectorizedArray<Number>
3377  abs (const ::VectorizedArray<Number> &x)
3378  {
3379  return x.get_abs();
3380  }
3381 
3382 
3383 
3391  template <typename Number>
3392  inline
3393  ::VectorizedArray<Number>
3394  max (const ::VectorizedArray<Number> &x,
3395  const ::VectorizedArray<Number> &y)
3396  {
3397  return x.get_max(y);
3398  }
3399 
3400 
3401 
3409  template <typename Number>
3410  inline
3411  ::VectorizedArray<Number>
3412  min (const ::VectorizedArray<Number> &x,
3413  const ::VectorizedArray<Number> &y)
3414  {
3415  return x.get_min(y);
3416  }
3417 
3418 }
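// With the overloads above, the standard function names apply directly to a
// whole VectorizedArray, treating each lane independently. A brief sketch
// with illustrative values only:
//
//   VectorizedArray<double> x;
//   x = 2.0;                                   // broadcast 2.0 to all lanes
//   VectorizedArray<double> y = std::sqrt(x);  // every lane holds sqrt(2)
//   VectorizedArray<double> z = std::max(x, y);
//   // std::sin, std::cos, std::exp, ... fall back to a lane-by-lane loop,
//   // while sqrt/abs/max/min map to hardware instructions where available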
3419 
3420 #endif