#ifndef dealii_vectorization_h
#define dealii_vectorization_h

#include <deal.II/base/config.h>
#include <deal.II/base/exceptions.h>
#include <deal.II/base/template_constraints.h>

#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && !defined(__AVX__)
#error "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#endif
#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && !defined(__AVX512F__)
#error "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#endif

#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 // AVX, AVX-512
#include <immintrin.h>
#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1 // SSE2
#include <emmintrin.h>
#endif

DEAL_II_NAMESPACE_OPEN
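// DEAL_II_COMPILER_VECTORIZATION_LEVEL is determined when deal.II is
// configured. Judging from the preprocessor checks above and from the
// specializations selected further down in this file, 0 corresponds to plain
// scalar code, 1 to SSE2, 2 to AVX, and 3 to AVX-512.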
// Forward declaration (presumably; the text surrounding this template header
// is elided in this excerpt).
template <typename Number>
class VectorizedArray;

/**
 * Generic implementation of VectorizedArray. It is specialized below for
 * double and float when SSE2, AVX, or AVX-512 instructions are available;
 * this fallback wraps a single scalar entry.
 */
template <typename Number>
class VectorizedArray
{
public:
  /**
   * Number of elements collected in this class; one for the generic
   * fallback.
   */
  static const unsigned int n_array_elements = 1;
  // The member functions operator=(const Number), the two operator[]
  // accessors, the compound assignment operators +=, -=, *= and /=, as well
  // as load(), store() and streaming_store() are each declared
  // DEAL_II_ALWAYS_INLINE; their bodies are elided in this excerpt.
  DEAL_II_ALWAYS_INLINE
  void gather (const Number       *base_ptr,
               const unsigned int *offsets)
  {
    data = base_ptr[offsets[0]];
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                Number             *base_ptr) const
  {
    base_ptr[offsets[0]] = data;
  }
  // get_sqrt(), get_abs(), get_max() and get_min(), returning the
  // component-wise square root, absolute value, maximum and minimum, are
  // elided in this excerpt.

  /**
   * Actual data field.
   */
  Number data;
};
/**
 * Create a vectorized array that sets all entries to the given scalar.
 */
template <typename Number>
inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
make_vectorized_array (const Number &u)
{
  VectorizedArray<Number> result;
  result = u;
  return result;
}
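// A brief usage sketch (the generic fallback above holds a single lane, the
// specializations below hold several):
//
// @code
//   VectorizedArray<double> a = make_vectorized_array (2.5);
//   a += make_vectorized_array (1.0);   // every lane of a now holds 3.5
// @endcode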
/**
 * Load n_array_elements data streams from the given offsets into out and
 * transpose them on the fly (array-of-struct to struct-of-array). This is
 * the generic fallback; vectorized specializations follow below.
 */
template <typename Number>
inline
void
vectorized_load_and_transpose (const unsigned int       n_entries,
                               const Number            *in,
                               const unsigned int      *offsets,
                               VectorizedArray<Number> *out)
{
  for (unsigned int i=0; i<n_entries; ++i)
    for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
      out[i][v] = in[offsets[v]+i];
}
/**
 * Transpose the vectorized data in and write it back to the
 * n_array_elements streams given by offsets, either adding into
 * (add_into == true) or overwriting the destination. This is the generic
 * fallback.
 */
template <typename Number>
inline
void
vectorized_transpose_and_store (const bool                     add_into,
                                const unsigned int             n_entries,
                                const VectorizedArray<Number> *in,
                                const unsigned int            *offsets,
                                Number                        *out)
{
  if (add_into)
    for (unsigned int i=0; i<n_entries; ++i)
      for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=0; i<n_entries; ++i)
      for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
        out[offsets[v]+i] = in[i][v];
}
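// A minimal usage sketch of the two helpers above, assuming four lanes and a
// hypothetical array `source` that holds n_array_elements consecutive blocks
// of at least n_entries doubles each:
//
// @code
//   const unsigned int      n_entries  = 16;
//   const unsigned int      offsets[4] = {0, 16, 32, 48};
//   VectorizedArray<double> packed[16];
//
//   // gather: packed[i][v] == source[offsets[v] + i]
//   vectorized_load_and_transpose (n_entries, source, offsets, packed);
//
//   // ... operate on packed ...
//
//   // scatter back, overwriting (pass `true` to accumulate instead)
//   vectorized_transpose_and_store (false, n_entries, packed, offsets, source);
// @endcode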
#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)

/** Specialization of VectorizedArray for double and AVX-512. */
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm512_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data)+comp);
  }
  // operator+=, operator-=, operator*= and operator/= are elided in this
  // excerpt; each uses the compiler's built-in vector arithmetic when
  // DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS is defined and the corresponding
  // intrinsic otherwise.

  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm512_loadu_pd (ptr);
  }
  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm512_storeu_pd (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void streaming_store (double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr)%64==0,
           ExcMessage("Memory not aligned"));
    _mm512_stream_pd(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void gather (const double       *base_ptr,
               const unsigned int *offsets)
  {
    // load the eight 32-bit offsets through a float load and reinterpret
    // them as the integer index vector for the hardware gather
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    data = _mm512_i32gather_pd(index, base_ptr, 8);
  }
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double             *base_ptr) const
  {
    for (unsigned int i=0; i<8; ++i)
      for (unsigned int j=i+1; j<8; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    _mm512_i32scatter_pd(base_ptr, index, data, 8);
  }
  // get_sqrt(), get_max() and get_min() are elided in this excerpt; they
  // return the component-wise square root, maximum and minimum.

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // set the sign bit of each entry to zero (and-not with -0.0)
    __m512d mask = _mm512_set1_pd (-0.);
    VectorizedArray res;
    res.data = (__m512d)_mm512_andnot_epi64 ((__m512i)mask, (__m512i)data);
    return res;
  }

  /**
   * Actual data field.
   */
  __m512d data;
};
/** Specialization of vectorized_load_and_transpose for double and AVX-512. */
template <>
inline
void
vectorized_load_and_transpose (const unsigned int       n_entries,
                               const double            *in,
                               const unsigned int      *offsets,
                               VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/4;
  // do the transpose in 4x4 blocks, treating the two 256-bit halves of the
  // 512-bit registers separately
  for (unsigned int outer=0; outer<8; outer += 4)
    {
      const double *in0 = in + offsets[0+outer];
      const double *in1 = in + offsets[1+outer];
      const double *in2 = in + offsets[2+outer];
      const double *in3 = in + offsets[3+outer];

      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256d u0 = _mm256_loadu_pd(in0+4*i);
          __m256d u1 = _mm256_loadu_pd(in1+4*i);
          __m256d u2 = _mm256_loadu_pd(in2+4*i);
          __m256d u3 = _mm256_loadu_pd(in3+4*i);
          __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
          __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
          __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
          __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
          *(__m256d *)((double *)(&out[4*i+0].data)+outer) = _mm256_unpacklo_pd (t0, t1);
          *(__m256d *)((double *)(&out[4*i+1].data)+outer) = _mm256_unpackhi_pd (t0, t1);
          *(__m256d *)((double *)(&out[4*i+2].data)+outer) = _mm256_unpacklo_pd (t2, t3);
          *(__m256d *)((double *)(&out[4*i+3].data)+outer) = _mm256_unpackhi_pd (t2, t3);
        }
      for (unsigned int i=4*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<4; ++v)
          out[i][outer+v] = in[offsets[v+outer]+i];
    }
}
/** Specialization of vectorized_transpose_and_store for double and AVX-512. */
template <>
inline
void
vectorized_transpose_and_store (const bool                     add_into,
                                const unsigned int             n_entries,
                                const VectorizedArray<double> *in,
                                const unsigned int            *offsets,
                                double                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  // as above, do the transpose in 4x4 blocks on the two 256-bit halves
  for (unsigned int outer=0; outer<8; outer += 4)
    {
      double *out0 = out + offsets[0+outer];
      double *out1 = out + offsets[1+outer];
      double *out2 = out + offsets[2+outer];
      double *out3 = out + offsets[3+outer];
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256d u0 = *(const __m256d *)((const double *)(&in[4*i+0].data)+outer);
          __m256d u1 = *(const __m256d *)((const double *)(&in[4*i+1].data)+outer);
          __m256d u2 = *(const __m256d *)((const double *)(&in[4*i+2].data)+outer);
          __m256d u3 = *(const __m256d *)((const double *)(&in[4*i+3].data)+outer);
          __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
          __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
          __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
          __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
          __m256d res0 = _mm256_unpacklo_pd (t0, t1);
          __m256d res1 = _mm256_unpackhi_pd (t0, t1);
          __m256d res2 = _mm256_unpacklo_pd (t2, t3);
          __m256d res3 = _mm256_unpackhi_pd (t2, t3);

          if (add_into)
            {
              res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
              _mm256_storeu_pd(out0+4*i, res0);
              res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
              _mm256_storeu_pd(out1+4*i, res1);
              res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
              _mm256_storeu_pd(out2+4*i, res2);
              res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
              _mm256_storeu_pd(out3+4*i, res3);
            }
          else
            {
              _mm256_storeu_pd(out0+4*i, res0);
              _mm256_storeu_pd(out1+4*i, res1);
              _mm256_storeu_pd(out2+4*i, res2);
              _mm256_storeu_pd(out3+4*i, res3);
            }
        }
      if (add_into)
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<4; ++v)
            out[offsets[v+outer]+i] += in[i][v+outer];
      else
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<4; ++v)
            out[offsets[v+outer]+i] = in[i][v+outer];
    }
}
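// The 4x4 blocks above are transposed with the usual AVX idiom: two
// _mm256_permute2f128_pd calls interleave the 128-bit halves of the rows,
// and _mm256_unpacklo_pd/_mm256_unpackhi_pd then interleave the 64-bit
// entries. As a self-contained sketch of that building block (a free
// function written here only for illustration, not part of deal.II):
//
// @code
//   #include <immintrin.h>
//
//   // transpose a 4x4 block of doubles held in four __m256d registers
//   inline void transpose_4x4_pd (__m256d &r0, __m256d &r1,
//                                 __m256d &r2, __m256d &r3)
//   {
//     const __m256d t0 = _mm256_permute2f128_pd (r0, r2, 0x20);
//     const __m256d t1 = _mm256_permute2f128_pd (r1, r3, 0x20);
//     const __m256d t2 = _mm256_permute2f128_pd (r0, r2, 0x31);
//     const __m256d t3 = _mm256_permute2f128_pd (r1, r3, 0x31);
//     r0 = _mm256_unpacklo_pd (t0, t1);  // column 0
//     r1 = _mm256_unpackhi_pd (t0, t1);  // column 1
//     r2 = _mm256_unpacklo_pd (t2, t3);  // column 2
//     r3 = _mm256_unpackhi_pd (t2, t3);  // column 3
//   }
// @endcode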
/** Specialization of VectorizedArray for float and AVX-512. */
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 16;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm512_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data)+comp);
  }
  // operator+=, operator-=, operator*= and operator/= are elided in this
  // excerpt; as for the double specialization they use either the compiler's
  // built-in vector arithmetic (DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS) or
  // the corresponding intrinsics.

  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm512_loadu_ps (ptr);
  }
  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm512_storeu_ps (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void streaming_store (float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr)%64==0,
           ExcMessage("Memory not aligned"));
    _mm512_stream_ps(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void gather (const float        *base_ptr,
               const unsigned int *offsets)
  {
    // load the sixteen 32-bit offsets through a float load and reinterpret
    // them as the integer index vector for the hardware gather
    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
    const __m512i index = *((__m512i *)(&index_val));
    data = _mm512_i32gather_ps(index, base_ptr, 4);
  }
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float              *base_ptr) const
  {
    for (unsigned int i=0; i<16; ++i)
      for (unsigned int j=i+1; j<16; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
    const __m512i index = *((__m512i *)(&index_val));
    _mm512_i32scatter_ps(base_ptr, index, data, 4);
  }
  // get_sqrt(), get_max() and get_min() are elided in this excerpt.

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // set the sign bit of each entry to zero (and-not with -0.0f)
    __m512 mask = _mm512_set1_ps (-0.f);
    VectorizedArray res;
    res.data = (__m512)_mm512_andnot_epi32 ((__m512i)mask, (__m512i)data);
    return res;
  }

  /**
   * Actual data field.
   */
  __m512 data;
};
/** Specialization of vectorized_load_and_transpose for float and AVX-512. */
template <>
inline
void
vectorized_load_and_transpose (const unsigned int      n_entries,
                               const float            *in,
                               const unsigned int     *offsets,
                               VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int outer = 0; outer<16; outer += 8)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0+outer]);
          __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1+outer]);
          __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2+outer]);
          __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3+outer]);
          __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4+outer]);
          __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5+outer]);
          __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6+outer]);
          __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7+outer]);
          // _mm256_insertf128_ps reads from t3 before it is assigned, so
          // initialize it to avoid warnings about uninitialized use
          __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
          t0 = _mm256_insertf128_ps (t3, u0, 0);
          t0 = _mm256_insertf128_ps (t0, u4, 1);
          t1 = _mm256_insertf128_ps (t3, u1, 0);
          t1 = _mm256_insertf128_ps (t1, u5, 1);
          t2 = _mm256_insertf128_ps (t3, u2, 0);
          t2 = _mm256_insertf128_ps (t2, u6, 1);
          t3 = _mm256_insertf128_ps (t3, u3, 0);
          t3 = _mm256_insertf128_ps (t3, u7, 1);
          __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
          __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
          __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
          __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
          *(__m256 *)((float *)(&out[4*i+0].data)+outer) = _mm256_shuffle_ps (v0, v2, 0x88);
          *(__m256 *)((float *)(&out[4*i+1].data)+outer) = _mm256_shuffle_ps (v0, v2, 0xdd);
          *(__m256 *)((float *)(&out[4*i+2].data)+outer) = _mm256_shuffle_ps (v1, v3, 0x88);
          *(__m256 *)((float *)(&out[4*i+3].data)+outer) = _mm256_shuffle_ps (v1, v3, 0xdd);
        }
      for (unsigned int i=4*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<8; ++v)
          out[i][v+outer] = in[offsets[v+outer]+i];
    }
}
/** Specialization of vectorized_transpose_and_store for float and AVX-512. */
template <>
inline
void
vectorized_transpose_and_store (const bool                    add_into,
                                const unsigned int            n_entries,
                                const VectorizedArray<float> *in,
                                const unsigned int           *offsets,
                                float                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int outer = 0; outer<16; outer += 8)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256 u0 = *(const __m256 *)((const float *)(&in[4*i+0].data)+outer);
          __m256 u1 = *(const __m256 *)((const float *)(&in[4*i+1].data)+outer);
          __m256 u2 = *(const __m256 *)((const float *)(&in[4*i+2].data)+outer);
          __m256 u3 = *(const __m256 *)((const float *)(&in[4*i+3].data)+outer);
          __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
          __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
          __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
          __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
          u0 = _mm256_shuffle_ps (t0, t2, 0x88);
          u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
          u2 = _mm256_shuffle_ps (t1, t3, 0x88);
          u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
          __m128 res0 = _mm256_extractf128_ps (u0, 0);
          __m128 res4 = _mm256_extractf128_ps (u0, 1);
          __m128 res1 = _mm256_extractf128_ps (u1, 0);
          __m128 res5 = _mm256_extractf128_ps (u1, 1);
          __m128 res2 = _mm256_extractf128_ps (u2, 0);
          __m128 res6 = _mm256_extractf128_ps (u2, 1);
          __m128 res3 = _mm256_extractf128_ps (u3, 0);
          __m128 res7 = _mm256_extractf128_ps (u3, 1);

          if (add_into)
            {
              res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0+outer]), res0);
              _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
              res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1+outer]), res1);
              _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
              res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2+outer]), res2);
              _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
              res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3+outer]), res3);
              _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
              res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4+outer]), res4);
              _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
              res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5+outer]), res5);
              _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
              res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6+outer]), res6);
              _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
              res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7+outer]), res7);
              _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
            }
          else
            {
              _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
              _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
              _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
              _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
              _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
              _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
              _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
              _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
            }
        }
      if (add_into)
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<8; ++v)
            out[offsets[v+outer]+i] += in[i][v+outer];
      else
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<8; ++v)
            out[offsets[v+outer]+i] = in[i][v+outer];
    }
}
#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)

/** Specialization of VectorizedArray for double and AVX. */
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm256_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data)+comp);
  }
  // operator+=, operator-=, operator*= and operator/= are elided in this
  // excerpt (vector arithmetic or AVX intrinsics, as above).

  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm256_loadu_pd (ptr);
  }
  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm256_storeu_pd (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void streaming_store (double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr)%32==0,
           ExcMessage("Memory not aligned"));
    _mm256_stream_pd(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void gather (const double       *base_ptr,
               const unsigned int *offsets)
  {
#ifdef __AVX2__
    // AVX2 provides a hardware gather: load the four 32-bit offsets through
    // a float load and reinterpret them as the integer index vector
    const __m128 index_val = _mm_loadu_ps((const float *)offsets);
    const __m128i index = *((__m128i *)(&index_val));
    data = _mm256_i32gather_pd(base_ptr, index, 8);
#else
    for (unsigned int i=0; i<4; ++i)
      *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
#endif
  }
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double             *base_ptr) const
  {
    // no scatter operation in AVX/AVX2, so write out the entries one by one
    for (unsigned int i=0; i<4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
  }
  // get_sqrt(), get_max() and get_min() are elided in this excerpt.

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // set the sign bit of each entry to zero (and-not with -0.0)
    __m256d mask = _mm256_set1_pd (-0.);
    VectorizedArray res;
    res.data = _mm256_andnot_pd(mask, data);
    return res;
  }

  /**
   * Actual data field.
   */
  __m256d data;
};
/** Specialization of vectorized_load_and_transpose for double and AVX. */
template <>
inline
void
vectorized_load_and_transpose (const unsigned int       n_entries,
                               const double            *in,
                               const unsigned int      *offsets,
                               VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/4;
  const double *in0 = in + offsets[0];
  const double *in1 = in + offsets[1];
  const double *in2 = in + offsets[2];
  const double *in3 = in + offsets[3];

  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256d u0 = _mm256_loadu_pd(in0+4*i);
      __m256d u1 = _mm256_loadu_pd(in1+4*i);
      __m256d u2 = _mm256_loadu_pd(in2+4*i);
      __m256d u3 = _mm256_loadu_pd(in3+4*i);
      __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
      __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
      __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
      __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
      out[4*i+0].data = _mm256_unpacklo_pd (t0, t1);
      out[4*i+1].data = _mm256_unpackhi_pd (t0, t1);
      out[4*i+2].data = _mm256_unpacklo_pd (t2, t3);
      out[4*i+3].data = _mm256_unpackhi_pd (t2, t3);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<4; ++v)
      out[i][v] = in[offsets[v]+i];
}
/** Specialization of vectorized_transpose_and_store for double and AVX. */
template <>
inline
void
vectorized_transpose_and_store (const bool                     add_into,
                                const unsigned int             n_entries,
                                const VectorizedArray<double> *in,
                                const unsigned int            *offsets,
                                double                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  double *out0 = out + offsets[0];
  double *out1 = out + offsets[1];
  double *out2 = out + offsets[2];
  double *out3 = out + offsets[3];
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256d u0 = in[4*i+0].data;
      __m256d u1 = in[4*i+1].data;
      __m256d u2 = in[4*i+2].data;
      __m256d u3 = in[4*i+3].data;
      __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
      __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
      __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
      __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
      __m256d res0 = _mm256_unpacklo_pd (t0, t1);
      __m256d res1 = _mm256_unpackhi_pd (t0, t1);
      __m256d res2 = _mm256_unpacklo_pd (t2, t3);
      __m256d res3 = _mm256_unpackhi_pd (t2, t3);

      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
          _mm256_storeu_pd(out0+4*i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
          _mm256_storeu_pd(out1+4*i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
          _mm256_storeu_pd(out2+4*i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
          _mm256_storeu_pd(out3+4*i, res3);
        }
      else
        {
          _mm256_storeu_pd(out0+4*i, res0);
          _mm256_storeu_pd(out1+4*i, res1);
          _mm256_storeu_pd(out2+4*i, res2);
          _mm256_storeu_pd(out3+4*i, res3);
        }
    }
  if (add_into)
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] = in[i][v];
}
/** Specialization of VectorizedArray for float and AVX. */
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm256_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data)+comp);
  }
  // operator+=, operator-=, operator*= and operator/= are elided in this
  // excerpt (vector arithmetic or AVX intrinsics, as above).

  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm256_loadu_ps (ptr);
  }
  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm256_storeu_ps (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void streaming_store (float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr)%32==0,
           ExcMessage("Memory not aligned"));
    _mm256_stream_ps(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void gather (const float        *base_ptr,
               const unsigned int *offsets)
  {
#ifdef __AVX2__
    // AVX2 provides a hardware gather: load the eight 32-bit offsets through
    // a float load and reinterpret them as the integer index vector
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    data = _mm256_i32gather_ps(base_ptr, index, 4);
#else
    for (unsigned int i=0; i<8; ++i)
      *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
#endif
  }
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float              *base_ptr) const
  {
    // no scatter operation in AVX/AVX2, so write out the entries one by one
    for (unsigned int i=0; i<8; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
  }
  // get_sqrt(), get_max() and get_min() are elided in this excerpt.

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // set the sign bit of each entry to zero (and-not with -0.0f)
    __m256 mask = _mm256_set1_ps (-0.f);
    VectorizedArray res;
    res.data = _mm256_andnot_ps(mask, data);
    return res;
  }

  /**
   * Actual data field.
   */
  __m256 data;
};
/** Specialization of vectorized_load_and_transpose for float and AVX. */
template <>
inline
void
vectorized_load_and_transpose (const unsigned int      n_entries,
                               const float            *in,
                               const unsigned int     *offsets,
                               VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
      __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
      __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
      __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
      __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4]);
      __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5]);
      __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6]);
      __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7]);
      // _mm256_insertf128_ps reads from t3 before it is assigned, so
      // initialize it to avoid warnings about uninitialized use
      __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
      t0 = _mm256_insertf128_ps (t3, u0, 0);
      t0 = _mm256_insertf128_ps (t0, u4, 1);
      t1 = _mm256_insertf128_ps (t3, u1, 0);
      t1 = _mm256_insertf128_ps (t1, u5, 1);
      t2 = _mm256_insertf128_ps (t3, u2, 0);
      t2 = _mm256_insertf128_ps (t2, u6, 1);
      t3 = _mm256_insertf128_ps (t3, u3, 0);
      t3 = _mm256_insertf128_ps (t3, u7, 1);
      __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
      __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
      __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
      __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
      out[4*i+0].data = _mm256_shuffle_ps (v0, v2, 0x88);
      out[4*i+1].data = _mm256_shuffle_ps (v0, v2, 0xdd);
      out[4*i+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
      out[4*i+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<8; ++v)
      out[i][v] = in[offsets[v]+i];
}
/** Specialization of vectorized_transpose_and_store for float and AVX. */
template <>
inline
void
vectorized_transpose_and_store (const bool                    add_into,
                                const unsigned int            n_entries,
                                const VectorizedArray<float> *in,
                                const unsigned int           *offsets,
                                float                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256 u0 = in[4*i+0].data;
      __m256 u1 = in[4*i+1].data;
      __m256 u2 = in[4*i+2].data;
      __m256 u3 = in[4*i+3].data;
      __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
      __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
      __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
      __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
      u0 = _mm256_shuffle_ps (t0, t2, 0x88);
      u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
      u2 = _mm256_shuffle_ps (t1, t3, 0x88);
      u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
      __m128 res0 = _mm256_extractf128_ps (u0, 0);
      __m128 res4 = _mm256_extractf128_ps (u0, 1);
      __m128 res1 = _mm256_extractf128_ps (u1, 0);
      __m128 res5 = _mm256_extractf128_ps (u1, 1);
      __m128 res2 = _mm256_extractf128_ps (u2, 0);
      __m128 res6 = _mm256_extractf128_ps (u2, 1);
      __m128 res3 = _mm256_extractf128_ps (u3, 0);
      __m128 res7 = _mm256_extractf128_ps (u3, 1);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), res0);
          _mm_storeu_ps(out+4*i+offsets[0], res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), res1);
          _mm_storeu_ps(out+4*i+offsets[1], res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), res2);
          _mm_storeu_ps(out+4*i+offsets[2], res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), res3);
          _mm_storeu_ps(out+4*i+offsets[3], res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4]), res4);
          _mm_storeu_ps(out+4*i+offsets[4], res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5]), res5);
          _mm_storeu_ps(out+4*i+offsets[5], res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6]), res6);
          _mm_storeu_ps(out+4*i+offsets[6], res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7]), res7);
          _mm_storeu_ps(out+4*i+offsets[7], res7);
        }
      else
        {
          _mm_storeu_ps(out+4*i+offsets[0], res0);
          _mm_storeu_ps(out+4*i+offsets[1], res1);
          _mm_storeu_ps(out+4*i+offsets[2], res2);
          _mm_storeu_ps(out+4*i+offsets[3], res3);
          _mm_storeu_ps(out+4*i+offsets[4], res4);
          _mm_storeu_ps(out+4*i+offsets[5], res5);
          _mm_storeu_ps(out+4*i+offsets[6], res6);
          _mm_storeu_ps(out+4*i+offsets[7], res7);
        }
    }
  if (add_into)
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<8; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<8; ++v)
        out[offsets[v]+i] = in[i][v];
}
#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1

/** Specialization of VectorizedArray for double and SSE2. */
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 2;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data)+comp);
  }
  // operator+=, operator-=, operator*= and operator/= are elided in this
  // excerpt (vector arithmetic or SSE2 intrinsics, as above).

  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm_loadu_pd (ptr);
  }
  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm_storeu_pd (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void streaming_store (double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr)%16==0,
           ExcMessage("Memory not aligned"));
    _mm_stream_pd(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void gather (const double       *base_ptr,
               const unsigned int *offsets)
  {
    for (unsigned int i=0; i<2; ++i)
      *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double             *base_ptr) const
  {
    for (unsigned int i=0; i<2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
  }
  // get_sqrt(), get_max() and get_min() are elided in this excerpt.

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // set the sign bit of each entry to zero (and-not with -0.0)
    __m128d mask = _mm_set1_pd (-0.);
    VectorizedArray res;
    res.data = _mm_andnot_pd(mask, data);
    return res;
  }

  /**
   * Actual data field.
   */
  __m128d data;
};
/** Specialization of vectorized_load_and_transpose for double and SSE2. */
template <>
inline
void
vectorized_load_and_transpose (const unsigned int       n_entries,
                               const double            *in,
                               const unsigned int      *offsets,
                               VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/2;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128d u0 = _mm_loadu_pd(in+2*i+offsets[0]);
      __m128d u1 = _mm_loadu_pd(in+2*i+offsets[1]);
      out[2*i+0].data = _mm_unpacklo_pd (u0, u1);
      out[2*i+1].data = _mm_unpackhi_pd (u0, u1);
    }
  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<2; ++v)
      out[i][v] = in[offsets[v]+i];
}
/** Specialization of vectorized_transpose_and_store for double and SSE2. */
template <>
inline
void
vectorized_transpose_and_store (const bool                     add_into,
                                const unsigned int             n_entries,
                                const VectorizedArray<double> *in,
                                const unsigned int            *offsets,
                                double                        *out)
{
  const unsigned int n_chunks = n_entries/2;
  if (add_into)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128d u0 = in[2*i+0].data;
          __m128d u1 = in[2*i+1].data;
          __m128d res0 = _mm_unpacklo_pd (u0, u1);
          __m128d res1 = _mm_unpackhi_pd (u0, u1);
          _mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0));
          _mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1));
        }
      for (unsigned int i=2*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<2; ++v)
          out[offsets[v]+i] += in[i][v];
    }
  else
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128d u0 = in[2*i+0].data;
          __m128d u1 = in[2*i+1].data;
          __m128d res0 = _mm_unpacklo_pd (u0, u1);
          __m128d res1 = _mm_unpackhi_pd (u0, u1);
          _mm_storeu_pd(out+2*i+offsets[0], res0);
          _mm_storeu_pd(out+2*i+offsets[1], res1);
        }
      for (unsigned int i=2*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<2; ++v)
          out[offsets[v]+i] = in[i][v];
    }
}
/** Specialization of VectorizedArray for float and SSE2. */
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data)+comp);
  }
  // operator+=, operator-=, operator*= and operator/= are elided in this
  // excerpt (vector arithmetic or SSE2 intrinsics, as above).

  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm_loadu_ps (ptr);
  }
  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm_storeu_ps (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void streaming_store (float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr)%16==0,
           ExcMessage("Memory not aligned"));
    _mm_stream_ps(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void gather (const float        *base_ptr,
               const unsigned int *offsets)
  {
    for (unsigned int i=0; i<4; ++i)
      *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float              *base_ptr) const
  {
    for (unsigned int i=0; i<4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
  }
  // get_sqrt(), get_max() and get_min() are elided in this excerpt.

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // set the sign bit of each entry to zero (and-not with -0.0f)
    __m128 mask = _mm_set1_ps (-0.f);
    VectorizedArray res;
    res.data = _mm_andnot_ps(mask, data);
    return res;
  }

  /**
   * Actual data field.
   */
  __m128 data;
};
/** Specialization of vectorized_load_and_transpose for float and SSE2. */
template <>
inline
void
vectorized_load_and_transpose (const unsigned int      n_entries,
                               const float            *in,
                               const unsigned int     *offsets,
                               VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
      __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
      __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
      __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
      __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44);
      __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee);
      __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44);
      __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee);
      out[4*i+0].data = _mm_shuffle_ps (v0, v2, 0x88);
      out[4*i+1].data = _mm_shuffle_ps (v0, v2, 0xdd);
      out[4*i+2].data = _mm_shuffle_ps (v1, v3, 0x88);
      out[4*i+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<4; ++v)
      out[i][v] = in[offsets[v]+i];
}
/** Specialization of vectorized_transpose_and_store for float and SSE2. */
template <>
inline
void
vectorized_transpose_and_store (const bool                    add_into,
                                const unsigned int            n_entries,
                                const VectorizedArray<float> *in,
                                const unsigned int           *offsets,
                                float                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128 u0 = in[4*i+0].data;
      __m128 u1 = in[4*i+1].data;
      __m128 u2 = in[4*i+2].data;
      __m128 u3 = in[4*i+3].data;
      __m128 t0 = _mm_shuffle_ps (u0, u1, 0x44);
      __m128 t1 = _mm_shuffle_ps (u0, u1, 0xee);
      __m128 t2 = _mm_shuffle_ps (u2, u3, 0x44);
      __m128 t3 = _mm_shuffle_ps (u2, u3, 0xee);
      u0 = _mm_shuffle_ps (t0, t2, 0x88);
      u1 = _mm_shuffle_ps (t0, t2, 0xdd);
      u2 = _mm_shuffle_ps (t1, t3, 0x88);
      u3 = _mm_shuffle_ps (t1, t3, 0xdd);

      if (add_into)
        {
          u0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), u0);
          _mm_storeu_ps(out+4*i+offsets[0], u0);
          u1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), u1);
          _mm_storeu_ps(out+4*i+offsets[1], u1);
          u2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), u2);
          _mm_storeu_ps(out+4*i+offsets[2], u2);
          u3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), u3);
          _mm_storeu_ps(out+4*i+offsets[3], u3);
        }
      else
        {
          _mm_storeu_ps(out+4*i+offsets[0], u0);
          _mm_storeu_ps(out+4*i+offsets[1], u1);
          _mm_storeu_ps(out+4*i+offsets[2], u2);
          _mm_storeu_ps(out+4*i+offsets[3], u3);
        }
    }
  if (add_into)
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] = in[i][v];
}
#endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0

/**
 * Test for equality of two vectorized arrays, entry by entry.
 */
template <typename Number>
inline DEAL_II_ALWAYS_INLINE
bool
operator == (const VectorizedArray<Number> &lhs,
             const VectorizedArray<Number> &rhs)
{
  for (unsigned int i=0; i<VectorizedArray<Number>::n_array_elements; ++i)
    if (lhs[i] != rhs[i])
      return false;

  return true;
}
// The declarations that follow here (elided in this excerpt, each marked
// inline DEAL_II_ALWAYS_INLINE) provide the usual arithmetic for
// VectorizedArray<Number>: the binary operators +, -, * and / for two
// vectorized arrays as well as for combinations of a scalar and a
// vectorized array (including the mixed double/float overloads), and the
// unary operators + and -.
DEAL_II_NAMESPACE_CLOSE
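// The overloads in namespace std below make the standard mathematical
// functions applicable to vectorized data fields, entry by entry. A brief
// usage sketch:
//
// @code
//   VectorizedArray<double> x = make_vectorized_array (0.5);
//   VectorizedArray<double> s = std::sin (x);
//   VectorizedArray<double> p = std::pow (x, 2.0);
// @endcode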
namespace std
{
  /** Compute the sine of a vectorized data field, entry by entry. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  sin (const ::dealii::VectorizedArray<Number> &x)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i=0; i < ::dealii::VectorizedArray<Number>::n_array_elements; ++i)
      values[i] = std::sin(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }

  /** Compute the cosine of a vectorized data field, entry by entry. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  cos (const ::dealii::VectorizedArray<Number> &x)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i=0; i < ::dealii::VectorizedArray<Number>::n_array_elements; ++i)
      values[i] = std::cos(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }

  /** Compute the tangent of a vectorized data field, entry by entry. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  tan (const ::dealii::VectorizedArray<Number> &x)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i=0; i < ::dealii::VectorizedArray<Number>::n_array_elements; ++i)
      values[i] = std::tan(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }

  /** Compute the exponential of a vectorized data field, entry by entry. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  exp (const ::dealii::VectorizedArray<Number> &x)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i=0; i < ::dealii::VectorizedArray<Number>::n_array_elements; ++i)
      values[i] = std::exp(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }

  /** Compute the natural logarithm of a vectorized data field, entry by entry. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  log (const ::dealii::VectorizedArray<Number> &x)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i=0; i < ::dealii::VectorizedArray<Number>::n_array_elements; ++i)
      values[i] = std::log(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }

  /** Compute the square root of a vectorized data field, entry by entry. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  sqrt (const ::dealii::VectorizedArray<Number> &x)
  {
    return x.get_sqrt();
  }

  /** Raise a vectorized data field to the scalar power p, entry by entry. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  pow (const ::dealii::VectorizedArray<Number> &x,
       const Number p)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i=0; i < ::dealii::VectorizedArray<Number>::n_array_elements; ++i)
      values[i] = std::pow(x[i], p);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }

  /** Compute the absolute value of a vectorized data field, entry by entry. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  abs (const ::dealii::VectorizedArray<Number> &x)
  {
    return x.get_abs();
  }

  /** Compute the component-wise maximum of two vectorized data fields. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  max (const ::dealii::VectorizedArray<Number> &x,
       const ::dealii::VectorizedArray<Number> &y)
  {
    return x.get_max(y);
  }

  /** Compute the component-wise minimum of two vectorized data fields. */
  template <typename Number>
  inline
  ::dealii::VectorizedArray<Number>
  min (const ::dealii::VectorizedArray<Number> &x,
       const ::dealii::VectorizedArray<Number> &y)
  {
    return x.get_min(y);
  }
}

#endif