#ifndef dealii_vectorization_h
#define dealii_vectorization_h

#include <deal.II/base/config.h>

#include <deal.II/base/exceptions.h>
#include <deal.II/base/template_constraints.h>

#if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0

// Detect a mismatch between the instruction set deal.II was configured
// with and the one available in the current translation unit.
#  if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__SSE2__) && \
    !defined(__AVX__)
#    error \
      "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#  endif
#  if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__SSE2__) && \
    !defined(__AVX512F__)
#    error \
      "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#  endif

#  if defined(_MSC_VER)
#    include <intrin.h>
#  elif defined(__ALTIVEC__)
#    include <altivec.h>
#  else
#    include <x86intrin.h>
#  endif

#endif // DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0


DEAL_II_NAMESPACE_OPEN
template <typename Number>
class VectorizedArray;


/**
 * Generic implementation of VectorizedArray: a class that wraps a single
 * scalar of type Number. It is specialized below for the supported SIMD
 * instruction sets.
 */
template <typename Number>
class VectorizedArray
{
public:
  /**
   * Number of elements in the array; one for this generic fallback.
   */
  static const unsigned int n_array_elements = 1;
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const Number scalar)
  {
    data = scalar;
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  Number &
  operator[](const unsigned int comp)
  {
    (void)comp;
    AssertIndexRange(comp, 1);
    return data;
  }

  DEAL_II_ALWAYS_INLINE
  const Number &
  operator[](const unsigned int comp) const
  {
    (void)comp;
    AssertIndexRange(comp, 1);
    return data;
  }

  // The compound assignment operators apply the corresponding scalar
  // operation to the single stored element:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
    data += vec.data;
    return *this;
  }
  // operator-=, operator*=, and operator/= are analogous.

  DEAL_II_ALWAYS_INLINE
  void
  load(const Number *ptr)
  {
    data = *ptr;
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(Number *ptr) const
  {
    *ptr = data;
  }

  // No non-temporal store exists for a single scalar, so streaming_store()
  // falls back to a plain store:
  DEAL_II_ALWAYS_INLINE
  void
  streaming_store(Number *ptr) const
  {
    *ptr = data;
  }
  DEAL_II_ALWAYS_INLINE
  void
  gather(const Number *base_ptr, const unsigned int *offsets)
  {
    data = base_ptr[offsets[0]];
  }
  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, Number *base_ptr) const
  {
    base_ptr[offsets[0]] = data;
  }
  /**
   * The actual data field.
   */
  Number data;

private:
  // get_sqrt(), get_abs(), get_max(), and get_min() implement the
  // corresponding scalar operation on the single element; they back the
  // std:: overloads declared as friends below:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt() const
  {
    VectorizedArray res;
    res.data = std::sqrt(data);
    return res;
  }
  // Make a few std:: functions friends so that they can use the private
  // helpers above.
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::sqrt(const VectorizedArray<Number2> &);
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::abs(const VectorizedArray<Number2> &);
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::max(const VectorizedArray<Number2> &,
           const VectorizedArray<Number2> &);
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::min(const VectorizedArray<Number2> &,
           const VectorizedArray<Number2> &);
};
/**
 * Create a vectorized array with all elements set to the given scalar.
 */
template <typename Number>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
make_vectorized_array(const Number &u)
{
  VectorizedArray<Number> result;
  result = u;
  return result;
}
/**
 * Load n_entries values from n_array_elements strided input streams and
 * transpose them into an array of VectorizedArray objects.
 */
template <typename Number>
inline void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const Number *           in,
                              const unsigned int *     offsets,
                              VectorizedArray<Number> *out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < VectorizedArray<Number>::n_array_elements;
         ++v)
      out[i][v] = in[offsets[v] + i];
}
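/*
 * An illustrative sketch (the sizes and offsets are made up, and a
 * four-lane instantiation such as the AVX double specialization below is
 * assumed): four strided scalar streams become two vectorized entries.
 *
 * @code
 * double                  in[32];    // filled elsewhere
 * const unsigned int      offsets[4] = {0, 8, 16, 24};
 * VectorizedArray<double> out[2];
 * vectorized_load_and_transpose(2, in, offsets, out);
 * // now out[i][v] == in[offsets[v] + i]
 * @endcode
 */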
/**
 * The inverse of vectorized_load_and_transpose(): transpose an array of
 * VectorizedArray objects back into n_array_elements strided output
 * streams, either adding into or overwriting the output.
 */
template <typename Number>
inline void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<Number> *in,
                               const unsigned int *           offsets,
                               Number *                       out)
{
  if (add_into)
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number>::n_array_elements;
           ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number>::n_array_elements;
           ++v)
        out[offsets[v] + i] = in[i][v];
}
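/*
 * A round-trip sketch under the same made-up sizes as above: with
 * add_into == false the output is overwritten, with add_into == true the
 * transposed values are accumulated onto it.
 *
 * @code
 * VectorizedArray<double> in[2];     // filled elsewhere
 * const unsigned int      offsets[4] = {0, 8, 16, 24};
 * double                  out[32]    = {};
 * vectorized_transpose_and_store(false, 2, in, offsets, out);
 * // out[offsets[v] + i] == in[i][v]
 * vectorized_transpose_and_store(true, 2, in, offsets, out);
 * // every entry written above has now been doubled
 * @endcode
 */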
#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)

/**
 * Specialization of VectorizedArray for double and AVX-512 (8 elements).
 */
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const double x)
  {
    data = _mm512_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 8);
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 8);
    return *(reinterpret_cast<const double *>(&data) + comp);
  }
  // The compound assignment operators use plain vector arithmetic where
  // the compiler supports it and AVX-512 intrinsics otherwise;
  // operator-=, operator*=, and operator/= follow the same pattern with
  // _mm512_sub_pd, _mm512_mul_pd, and _mm512_div_pd.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#  else
    data = _mm512_add_pd(data, vec.data);
#  endif
    return *this;
  }
  DEAL_II_ALWAYS_INLINE
  void
  load(const double *ptr)
  {
    data = _mm512_loadu_pd(ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(double *ptr) const
  {
    _mm512_storeu_pd(ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
           ExcMessage("Memory not aligned"));
    _mm512_stream_pd(ptr, data);
  }
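  /*
   * streaming_store() bypasses the cache and requires the destination to
   * be 64-byte aligned, as the assertion above checks. A sketch of a valid
   * call (the use of posix_memalign is an illustrative assumption; any
   * 64-byte-aligned allocation works):
   *
   * @code
   * void *p = nullptr;
   * posix_memalign(&p, 64, 8 * sizeof(double));
   * VectorizedArray<double> v;
   * v = 1.;
   * v.streaming_store(static_cast<double *>(p));
   * @endcode
   */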
  DEAL_II_ALWAYS_INLINE
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    // The offsets are 32-bit integers: load them as eight floats and
    // reinterpret the register as a vector of eight 32-bit indices.
    const __m256 index_val =
      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
    data                = _mm512_i32gather_pd(index, base_ptr, 8);
  }
  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 8; ++i)
      for (unsigned int j = i + 1; j < 8; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    const __m256 index_val =
      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
    _mm512_i32scatter_pd(base_ptr, index, data, 8);
  }
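  /*
   * A sketch of a gather/scatter round trip over strided data (the stride
   * and array size are illustrative): the offsets must be pairwise
   * distinct, otherwise the scattered result is undefined because the
   * hardware may write the lanes in any order.
   *
   * @code
   * double                  a[24];    // filled elsewhere
   * const unsigned int      offsets[8] = {0, 3, 6, 9, 12, 15, 18, 21};
   * VectorizedArray<double> v;
   * v.gather(a, offsets);   // v[k] == a[3 * k]
   * v.scatter(offsets, a);  // writes every lane back, no aliasing
   * @endcode
   */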
  /**
   * The actual data field.
   */
  __m512d data;

private:
  // get_sqrt(), get_max(), and get_min() wrap _mm512_sqrt_pd,
  // _mm512_max_pd, and _mm512_min_pd; get_abs() clears the sign bit of
  // each element. AVX-512F has no double-precision bitwise andnot, so the
  // registers are reinterpreted as integers:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs() const
  {
    __m512d mask = _mm512_set1_pd(-0.);
    VectorizedArray res;
    res.data = reinterpret_cast<__m512d>(
      _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
                          reinterpret_cast<__m512i>(data)));
    return res;
  }

  // std::sqrt, std::abs, std::max, and std::min for VectorizedArray are
  // declared friends, as in the generic template:
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::sqrt(const VectorizedArray<Number2> &);
  // ... (analogous friend declarations for std::abs, std::max, std::min)
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const double *           in,
                              const unsigned int *     offsets,
                              VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int outer = 0; outer < 8; outer += 4)
    {
      const double *in0 = in + offsets[0 + outer];
      const double *in1 = in + offsets[1 + outer];
      const double *in2 = in + offsets[2 + outer];
      const double *in3 = in + offsets[3 + outer];

      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
          __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
          __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
          __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
          __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
          __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
          __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
          __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
          *reinterpret_cast<__m256d *>(
            reinterpret_cast<double *>(&out[4 * i + 0].data) + outer) =
            _mm256_unpacklo_pd(t0, t1);
          *reinterpret_cast<__m256d *>(
            reinterpret_cast<double *>(&out[4 * i + 1].data) + outer) =
            _mm256_unpackhi_pd(t0, t1);
          *reinterpret_cast<__m256d *>(
            reinterpret_cast<double *>(&out[4 * i + 2].data) + outer) =
            _mm256_unpacklo_pd(t2, t3);
          *reinterpret_cast<__m256d *>(
            reinterpret_cast<double *>(&out[4 * i + 3].data) + outer) =
            _mm256_unpackhi_pd(t2, t3);
        }
      for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 4; ++v)
          out[i][outer + v] = in[offsets[v + outer] + i];
    }
}
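/*
 * What the kernel above computes, in scalar terms: the intrinsic sequence
 * is a blocked 4x4 transpose applied to each half of the eight lanes, and
 * is equivalent to this sketch:
 *
 * @code
 * for (unsigned int i = 0; i < n_entries; ++i)
 *   for (unsigned int v = 0; v < 8; ++v)
 *     out[i][v] = in[offsets[v] + i];
 * @endcode
 */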
template <>
inline void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int *           offsets,
                               double *                       out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int outer = 0; outer < 8; outer += 4)
    {
      double *out0 = out + offsets[0 + outer];
      double *out1 = out + offsets[1 + outer];
      double *out2 = out + offsets[2 + outer];
      double *out3 = out + offsets[3 + outer];
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m256d u0 = *reinterpret_cast<const __m256d *>(
            reinterpret_cast<const double *>(&in[4 * i + 0].data) + outer);
          __m256d u1 = *reinterpret_cast<const __m256d *>(
            reinterpret_cast<const double *>(&in[4 * i + 1].data) + outer);
          __m256d u2 = *reinterpret_cast<const __m256d *>(
            reinterpret_cast<const double *>(&in[4 * i + 2].data) + outer);
          __m256d u3 = *reinterpret_cast<const __m256d *>(
            reinterpret_cast<const double *>(&in[4 * i + 3].data) + outer);
          __m256d t0   = _mm256_permute2f128_pd(u0, u2, 0x20);
          __m256d t1   = _mm256_permute2f128_pd(u1, u3, 0x20);
          __m256d t2   = _mm256_permute2f128_pd(u0, u2, 0x31);
          __m256d t3   = _mm256_permute2f128_pd(u1, u3, 0x31);
          __m256d res0 = _mm256_unpacklo_pd(t0, t1);
          __m256d res1 = _mm256_unpackhi_pd(t0, t1);
          __m256d res2 = _mm256_unpacklo_pd(t2, t3);
          __m256d res3 = _mm256_unpackhi_pd(t2, t3);

          if (add_into)
            {
              res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
              _mm256_storeu_pd(out0 + 4 * i, res0);
              res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
              _mm256_storeu_pd(out1 + 4 * i, res1);
              res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
              _mm256_storeu_pd(out2 + 4 * i, res2);
              res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
              _mm256_storeu_pd(out3 + 4 * i, res3);
            }
          else
            {
              _mm256_storeu_pd(out0 + 4 * i, res0);
              _mm256_storeu_pd(out1 + 4 * i, res1);
              _mm256_storeu_pd(out2 + 4 * i, res2);
              _mm256_storeu_pd(out3 + 4 * i, res3);
            }
        }
      if (add_into)
        for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
          for (unsigned int v = 0; v < 4; ++v)
            out[offsets[v + outer] + i] += in[i][v + outer];
      else
        for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
          for (unsigned int v = 0; v < 4; ++v)
            out[offsets[v + outer] + i] = in[i][v + outer];
    }
}
/**
 * Specialization of VectorizedArray for float and AVX-512 (16 elements).
 */
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 16;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const float x)
  {
    data = _mm512_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 16);
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 16);
    return *(reinterpret_cast<const float *>(&data) + comp);
  }
  // Compound assignment as in the double specialization; operator-=,
  // operator*=, and operator/= use _mm512_sub_ps, _mm512_mul_ps, and
  // _mm512_div_ps, respectively.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#  else
    data = _mm512_add_ps(data, vec.data);
#  endif
    return *this;
  }
  DEAL_II_ALWAYS_INLINE
  void
  load(const float *ptr)
  {
    data = _mm512_loadu_ps(ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(float *ptr) const
  {
    _mm512_storeu_ps(ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
           ExcMessage("Memory not aligned"));
    _mm512_stream_ps(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    // Load the sixteen 32-bit offsets as floats and reinterpret the
    // register as a vector of sixteen 32-bit indices.
    const __m512 index_val =
      _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
    data                = _mm512_i32gather_ps(index, base_ptr, 4);
  }

  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 16; ++i)
      for (unsigned int j = i + 1; j < 16; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    const __m512 index_val =
      _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
    _mm512_i32scatter_ps(base_ptr, index, data, 4);
  }
  /**
   * The actual data field.
   */
  __m512 data;

private:
  // get_sqrt(), get_max(), and get_min() wrap _mm512_sqrt_ps,
  // _mm512_max_ps, and _mm512_min_ps; get_abs() clears the sign bits via
  // an integer andnot:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs() const
  {
    __m512 mask = _mm512_set1_ps(-0.f);
    VectorizedArray res;
    res.data = reinterpret_cast<__m512>(
      _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
                          reinterpret_cast<__m512i>(data)));
    return res;
  }

  // std::sqrt, std::abs, std::max, and std::min for VectorizedArray are
  // declared friends, as above:
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::sqrt(const VectorizedArray<Number2> &);
  // ... (analogous friend declarations for std::abs, std::max, std::min)
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int      n_entries,
                              const float *           in,
                              const unsigned int *    offsets,
                              VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int outer = 0; outer < 16; outer += 8)
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0 + outer]);
          __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1 + outer]);
          __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2 + outer]);
          __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3 + outer]);
          __m128 u4 = _mm_loadu_ps(in + 4 * i + offsets[4 + outer]);
          __m128 u5 = _mm_loadu_ps(in + 4 * i + offsets[5 + outer]);
          __m128 u6 = _mm_loadu_ps(in + 4 * i + offsets[6 + outer]);
          __m128 u7 = _mm_loadu_ps(in + 4 * i + offsets[7 + outer]);

          // combine the 128-bit pieces into 256-bit registers first
          __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
          t0        = _mm256_insertf128_ps(t3, u0, 0);
          t0        = _mm256_insertf128_ps(t0, u4, 1);
          t1        = _mm256_insertf128_ps(t3, u1, 0);
          t1        = _mm256_insertf128_ps(t1, u5, 1);
          t2        = _mm256_insertf128_ps(t3, u2, 0);
          t2        = _mm256_insertf128_ps(t2, u6, 1);
          t3        = _mm256_insertf128_ps(t3, u3, 0);
          t3        = _mm256_insertf128_ps(t3, u7, 1);
          __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
          __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
          __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
          __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
          *reinterpret_cast<__m256 *>(
            reinterpret_cast<float *>(&out[4 * i + 0].data) + outer) =
            _mm256_shuffle_ps(v0, v2, 0x88);
          *reinterpret_cast<__m256 *>(
            reinterpret_cast<float *>(&out[4 * i + 1].data) + outer) =
            _mm256_shuffle_ps(v0, v2, 0xdd);
          *reinterpret_cast<__m256 *>(
            reinterpret_cast<float *>(&out[4 * i + 2].data) + outer) =
            _mm256_shuffle_ps(v1, v3, 0x88);
          *reinterpret_cast<__m256 *>(
            reinterpret_cast<float *>(&out[4 * i + 3].data) + outer) =
            _mm256_shuffle_ps(v1, v3, 0xdd);
        }
      for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 8; ++v)
          out[i][v + outer] = in[offsets[v + outer] + i];
    }
}
template <>
inline void
vectorized_transpose_and_store(const bool                    add_into,
                               const unsigned int            n_entries,
                               const VectorizedArray<float> *in,
                               const unsigned int *          offsets,
                               float *                       out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int outer = 0; outer < 16; outer += 8)
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m256 u0 = *reinterpret_cast<const __m256 *>(
            reinterpret_cast<const float *>(&in[4 * i + 0].data) + outer);
          __m256 u1 = *reinterpret_cast<const __m256 *>(
            reinterpret_cast<const float *>(&in[4 * i + 1].data) + outer);
          __m256 u2 = *reinterpret_cast<const __m256 *>(
            reinterpret_cast<const float *>(&in[4 * i + 2].data) + outer);
          __m256 u3 = *reinterpret_cast<const __m256 *>(
            reinterpret_cast<const float *>(&in[4 * i + 3].data) + outer);
          __m256 t0   = _mm256_shuffle_ps(u0, u1, 0x44);
          __m256 t1   = _mm256_shuffle_ps(u0, u1, 0xee);
          __m256 t2   = _mm256_shuffle_ps(u2, u3, 0x44);
          __m256 t3   = _mm256_shuffle_ps(u2, u3, 0xee);
          u0          = _mm256_shuffle_ps(t0, t2, 0x88);
          u1          = _mm256_shuffle_ps(t0, t2, 0xdd);
          u2          = _mm256_shuffle_ps(t1, t3, 0x88);
          u3          = _mm256_shuffle_ps(t1, t3, 0xdd);
          __m128 res0 = _mm256_extractf128_ps(u0, 0);
          __m128 res4 = _mm256_extractf128_ps(u0, 1);
          __m128 res1 = _mm256_extractf128_ps(u1, 0);
          __m128 res5 = _mm256_extractf128_ps(u1, 1);
          __m128 res2 = _mm256_extractf128_ps(u2, 0);
          __m128 res6 = _mm256_extractf128_ps(u2, 1);
          __m128 res3 = _mm256_extractf128_ps(u3, 0);
          __m128 res7 = _mm256_extractf128_ps(u3, 1);

          if (add_into)
            {
              res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0 + outer]),
                                res0);
              _mm_storeu_ps(out + 4 * i + offsets[0 + outer], res0);
              res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1 + outer]),
                                res1);
              _mm_storeu_ps(out + 4 * i + offsets[1 + outer], res1);
              res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2 + outer]),
                                res2);
              _mm_storeu_ps(out + 4 * i + offsets[2 + outer], res2);
              res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3 + outer]),
                                res3);
              _mm_storeu_ps(out + 4 * i + offsets[3 + outer], res3);
              res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4 + outer]),
                                res4);
              _mm_storeu_ps(out + 4 * i + offsets[4 + outer], res4);
              res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5 + outer]),
                                res5);
              _mm_storeu_ps(out + 4 * i + offsets[5 + outer], res5);
              res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6 + outer]),
                                res6);
              _mm_storeu_ps(out + 4 * i + offsets[6 + outer], res6);
              res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7 + outer]),
                                res7);
              _mm_storeu_ps(out + 4 * i + offsets[7 + outer], res7);
            }
          else
            {
              _mm_storeu_ps(out + 4 * i + offsets[0 + outer], res0);
              _mm_storeu_ps(out + 4 * i + offsets[1 + outer], res1);
              _mm_storeu_ps(out + 4 * i + offsets[2 + outer], res2);
              _mm_storeu_ps(out + 4 * i + offsets[3 + outer], res3);
              _mm_storeu_ps(out + 4 * i + offsets[4 + outer], res4);
              _mm_storeu_ps(out + 4 * i + offsets[5 + outer], res5);
              _mm_storeu_ps(out + 4 * i + offsets[6 + outer], res6);
              _mm_storeu_ps(out + 4 * i + offsets[7 + outer], res7);
            }
        }
      if (add_into)
        for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
          for (unsigned int v = 0; v < 8; ++v)
            out[offsets[v + outer] + i] += in[i][v + outer];
      else
        for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
          for (unsigned int v = 0; v < 8; ++v)
            out[offsets[v + outer] + i] = in[i][v + outer];
    }
}
#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)

/**
 * Specialization of VectorizedArray for double and AVX (4 elements).
 */
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const double x)
  {
    data = _mm256_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<const double *>(&data) + comp);
  }
  // Compound assignment as above; operator-=, operator*=, and operator/=
  // use _mm256_sub_pd, _mm256_mul_pd, and _mm256_div_pd, respectively.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#  else
    data = _mm256_add_pd(data, vec.data);
#  endif
    return *this;
  }
  DEAL_II_ALWAYS_INLINE
  void
  load(const double *ptr)
  {
    data = _mm256_loadu_pd(ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(double *ptr) const
  {
    _mm256_storeu_pd(ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
           ExcMessage("Memory not aligned"));
    _mm256_stream_pd(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
#  ifdef __AVX2__
    // AVX2 provides a hardware gather: load the four 32-bit offsets and
    // reinterpret them as a vector of indices.
    const __m128 index_val =
      _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
    data                = _mm256_i32gather_pd(base_ptr, index, 8);
#  else
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
#  endif
  }
  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    // no scatter operation in AVX/AVX2
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }
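  /*
   * Equivalent scalar semantics of gather() and scatter() above, as a
   * sketch: AVX2 has a hardware gather but no scatter, hence the loop;
   * with and without __AVX2__ the observable result is the same.
   *
   * @code
   * for (unsigned int i = 0; i < 4; ++i)
   *   v[i] = base_ptr[offsets[i]];    // gather
   * for (unsigned int i = 0; i < 4; ++i)
   *   base_ptr[offsets[i]] = v[i];    // scatter
   * @endcode
   */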
  /**
   * The actual data field.
   */
  __m256d data;

private:
  // get_sqrt(), get_max(), and get_min() wrap _mm256_sqrt_pd,
  // _mm256_max_pd, and _mm256_min_pd; get_abs() clears the sign bits:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs() const
  {
    __m256d mask = _mm256_set1_pd(-0.);
    VectorizedArray res;
    res.data = _mm256_andnot_pd(mask, data);
    return res;
  }

  // std::sqrt, std::abs, std::max, and std::min for VectorizedArray are
  // declared friends, as above:
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::sqrt(const VectorizedArray<Number2> &);
  // ... (analogous friend declarations for std::abs, std::max, std::min)
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const double *           in,
                              const unsigned int *     offsets,
                              VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  const double *     in0      = in + offsets[0];
  const double *     in1      = in + offsets[1];
  const double *     in2      = in + offsets[2];
  const double *     in3      = in + offsets[3];

  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0          = _mm256_loadu_pd(in0 + 4 * i);
      __m256d u1          = _mm256_loadu_pd(in1 + 4 * i);
      __m256d u2          = _mm256_loadu_pd(in2 + 4 * i);
      __m256d u3          = _mm256_loadu_pd(in3 + 4 * i);
      __m256d t0          = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1          = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2          = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3          = _mm256_permute2f128_pd(u1, u3, 0x31);
      out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
      out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
      out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
      out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
    }
  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 4; ++v)
      out[i][v] = in[offsets[v] + i];
}
template <>
inline void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int *           offsets,
                               double *                       out)
{
  const unsigned int n_chunks = n_entries / 4;
  double *           out0     = out + offsets[0];
  double *           out1     = out + offsets[1];
  double *           out2     = out + offsets[2];
  double *           out3     = out + offsets[3];
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0   = in[4 * i + 0].data;
      __m256d u1   = in[4 * i + 1].data;
      __m256d u2   = in[4 * i + 2].data;
      __m256d u3   = in[4 * i + 3].data;
      __m256d t0   = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1   = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2   = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3   = _mm256_permute2f128_pd(u1, u3, 0x31);
      __m256d res0 = _mm256_unpacklo_pd(t0, t1);
      __m256d res1 = _mm256_unpackhi_pd(t0, t1);
      __m256d res2 = _mm256_unpacklo_pd(t2, t3);
      __m256d res3 = _mm256_unpackhi_pd(t2, t3);

      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
          _mm256_storeu_pd(out0 + 4 * i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
      else
        {
          _mm256_storeu_pd(out0 + 4 * i, res0);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
    }
  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] = in[i][v];
}
/**
 * Specialization of VectorizedArray for float and AVX (8 elements).
 */
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const float x)
  {
    data = _mm256_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 8);
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 8);
    return *(reinterpret_cast<const float *>(&data) + comp);
  }
  // Compound assignment as above; operator-=, operator*=, and operator/=
  // use _mm256_sub_ps, _mm256_mul_ps, and _mm256_div_ps, respectively.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#  else
    data = _mm256_add_ps(data, vec.data);
#  endif
    return *this;
  }
  DEAL_II_ALWAYS_INLINE
  void
  load(const float *ptr)
  {
    data = _mm256_loadu_ps(ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(float *ptr) const
  {
    _mm256_storeu_ps(ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
           ExcMessage("Memory not aligned"));
    _mm256_stream_ps(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
#  ifdef __AVX2__
    const __m256 index_val =
      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
    data                = _mm256_i32gather_ps(base_ptr, index, 4);
#  else
    for (unsigned int i = 0; i < 8; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
#  endif
  }
  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    // no scatter operation in AVX/AVX2
    for (unsigned int i = 0; i < 8; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }
  /**
   * The actual data field.
   */
  __m256 data;

private:
  // get_sqrt(), get_max(), and get_min() wrap _mm256_sqrt_ps,
  // _mm256_max_ps, and _mm256_min_ps; get_abs() clears the sign bits:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs() const
  {
    __m256 mask = _mm256_set1_ps(-0.f);
    VectorizedArray res;
    res.data = _mm256_andnot_ps(mask, data);
    return res;
  }

  // std::sqrt, std::abs, std::max, and std::min for VectorizedArray are
  // declared friends, as above:
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::sqrt(const VectorizedArray<Number2> &);
  // ... (analogous friend declarations for std::abs, std::max, std::min)
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int      n_entries,
                              const float *           in,
                              const unsigned int *    offsets,
                              VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
      __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
      __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
      __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
      __m128 u4 = _mm_loadu_ps(in + 4 * i + offsets[4]);
      __m128 u5 = _mm_loadu_ps(in + 4 * i + offsets[5]);
      __m128 u6 = _mm_loadu_ps(in + 4 * i + offsets[6]);
      __m128 u7 = _mm_loadu_ps(in + 4 * i + offsets[7]);

      // combine the 128-bit pieces into 256-bit registers first
      __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
      t0                  = _mm256_insertf128_ps(t3, u0, 0);
      t0                  = _mm256_insertf128_ps(t0, u4, 1);
      t1                  = _mm256_insertf128_ps(t3, u1, 0);
      t1                  = _mm256_insertf128_ps(t1, u5, 1);
      t2                  = _mm256_insertf128_ps(t3, u2, 0);
      t2                  = _mm256_insertf128_ps(t2, u6, 1);
      t3                  = _mm256_insertf128_ps(t3, u3, 0);
      t3                  = _mm256_insertf128_ps(t3, u7, 1);
      __m256 v0           = _mm256_shuffle_ps(t0, t1, 0x44);
      __m256 v1           = _mm256_shuffle_ps(t0, t1, 0xee);
      __m256 v2           = _mm256_shuffle_ps(t2, t3, 0x44);
      __m256 v3           = _mm256_shuffle_ps(t2, t3, 0xee);
      out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
    }
  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 8; ++v)
      out[i][v] = in[offsets[v] + i];
}
template <>
inline void
vectorized_transpose_and_store(const bool                    add_into,
                               const unsigned int            n_entries,
                               const VectorizedArray<float> *in,
                               const unsigned int *          offsets,
                               float *                       out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256 u0   = in[4 * i + 0].data;
      __m256 u1   = in[4 * i + 1].data;
      __m256 u2   = in[4 * i + 2].data;
      __m256 u3   = in[4 * i + 3].data;
      __m256 t0   = _mm256_shuffle_ps(u0, u1, 0x44);
      __m256 t1   = _mm256_shuffle_ps(u0, u1, 0xee);
      __m256 t2   = _mm256_shuffle_ps(u2, u3, 0x44);
      __m256 t3   = _mm256_shuffle_ps(u2, u3, 0xee);
      u0          = _mm256_shuffle_ps(t0, t2, 0x88);
      u1          = _mm256_shuffle_ps(t0, t2, 0xdd);
      u2          = _mm256_shuffle_ps(t1, t3, 0x88);
      u3          = _mm256_shuffle_ps(t1, t3, 0xdd);
      __m128 res0 = _mm256_extractf128_ps(u0, 0);
      __m128 res4 = _mm256_extractf128_ps(u0, 1);
      __m128 res1 = _mm256_extractf128_ps(u1, 0);
      __m128 res5 = _mm256_extractf128_ps(u1, 1);
      __m128 res2 = _mm256_extractf128_ps(u2, 0);
      __m128 res6 = _mm256_extractf128_ps(u2, 1);
      __m128 res3 = _mm256_extractf128_ps(u3, 0);
      __m128 res7 = _mm256_extractf128_ps(u3, 1);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
          _mm_storeu_ps(out + 4 * i + offsets[0], res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
          _mm_storeu_ps(out + 4 * i + offsets[1], res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
          _mm_storeu_ps(out + 4 * i + offsets[2], res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
          _mm_storeu_ps(out + 4 * i + offsets[3], res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
          _mm_storeu_ps(out + 4 * i + offsets[4], res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
          _mm_storeu_ps(out + 4 * i + offsets[5], res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
          _mm_storeu_ps(out + 4 * i + offsets[6], res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
          _mm_storeu_ps(out + 4 * i + offsets[7], res7);
        }
      else
        {
          _mm_storeu_ps(out + 4 * i + offsets[0], res0);
          _mm_storeu_ps(out + 4 * i + offsets[1], res1);
          _mm_storeu_ps(out + 4 * i + offsets[2], res2);
          _mm_storeu_ps(out + 4 * i + offsets[3], res3);
          _mm_storeu_ps(out + 4 * i + offsets[4], res4);
          _mm_storeu_ps(out + 4 * i + offsets[5], res5);
          _mm_storeu_ps(out + 4 * i + offsets[6], res6);
          _mm_storeu_ps(out + 4 * i + offsets[7], res7);
        }
    }
  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] = in[i][v];
}
#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)

/**
 * Specialization of VectorizedArray for double and SSE2 (2 elements).
 */
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 2;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const double x)
  {
    data = _mm_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<const double *>(&data) + comp);
  }
  // Compound assignment as above; operator-=, operator*=, and operator/=
  // use _mm_sub_pd, _mm_mul_pd, and _mm_div_pd, respectively.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#  else
    data = _mm_add_pd(data, vec.data);
#  endif
    return *this;
  }
  DEAL_II_ALWAYS_INLINE
  void
  load(const double *ptr)
  {
    data = _mm_loadu_pd(ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(double *ptr) const
  {
    _mm_storeu_pd(ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    _mm_stream_pd(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }
  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }
  /**
   * The actual data field.
   */
  __m128d data;

private:
  // get_sqrt(), get_max(), and get_min() wrap _mm_sqrt_pd, _mm_max_pd,
  // and _mm_min_pd; get_abs() clears the sign bits:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs() const
  {
    __m128d mask = _mm_set1_pd(-0.);
    VectorizedArray res;
    res.data = _mm_andnot_pd(mask, data);
    return res;
  }

  // std::sqrt, std::abs, std::max, and std::min for VectorizedArray are
  // declared friends, as above:
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::sqrt(const VectorizedArray<Number2> &);
  // ... (analogous friend declarations for std::abs, std::max, std::min)
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const double *           in,
                              const unsigned int *     offsets,
                              VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries / 2;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128d u0          = _mm_loadu_pd(in + 2 * i + offsets[0]);
      __m128d u1          = _mm_loadu_pd(in + 2 * i + offsets[1]);
      out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
      out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
    }
  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 2; ++v)
      out[i][v] = in[offsets[v] + i];
}
template <>
inline void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int *           offsets,
                               double *                       out)
{
  const unsigned int n_chunks = n_entries / 2;
  if (add_into)
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out + 2 * i + offsets[0],
                        _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
                                   res0));
          _mm_storeu_pd(out + 2 * i + offsets[1],
                        _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
                                   res1));
        }
      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[offsets[v] + i] += in[i][v];
    }
  else
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out + 2 * i + offsets[0], res0);
          _mm_storeu_pd(out + 2 * i + offsets[1], res1);
        }
      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[offsets[v] + i] = in[i][v];
    }
}
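/*
 * A worked two-lane example (values made up): with offsets = {0, 4} and
 * n_entries = 2, lane 0 lands at out[0..1] and lane 1 at out[4..5].
 *
 * @code
 * VectorizedArray<double> in[2];
 * in[0] = 1.;                        // lanes (1, 1)
 * in[1] = 2.;                        // lanes (2, 2)
 * const unsigned int offsets[2] = {0, 4};
 * double             out[6]     = {};
 * vectorized_transpose_and_store(false, 2, in, offsets, out);
 * // out == {1, 2, 0, 0, 1, 2}
 * @endcode
 */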
/**
 * Specialization of VectorizedArray for float and SSE2 (4 elements).
 */
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const float x)
  {
    data = _mm_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<const float *>(&data) + comp);
  }
  // Compound assignment as above; operator-=, operator*=, and operator/=
  // use _mm_sub_ps, _mm_mul_ps, and _mm_div_ps, respectively.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#  else
    data = _mm_add_ps(data, vec.data);
#  endif
    return *this;
  }
  DEAL_II_ALWAYS_INLINE
  void
  load(const float *ptr)
  {
    data = _mm_loadu_ps(ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(float *ptr) const
  {
    _mm_storeu_ps(ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    _mm_stream_ps(ptr, data);
  }
  DEAL_II_ALWAYS_INLINE
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }
  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }
  /**
   * The actual data field.
   */
  __m128 data;

private:
  // get_sqrt(), get_max(), and get_min() wrap _mm_sqrt_ps, _mm_max_ps,
  // and _mm_min_ps; get_abs() clears the sign bits:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs() const
  {
    __m128 mask = _mm_set1_ps(-0.f);
    VectorizedArray res;
    res.data = _mm_andnot_ps(mask, data);
    return res;
  }

  // std::sqrt, std::abs, std::max, and std::min for VectorizedArray are
  // declared friends, as above:
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::sqrt(const VectorizedArray<Number2> &);
  // ... (analogous friend declarations for std::abs, std::max, std::min)
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int      n_entries,
                              const float *           in,
                              const unsigned int *    offsets,
                              VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0           = _mm_loadu_ps(in + 4 * i + offsets[0]);
      __m128 u1           = _mm_loadu_ps(in + 4 * i + offsets[1]);
      __m128 u2           = _mm_loadu_ps(in + 4 * i + offsets[2]);
      __m128 u3           = _mm_loadu_ps(in + 4 * i + offsets[3]);
      __m128 v0           = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 v1           = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 v2           = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 v3           = _mm_shuffle_ps(u2, u3, 0xee);
      out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
    }
  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 4; ++v)
      out[i][v] = in[offsets[v] + i];
}
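/*
 * The shuffle sequence above is the classic 4x4 register transpose; in
 * scalar terms it is equivalent to this sketch:
 *
 * @code
 * for (unsigned int i = 0; i < n_entries; ++i)
 *   for (unsigned int v = 0; v < 4; ++v)
 *     out[i][v] = in[offsets[v] + i];
 * @endcode
 */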
template <>
inline void
vectorized_transpose_and_store(const bool                    add_into,
                               const unsigned int            n_entries,
                               const VectorizedArray<float> *in,
                               const unsigned int *          offsets,
                               float *                       out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0 = in[4 * i + 0].data;
      __m128 u1 = in[4 * i + 1].data;
      __m128 u2 = in[4 * i + 2].data;
      __m128 u3 = in[4 * i + 3].data;
      __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
      u0        = _mm_shuffle_ps(t0, t2, 0x88);
      u1        = _mm_shuffle_ps(t0, t2, 0xdd);
      u2        = _mm_shuffle_ps(t1, t3, 0x88);
      u3        = _mm_shuffle_ps(t1, t3, 0xdd);

      if (add_into)
        {
          u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
          _mm_storeu_ps(out + 4 * i + offsets[0], u0);
          u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
          _mm_storeu_ps(out + 4 * i + offsets[1], u1);
          u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
          _mm_storeu_ps(out + 4 * i + offsets[2], u2);
          u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
          _mm_storeu_ps(out + 4 * i + offsets[3], u3);
        }
      else
        {
          _mm_storeu_ps(out + 4 * i + offsets[0], u0);
          _mm_storeu_ps(out + 4 * i + offsets[1], u1);
          _mm_storeu_ps(out + 4 * i + offsets[2], u2);
          _mm_storeu_ps(out + 4 * i + offsets[3], u3);
        }
    }
  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] = in[i][v];
}
#endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0 && defined(__SSE2__)


#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) && \
  defined(__VSX__)

/**
 * Specialization of VectorizedArray for double and VSX/AltiVec
 * (2 elements).
 */
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 2;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const double x)
  {
    data = vec_splats(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<const double *>(&data) + comp);
  }
  // The compound assignment operators use the compiler's native vector
  // arithmetic on the VSX type; operator-=, operator*=, and operator/=
  // are analogous.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
    data += vec.data;
    return *this;
  }
  DEAL_II_ALWAYS_INLINE
  void
  load(const double *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(double *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  // VSX has no non-temporal store counterpart here, so streaming_store()
  // falls back to a plain store:
  DEAL_II_ALWAYS_INLINE
  void
  streaming_store(double *ptr) const
  {
    store(ptr);
  }
  DEAL_II_ALWAYS_INLINE
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }
  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }
  /**
   * The actual data field.
   */
  __vector double data;

private:
  // get_sqrt(), get_abs(), get_max(), and get_min() are implemented with
  // the corresponding VSX operations (vec_sqrt, vec_abs, vec_max,
  // vec_min), e.g.:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt() const
  {
    VectorizedArray res;
    res.data = vec_sqrt(data);
    return res;
  }

  // std::sqrt, std::abs, std::max, and std::min for VectorizedArray are
  // declared friends, as above:
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::sqrt(const VectorizedArray<Number2> &);
  // ... (analogous friend declarations for std::abs, std::max, std::min)
};
/**
 * Specialization of VectorizedArray for float and VSX/AltiVec
 * (4 elements).
 */
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const float x)
  {
    data = vec_splats(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<const float *>(&data) + comp);
  }
  // The compound assignment operators use the compiler's native vector
  // arithmetic on the VSX type; operator-=, operator*=, and operator/=
  // are analogous.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
    data += vec.data;
    return *this;
  }
  DEAL_II_ALWAYS_INLINE
  void
  load(const float *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(float *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  // streaming_store() falls back to a plain store, as in the double
  // specialization:
  DEAL_II_ALWAYS_INLINE
  void
  streaming_store(float *ptr) const
  {
    store(ptr);
  }
  DEAL_II_ALWAYS_INLINE
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }
  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }
  /**
   * The actual data field.
   */
  __vector float data;

private:
  // get_sqrt(), get_abs(), get_max(), and get_min() are implemented with
  // the corresponding VSX operations, as in the double specialization:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt() const
  {
    VectorizedArray res;
    res.data = vec_sqrt(data);
    return res;
  }

  // std::sqrt, std::abs, std::max, and std::min for VectorizedArray are
  // declared friends, as above:
  template <typename Number2>
  friend VectorizedArray<Number2>
  std::sqrt(const VectorizedArray<Number2> &);
  // ... (analogous friend declarations for std::abs, std::max, std::min)
};
#endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__)


/**
 * Relational operator == for VectorizedArray: compares the arrays entry
 * by entry.
 */
template <typename Number>
inline DEAL_II_ALWAYS_INLINE bool
operator==(const VectorizedArray<Number> &lhs,
           const VectorizedArray<Number> &rhs)
{
  for (unsigned int i = 0; i < VectorizedArray<Number>::n_array_elements; ++i)
    if (lhs[i] != rhs[i])
      return false;

  return true;
}
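/*
 * A usage sketch: equality holds only if every lane matches.
 *
 * @code
 * VectorizedArray<double> a = make_vectorized_array(1.0);
 * VectorizedArray<double> b = make_vectorized_array(1.0);
 * bool equal = (a == b);  // true: all lanes agree
 * b[0]  = 2.0;
 * equal = (a == b);       // false: one lane differs
 * @endcode
 */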
/**
 * Addition of two vectorized arrays: copy the first argument and apply
 * the compound operator defined above.
 */
template <typename Number>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
operator+(const VectorizedArray<Number> &u, const VectorizedArray<Number> &v)
{
  VectorizedArray<Number> tmp = u;
  return tmp += v;
}

// operator- (subtraction), operator* (multiplication), and operator/
// (division) on two VectorizedArray arguments follow the same pattern, as
// do the mixed overloads that combine a scalar Number with a
// VectorizedArray<Number> in either argument order (and a double scalar
// with a VectorizedArray<float>): the scalar is broadcast to all lanes
// first. The unary operator+ returns its argument unchanged; the unary
// operator- negates every entry.

DEAL_II_NAMESPACE_CLOSE
/**
 * Implementation of functions from cmath on VectorizedArray. These
 * functions reside in the standard namespace.
 */
namespace std
{
  /**
   * Compute the sine of a vectorized data field, element by element.
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  sin(const ::dealii::VectorizedArray<Number> &x)
  {
    // Evaluate std::sin on each element into an intermediate array and
    // reassemble the result with load().
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i = 0;
         i < dealii::VectorizedArray<Number>::n_array_elements;
         ++i)
      values[i] = std::sin(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }
  /**
   * Compute the cosine of a vectorized data field, element by element.
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  cos(const ::dealii::VectorizedArray<Number> &x)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i = 0;
         i < dealii::VectorizedArray<Number>::n_array_elements;
         ++i)
      values[i] = std::cos(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }

  /**
   * Compute the tangent of a vectorized data field, element by element.
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  tan(const ::dealii::VectorizedArray<Number> &x)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i = 0;
         i < dealii::VectorizedArray<Number>::n_array_elements;
         ++i)
      values[i] = std::tan(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }

  /**
   * Compute the exponential of a vectorized data field, element by
   * element.
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  exp(const ::dealii::VectorizedArray<Number> &x)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i = 0;
         i < dealii::VectorizedArray<Number>::n_array_elements;
         ++i)
      values[i] = std::exp(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }

  /**
   * Compute the natural logarithm of a vectorized data field, element by
   * element.
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  log(const ::dealii::VectorizedArray<Number> &x)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i = 0;
         i < dealii::VectorizedArray<Number>::n_array_elements;
         ++i)
      values[i] = std::log(x[i]);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }
  /**
   * Compute the square root of a vectorized data field, element by
   * element; forwards to the hardware implementation via get_sqrt().
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  sqrt(const ::dealii::VectorizedArray<Number> &x)
  {
    return x.get_sqrt();
  }
  /**
   * Raise a vectorized data field to the power p, element by element.
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  pow(const ::dealii::VectorizedArray<Number> &x, const Number p)
  {
    Number values[::dealii::VectorizedArray<Number>::n_array_elements];
    for (unsigned int i = 0;
         i < dealii::VectorizedArray<Number>::n_array_elements;
         ++i)
      values[i] = std::pow(x[i], p);
    ::dealii::VectorizedArray<Number> out;
    out.load(&values[0]);
    return out;
  }
  /**
   * Compute the absolute value of a vectorized data field, element by
   * element; forwards to get_abs().
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  abs(const ::dealii::VectorizedArray<Number> &x)
  {
    return x.get_abs();
  }
  /**
   * Compute the element-wise maximum of two vectorized data fields;
   * forwards to get_max().
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  max(const ::dealii::VectorizedArray<Number> &x,
      const ::dealii::VectorizedArray<Number> &y)
  {
    return x.get_max(y);
  }
  /**
   * Compute the element-wise minimum of two vectorized data fields;
   * forwards to get_min().
   */
  template <typename Number>
  inline ::dealii::VectorizedArray<Number>
  min(const ::dealii::VectorizedArray<Number> &x,
      const ::dealii::VectorizedArray<Number> &y)
  {
    return x.get_min(y);
  }
} // namespace std

#endif