17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
55 "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
59 "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
62 # if defined(_MSC_VER)
64 # elif defined(__ALTIVEC__)
73 # include <x86intrin.h>
85 template <typename Number, std::size_t width>
121 "You are trying to compare iterators into different arrays."));
133 "You are trying to compare iterators into different arrays."));
158 template <typename U = T>
160 typename T::value_type>::type &
203 "You can't decrement an iterator that is already at the beginning of the range."));
224 return static_cast<std::ptrdiff_t>(lane) -
225 static_cast<ptrdiff_t>(other.lane);
253 template <typename T, std::size_t width>
260 static constexpr std::size_t
394 template <typename Number, std::size_t width>
413 static_assert(width == 1,
414 "You specified an illegal width that is not supported.");
602 gather(const Number *base_ptr, const unsigned int *offsets)
604 data = base_ptr[offsets[0]];
621 scatter(const unsigned int *offsets, Number *base_ptr) const
623 base_ptr[offsets[0]] = data;
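// A minimal usage sketch of the gather()/scatter() pair above; the array
// `values` and the stride chosen for `offsets` are made up for illustration:
//
//   constexpr std::size_t n = VectorizedArray<double>::size();
//   double                values[64];
//   unsigned int          offsets[n];
//   for (unsigned int v = 0; v < n; ++v)
//     offsets[v] = 4 * v;
//
//   VectorizedArray<double> x;
//   x.gather(values, offsets);  // x[v] = values[offsets[v]]
//   x = x * 2.;
//   x.scatter(offsets, values); // values[offsets[v]] = x[v]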
687 template <typename Number2, std::size_t width2>
690 template <typename Number2, std::size_t width2>
693 template <typename Number2, std::size_t width2>
697 template <typename Number2, std::size_t width2>
706 template <typename Number, std::size_t width>
723 template <typename Number,
741 template <typename VectorizedArrayType>
746 std::is_same<VectorizedArrayType,
748 VectorizedArrayType::size()>>::value,
749 "VectorizedArrayType is not a VectorizedArray.");
751 VectorizedArrayType result = u;
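// A minimal usage sketch of make_vectorized_array() (both overloads shown;
// the numerical values are arbitrary):
//
//   const auto a = make_vectorized_array<double>(1.5);
//   const auto b = make_vectorized_array<VectorizedArray<float>>(2.0f);
//   // every lane of a holds 1.5, every lane of b holds 2.0f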
768 template <typename Number, std::size_t width>
771 const std::array<Number *, width> &ptrs,
772 const unsigned int offset)
774 for (unsigned int v = 0; v < width; ++v)
775 out.data[v] = ptrs[v][offset];
805 template <typename Number, std::size_t width>
809 const unsigned int * offsets,
812 for (unsigned int i = 0; i < n_entries; ++i)
813 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
814 out[i][v] = in[offsets[v] + i];
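// A minimal sketch of how this overload is called; the sizes and the stride
// chosen for `offsets` are arbitrary:
//
//   constexpr std::size_t width = VectorizedArray<double>::size();
//   const unsigned int    n_entries = 4;
//   double                in[width * n_entries]; // one stream per lane
//   unsigned int          offsets[width];
//   for (unsigned int v = 0; v < width; ++v)
//     offsets[v] = v * n_entries;                // start of stream v
//   VectorizedArray<double> out[n_entries];
//   vectorized_load_and_transpose(n_entries, in, offsets, out);
//   // afterwards out[i][v] == in[offsets[v] + i]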
829 template <typename Number, std::size_t width>
832 const std::array<Number *, width> &in,
835 for (unsigned int i = 0; i < n_entries; ++i)
836 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
837 out[i][v] = in[v][i];
880 template <typename Number, std::size_t width>
883 const unsigned int n_entries,
885 const unsigned int * offsets,
889 for (unsigned int i = 0; i < n_entries; ++i)
890 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
891 out[offsets[v] + i] += in[i][v];
893 for (unsigned int i = 0; i < n_entries; ++i)
894 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
895 out[offsets[v] + i] = in[i][v];
910 template <typename Number, std::size_t width>
913 const unsigned int n_entries,
915 std::array<Number *, width> & out)
918 for (unsigned int i = 0; i < n_entries; ++i)
919 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
920 out[v][i] += in[i][v];
922 for (unsigned int i = 0; i < n_entries; ++i)
923 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
924 out[v][i] = in[i][v];
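// A minimal sketch of the reverse operation, reusing the names of the sketch
// further up (the boolean `add_into` selects between the += and the = branch
// of the loops above):
//
//   // write back:   in[offsets[v] + i]  = out[i][v]
//   vectorized_transpose_and_store(false, n_entries, out, offsets, in);
//   // accumulate:   in[offsets[v] + i] += out[i][v]
//   vectorized_transpose_and_store(true, n_entries, out, offsets, in);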
935 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
978 data = _mm512_set1_pd(x);
989 return *(reinterpret_cast<double *>(&data) + comp);
996 const double &operator[](const unsigned int comp) const
999 return *(reinterpret_cast<const double *>(&data) + comp);
1014 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1029 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1043 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1058 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1073 load(const double *ptr)
1075 data = _mm512_loadu_pd(ptr);
1086 store(double *ptr) const
1088 _mm512_storeu_pd(ptr, data);
1098 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1100 _mm512_stream_pd(ptr, data);
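// The store above (_mm512_stream_pd) is a non-temporal store that bypasses
// the caches; it is only valid for pointers aligned to the full 64-byte
// register size, which is what the Assert documents.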
1117 gather(const double *base_ptr, const unsigned int *offsets)
1122 const __m256 index_val =
1123 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1124 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1125 data = _mm512_i32gather_pd(index, base_ptr, 8);
1142 scatter(const unsigned int *offsets, double *base_ptr) const
1144 for (unsigned int i = 0; i < 8; ++i)
1145 for (unsigned int j = i + 1; j < 8; ++j)
1146 Assert(offsets[i] != offsets[j],
1147 ExcMessage("Result of scatter undefined if two offset elements"
1148 " point to the same position"));
1153 const __m256 index_val =
1154 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1155 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1156 _mm512_i32scatter_pd(base_ptr, index, data, 8);
1193 __m512d mask = _mm512_set1_pd(-0.);
1195 res.data = reinterpret_cast<__m512d>(
1196 _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1197 reinterpret_cast<__m512i>(data)));
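// Implementation note: -0. has only the sign bit set, so the andnot above
// clears the sign bit of every lane, i.e. it computes std::abs() lane by
// lane. The AVX and SSE2 specializations further down use the same trick
// with _mm256_andnot_pd / _mm_andnot_pd.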
1228 template <typename Number2, std::size_t width2>
1231 template <typename Number2, std::size_t width2>
1234 template <typename Number2, std::size_t width2>
1238 template <typename Number2, std::size_t width2>
1253 const unsigned int * offsets,
1261 const unsigned int n_chunks = n_entries / 4;
1262 for (unsigned int i = 0; i < n_chunks; ++i)
1264 __m512d t0, t1, t2, t3 = {};
1266 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1267 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1268 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1269 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1270 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1271 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1272 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1273 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1275 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1276 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1277 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1278 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1279 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1280 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1281 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1282 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1285 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1286 out[i].gather(in + i, offsets);
1297 const std::array<double *, 8> &in,
1300 const unsigned int n_chunks = n_entries / 4;
1301 for (unsigned int i = 0; i < n_chunks; ++i)
1303 __m512d t0, t1, t2, t3 = {};
1305 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1306 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1307 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1308 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1309 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1310 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1311 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1312 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1314 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1315 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1316 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1317 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1318 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1319 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1320 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1321 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1324 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1336 const unsigned int n_entries,
1338 const unsigned int * offsets,
1343 const unsigned int n_chunks = n_entries / 4;
1344 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1345 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
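// The masks above drive _mm512_permutex2var_pd: every 64-bit result lane is
// selected from the concatenation of the two source operands (indices
// 0x0-0x7 refer to the first, 0x8-0xf to the second operand). Combined with
// the unpacklo/unpackhi pairs this rearranges four 8-lane registers into
// eight 256-bit chunks, one per offset.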
1346 for (unsigned int i = 0; i < n_chunks; ++i)
1348 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1349 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1350 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1351 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1352 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1353 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1354 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1355 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1356 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1357 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1358 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1359 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1360 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1361 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1362 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1363 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1370 res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1371 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1372 res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1373 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1374 res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1375 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1376 res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1377 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1378 res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1379 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1380 res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1381 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1382 res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1383 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1384 res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1385 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1389 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1390 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1391 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1392 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1393 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1394 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1395 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1396 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1402 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1403 for (unsigned int v = 0; v < 8; ++v)
1404 out[offsets[v] + i] += in[i][v];
1406 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1407 for (unsigned int v = 0; v < 8; ++v)
1408 out[offsets[v] + i] = in[i][v];
1419 const unsigned int n_entries,
1421 std::array<double *, 8> & out)
1425 const unsigned int n_chunks = n_entries / 4;
1426 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1427 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1428 for (unsigned int i = 0; i < n_chunks; ++i)
1430 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1431 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1432 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1433 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1434 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1435 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1436 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1437 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1438 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1439 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1440 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1441 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1442 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1443 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1444 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1445 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1449 res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1450 _mm256_storeu_pd(out[0] + 4 * i, res0);
1451 res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1452 _mm256_storeu_pd(out[1] + 4 * i, res1);
1453 res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1454 _mm256_storeu_pd(out[2] + 4 * i, res2);
1455 res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1456 _mm256_storeu_pd(out[3] + 4 * i, res3);
1457 res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1458 _mm256_storeu_pd(out[4] + 4 * i, res4);
1459 res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1460 _mm256_storeu_pd(out[5] + 4 * i, res5);
1461 res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1462 _mm256_storeu_pd(out[6] + 4 * i, res6);
1463 res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1464 _mm256_storeu_pd(out[7] + 4 * i, res7);
1468 _mm256_storeu_pd(out[0] + 4 * i, res0);
1469 _mm256_storeu_pd(out[1] + 4 * i, res1);
1470 _mm256_storeu_pd(out[2] + 4 * i, res2);
1471 _mm256_storeu_pd(out[3] + 4 * i, res3);
1472 _mm256_storeu_pd(out[4] + 4 * i, res4);
1473 _mm256_storeu_pd(out[5] + 4 * i, res5);
1474 _mm256_storeu_pd(out[6] + 4 * i, res6);
1475 _mm256_storeu_pd(out[7] + 4 * i, res7);
1480 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1481 for (unsigned int v = 0; v < 8; ++v)
1482 out[v][i] += in[i][v];
1484 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1485 for (unsigned int v = 0; v < 8; ++v)
1486 out[v][i] = in[i][v];
1532 data = _mm512_set1_ps(x);
1543 return *(reinterpret_cast<float *>(&data) + comp);
1550 const float &operator[](const unsigned int comp) const
1553 return *(reinterpret_cast<const float *>(&data) + comp);
1568 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1583 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1597 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1612 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1627 load(const float *ptr)
1629 data = _mm512_loadu_ps(ptr);
1640 store(float *ptr) const
1642 _mm512_storeu_ps(ptr, data);
1652 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1654 _mm512_stream_ps(ptr, data);
1671 gather(const float *base_ptr, const unsigned int *offsets)
1676 const __m512 index_val =
1677 _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1678 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1679 data = _mm512_i32gather_ps(index, base_ptr, 4);
1696 scatter(const unsigned int *offsets, float *base_ptr) const
1698 for (unsigned int i = 0; i < 16; ++i)
1699 for (unsigned int j = i + 1; j < 16; ++j)
1700 Assert(offsets[i] != offsets[j],
1701 ExcMessage("Result of scatter undefined if two offset elements"
1702 " point to the same position"));
1707 const __m512 index_val =
1708 _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1709 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1710 _mm512_i32scatter_ps(base_ptr, index, data, 4);
1747 __m512 mask = _mm512_set1_ps(-0.f);
1749 res.data = reinterpret_cast<__m512>(
1750 _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1751 reinterpret_cast<__m512i>(data)));
1782 template <typename Number2, std::size_t width2>
1785 template <typename Number2, std::size_t width2>
1788 template <typename Number2, std::size_t width2>
1792 template <typename Number2, std::size_t width2>
1807 const unsigned int * offsets,
1814 const unsigned int n_chunks = n_entries / 4;
1822 __m512 t0, t1, t2, t3;
1825 for (unsigned int i = 0; i < n_chunks; ++i)
1827 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1828 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1829 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1830 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1831 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1832 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1833 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1834 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1835 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1836 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1837 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1838 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1839 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1840 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1841 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1842 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1844 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1845 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1846 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1847 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
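// The immediates of _mm512_shuffle_ps act within each 128-bit lane: 0x44
// picks elements 0,1 of both operands, 0xee elements 2,3, 0x88 the even and
// 0xdd the odd elements; the two shuffle stages together form a 4x4
// transpose per 128-bit lane.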
1849 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1850 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1851 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1852 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1856 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1857 out[i].gather(in + i, offsets);
1868 const std::array<float *, 16> &in,
1873 const unsigned int n_chunks = n_entries / 4;
1875 __m512 t0, t1, t2, t3;
1878 for (unsigned int i = 0; i < n_chunks; ++i)
1880 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1881 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1882 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1883 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1884 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1885 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1886 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1887 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1888 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1889 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1890 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1891 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1892 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1893 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1894 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1895 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1897 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1898 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1899 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1900 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1902 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1903 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1904 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1905 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1908 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1920 const unsigned int n_entries,
1922 const unsigned int * offsets,
1925 const unsigned int n_chunks = n_entries / 4;
1926 for (unsigned int i = 0; i < n_chunks; ++i)
1928 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1929 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1931 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1933 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1934 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1935 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1936 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1937 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1939 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1940 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1941 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1942 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1943 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1944 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1945 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1946 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1947 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1948 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1949 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1950 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1951 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1952 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1953 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1954 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1961 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
1962 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1963 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
1964 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1965 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
1966 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1967 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
1968 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1969 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
1970 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1971 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
1972 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1973 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
1974 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1975 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
1976 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1977 res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
1978 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1979 res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
1980 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1981 res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
1982 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1983 res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
1984 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1985 res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
1986 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1987 res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
1988 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1989 res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
1990 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1991 res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
1992 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1996 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1997 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1998 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1999 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2000 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2001 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2002 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2003 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2004 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2005 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2006 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2007 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2008 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2009 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2010 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2011 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2017 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2018 for (unsigned int v = 0; v < 16; ++v)
2019 out[offsets[v] + i] += in[i][v];
2021 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2022 for (unsigned int v = 0; v < 16; ++v)
2023 out[offsets[v] + i] = in[i][v];
2034 const unsigned int n_entries,
2036 std::array<float *, 16> & out)
2040 const unsigned int n_chunks = n_entries / 4;
2041 for (unsigned int i = 0; i < n_chunks; ++i)
2043 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2044 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2046 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2048 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2049 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2050 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2051 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2052 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2054 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2055 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2056 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2057 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2058 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2059 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2060 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2061 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2062 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2063 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2064 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2065 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2066 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2067 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2068 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2069 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2073 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2074 _mm_storeu_ps(out[0] + 4 * i, res0);
2075 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2076 _mm_storeu_ps(out[1] + 4 * i, res1);
2077 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2078 _mm_storeu_ps(out[2] + 4 * i, res2);
2079 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2080 _mm_storeu_ps(out[3] + 4 * i, res3);
2081 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2082 _mm_storeu_ps(out[4] + 4 * i, res4);
2083 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2084 _mm_storeu_ps(out[5] + 4 * i, res5);
2085 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2086 _mm_storeu_ps(out[6] + 4 * i, res6);
2087 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2088 _mm_storeu_ps(out[7] + 4 * i, res7);
2089 res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2090 _mm_storeu_ps(out[8] + 4 * i, res8);
2091 res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2092 _mm_storeu_ps(out[9] + 4 * i, res9);
2093 res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2094 _mm_storeu_ps(out[10] + 4 * i, res10);
2095 res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2096 _mm_storeu_ps(out[11] + 4 * i, res11);
2097 res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2098 _mm_storeu_ps(out[12] + 4 * i, res12);
2099 res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2100 _mm_storeu_ps(out[13] + 4 * i, res13);
2101 res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2102 _mm_storeu_ps(out[14] + 4 * i, res14);
2103 res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2104 _mm_storeu_ps(out[15] + 4 * i, res15);
2108 _mm_storeu_ps(out[0] + 4 * i, res0);
2109 _mm_storeu_ps(out[1] + 4 * i, res1);
2110 _mm_storeu_ps(out[2] + 4 * i, res2);
2111 _mm_storeu_ps(out[3] + 4 * i, res3);
2112 _mm_storeu_ps(out[4] + 4 * i, res4);
2113 _mm_storeu_ps(out[5] + 4 * i, res5);
2114 _mm_storeu_ps(out[6] + 4 * i, res6);
2115 _mm_storeu_ps(out[7] + 4 * i, res7);
2116 _mm_storeu_ps(out[8] + 4 * i, res8);
2117 _mm_storeu_ps(out[9] + 4 * i, res9);
2118 _mm_storeu_ps(out[10] + 4 * i, res10);
2119 _mm_storeu_ps(out[11] + 4 * i, res11);
2120 _mm_storeu_ps(out[12] + 4 * i, res12);
2121 _mm_storeu_ps(out[13] + 4 * i, res13);
2122 _mm_storeu_ps(out[14] + 4 * i, res14);
2123 _mm_storeu_ps(out[15] + 4 * i, res15);
2128 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2129 for (unsigned int v = 0; v < 16; ++v)
2130 out[v][i] += in[i][v];
2132 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2133 for (unsigned int v = 0; v < 16; ++v)
2134 out[v][i] = in[i][v];
2139 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2182 data = _mm256_set1_pd(x);
2193 return *(reinterpret_cast<double *>(&data) + comp);
2200 const double &operator[](const unsigned int comp) const
2203 return *(reinterpret_cast<const double *>(&data) + comp);
2218 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2233 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2247 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2262 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2277 load(const double *ptr)
2279 data = _mm256_loadu_pd(ptr);
2290 store(double *ptr) const
2292 _mm256_storeu_pd(ptr, data);
2302 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2304 _mm256_stream_pd(ptr, data);
2321 gather(const double *base_ptr, const unsigned int *offsets)
2327 const __m128 index_val =
2328 _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2329 const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2330 data = _mm256_i32gather_pd(base_ptr, index, 8);
2332 for (unsigned int i = 0; i < 4; ++i)
2333 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2351 scatter(const unsigned int *offsets, double *base_ptr) const
2354 for (unsigned int i = 0; i < 4; ++i)
2355 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2390 __m256d mask = _mm256_set1_pd(-0.);
2392 res.data = _mm256_andnot_pd(mask, data);
2423 template <typename Number2, std::size_t width2>
2426 template <typename Number2, std::size_t width2>
2429 template <typename Number2, std::size_t width2>
2433 template <typename Number2, std::size_t width2>
2448 const unsigned int * offsets,
2451 const unsigned int n_chunks = n_entries / 4;
2452 const double * in0 = in + offsets[0];
2453 const double * in1 = in + offsets[1];
2454 const double * in2 = in + offsets[2];
2455 const double * in3 = in + offsets[3];
2457 for (unsigned int i = 0; i < n_chunks; ++i)
2459 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2460 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2461 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2462 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2463 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2464 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2465 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2466 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2467 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2468 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2469 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2470 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2474 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2475 out[i].gather(in + i, offsets);
2486 const std::array<double *, 4> &in,
2491 const unsigned int n_chunks = n_entries / 4;
2492 const double * in0 = in[0];
2493 const double * in1 = in[1];
2494 const double * in2 = in[2];
2495 const double * in3 = in[3];
2497 for (unsigned int i = 0; i < n_chunks; ++i)
2499 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2500 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2501 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2502 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2503 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2504 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2505 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2506 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2507 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2508 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2509 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2510 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2513 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2525 const unsigned int n_entries,
2527 const unsigned int * offsets,
2530 const unsigned int n_chunks = n_entries / 4;
2531 double * out0 = out + offsets[0];
2532 double * out1 = out + offsets[1];
2533 double * out2 = out + offsets[2];
2534 double * out3 = out + offsets[3];
2535 for (unsigned int i = 0; i < n_chunks; ++i)
2537 __m256d u0 = in[4 * i + 0].data;
2538 __m256d u1 = in[4 * i + 1].data;
2539 __m256d u2 = in[4 * i + 2].data;
2540 __m256d u3 = in[4 * i + 3].data;
2541 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2542 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2543 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2544 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2545 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2546 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2547 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2548 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2555 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2556 _mm256_storeu_pd(out0 + 4 * i, res0);
2557 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2558 _mm256_storeu_pd(out1 + 4 * i, res1);
2559 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2560 _mm256_storeu_pd(out2 + 4 * i, res2);
2561 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2562 _mm256_storeu_pd(out3 + 4 * i, res3);
2566 _mm256_storeu_pd(out0 + 4 * i, res0);
2567 _mm256_storeu_pd(out1 + 4 * i, res1);
2568 _mm256_storeu_pd(out2 + 4 * i, res2);
2569 _mm256_storeu_pd(out3 + 4 * i, res3);
2575 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2576 for (unsigned int v = 0; v < 4; ++v)
2577 out[offsets[v] + i] += in[i][v];
2579 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2580 for (unsigned int v = 0; v < 4; ++v)
2581 out[offsets[v] + i] = in[i][v];
2592 const unsigned int n_entries,
2594 std::array<double *, 4> & out)
2598 const unsigned int n_chunks = n_entries / 4;
2599 double * out0 = out[0];
2600 double * out1 = out[1];
2601 double * out2 = out[2];
2602 double * out3 = out[3];
2603 for (unsigned int i = 0; i < n_chunks; ++i)
2605 __m256d u0 = in[4 * i + 0].data;
2606 __m256d u1 = in[4 * i + 1].data;
2607 __m256d u2 = in[4 * i + 2].data;
2608 __m256d u3 = in[4 * i + 3].data;
2609 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2610 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2611 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2612 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2613 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2614 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2615 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2616 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2623 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2624 _mm256_storeu_pd(out0 + 4 * i, res0);
2625 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2626 _mm256_storeu_pd(out1 + 4 * i, res1);
2627 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2628 _mm256_storeu_pd(out2 + 4 * i, res2);
2629 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2630 _mm256_storeu_pd(out3 + 4 * i, res3);
2634 _mm256_storeu_pd(out0 + 4 * i, res0);
2635 _mm256_storeu_pd(out1 + 4 * i, res1);
2636 _mm256_storeu_pd(out2 + 4 * i, res2);
2637 _mm256_storeu_pd(out3 + 4 * i, res3);
2643 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2644 for (unsigned int v = 0; v < 4; ++v)
2645 out[v][i] += in[i][v];
2647 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2648 for (unsigned int v = 0; v < 4; ++v)
2649 out[v][i] = in[i][v];
2695 data = _mm256_set1_ps(x);
2706 return *(reinterpret_cast<float *>(&data) + comp);
2713 const float &operator[](const unsigned int comp) const
2716 return *(reinterpret_cast<const float *>(&data) + comp);
2731 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2746 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2760 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2775 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2790 load(const float *ptr)
2792 data = _mm256_loadu_ps(ptr);
2803 store(float *ptr) const
2805 _mm256_storeu_ps(ptr, data);
2815 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2817 _mm256_stream_ps(ptr, data);
2834 gather(const float *base_ptr, const unsigned int *offsets)
2840 const __m256 index_val =
2841 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2842 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2843 data = _mm256_i32gather_ps(base_ptr, index, 4);
2845 for (unsigned int i = 0; i < 8; ++i)
2846 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2864 scatter(const unsigned int *offsets, float *base_ptr) const
2867 for (unsigned int i = 0; i < 8; ++i)
2868 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2903 __m256 mask = _mm256_set1_ps(-0.f);
2905 res.data = _mm256_andnot_ps(mask, data);
2936 template <typename Number2, std::size_t width2>
2939 template <typename Number2, std::size_t width2>
2942 template <typename Number2, std::size_t width2>
2946 template <typename Number2, std::size_t width2>
2961 const unsigned int * offsets,
2964 const unsigned int n_chunks = n_entries / 4;
2965 for (unsigned int i = 0; i < n_chunks; ++i)
2969 __m256 t0, t1, t2, t3 = {};
2970 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
2971 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
2972 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
2973 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
2974 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
2975 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
2976 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
2977 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
2979 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2980 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2981 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2982 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2983 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2984 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2985 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2986 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2990 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2991 out[i].gather(in + i, offsets);
3002 const std::array<float *, 8> &in,
3007 const unsigned int n_chunks = n_entries / 4;
3008 for (unsigned int i = 0; i < n_chunks; ++i)
3010 __m256 t0, t1, t2, t3 = {};
3011 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3012 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3013 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3014 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3015 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3016 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3017 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3018 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3020 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3021 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3022 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3023 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3024 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3025 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3026 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3027 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3030 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3042 const unsigned int n_entries,
3044 const unsigned int * offsets,
3047 const unsigned int n_chunks = n_entries / 4;
3048 for (unsigned int i = 0; i < n_chunks; ++i)
3050 __m256 u0 = in[4 * i + 0].data;
3051 __m256 u1 = in[4 * i + 1].data;
3052 __m256 u2 = in[4 * i + 2].data;
3053 __m256 u3 = in[4 * i + 3].data;
3054 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3055 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3056 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3057 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3058 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3059 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3060 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3061 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3062 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3063 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3064 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3065 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3066 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3067 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3068 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3069 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3076 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3077 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3078 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3079 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3080 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3081 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3082 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3083 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3084 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3085 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3086 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3087 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3088 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3089 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3090 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3091 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3095 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3096 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3097 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3098 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3099 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3100 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3101 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3102 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3108 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3109 for (unsigned int v = 0; v < 8; ++v)
3110 out[offsets[v] + i] += in[i][v];
3112 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3113 for (unsigned int v = 0; v < 8; ++v)
3114 out[offsets[v] + i] = in[i][v];
3125 const unsigned int n_entries,
3127 std::array<float *, 8> & out)
3131 const unsigned int n_chunks = n_entries / 4;
3132 for (unsigned int i = 0; i < n_chunks; ++i)
3134 __m256 u0 = in[4 * i + 0].data;
3135 __m256 u1 = in[4 * i + 1].data;
3136 __m256 u2 = in[4 * i + 2].data;
3137 __m256 u3 = in[4 * i + 3].data;
3138 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3139 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3140 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3141 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3142 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3143 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3144 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3145 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3146 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3147 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3148 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3149 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3150 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3151 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3152 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3153 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3157 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3158 _mm_storeu_ps(out[0] + 4 * i, res0);
3159 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3160 _mm_storeu_ps(out[1] + 4 * i, res1);
3161 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3162 _mm_storeu_ps(out[2] + 4 * i, res2);
3163 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3164 _mm_storeu_ps(out[3] + 4 * i, res3);
3165 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3166 _mm_storeu_ps(out[4] + 4 * i, res4);
3167 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3168 _mm_storeu_ps(out[5] + 4 * i, res5);
3169 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3170 _mm_storeu_ps(out[6] + 4 * i, res6);
3171 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3172 _mm_storeu_ps(out[7] + 4 * i, res7);
3176 _mm_storeu_ps(out[0] + 4 * i, res0);
3177 _mm_storeu_ps(out[1] + 4 * i, res1);
3178 _mm_storeu_ps(out[2] + 4 * i, res2);
3179 _mm_storeu_ps(out[3] + 4 * i, res3);
3180 _mm_storeu_ps(out[4] + 4 * i, res4);
3181 _mm_storeu_ps(out[5] + 4 * i, res5);
3182 _mm_storeu_ps(out[6] + 4 * i, res6);
3183 _mm_storeu_ps(out[7] + 4 * i, res7);
3188 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3189 for (unsigned int v = 0; v < 8; ++v)
3190 out[v][i] += in[i][v];
3192 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3193 for (unsigned int v = 0; v < 8; ++v)
3194 out[v][i] = in[i][v];
3199 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3242 data = _mm_set1_pd(x);
3253 return *(reinterpret_cast<double *>(&data) + comp);
3260 const double &operator[](const unsigned int comp) const
3263 return *(reinterpret_cast<const double *>(&data) + comp);
3273 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3288 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3303 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3318 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3333 load(const double *ptr)
3335 data = _mm_loadu_pd(ptr);
3346 store(double *ptr) const
3348 _mm_storeu_pd(ptr, data);
3358 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3360 _mm_stream_pd(ptr, data);
3377 gather(const double *base_ptr, const unsigned int *offsets)
3379 for (unsigned int i = 0; i < 2; ++i)
3380 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3397 scatter(const unsigned int *offsets, double *base_ptr) const
3399 for (unsigned int i = 0; i < 2; ++i)
3400 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3436 __m128d mask = _mm_set1_pd(-0.);
3438 res.data = _mm_andnot_pd(mask, data);
3469 template <typename Number2, std::size_t width2>
3472 template <typename Number2, std::size_t width2>
3475 template <typename Number2, std::size_t width2>
3479 template <typename Number2, std::size_t width2>
3494 const unsigned int * offsets,
3497 const unsigned int n_chunks = n_entries / 2;
3498 for (unsigned int i = 0; i < n_chunks; ++i)
3500 __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3501 __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
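// For two lanes the unpack instructions already perform the full transpose:
// unpacklo combines the first element of u0 and u1, unpackhi the second, so
// out[2 * i + k][v] ends up as in[offsets[v] + 2 * i + k].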
3502 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3503 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3507 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3508 for (unsigned int v = 0; v < 2; ++v)
3509 out[i][v] = in[offsets[v] + i];
3520 const std::array<double *, 2> &in,
3525 const unsigned int n_chunks = n_entries / 2;
3526 for (unsigned int i = 0; i < n_chunks; ++i)
3528 __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3529 __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3530 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3531 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3534 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3535 for (unsigned int v = 0; v < 2; ++v)
3536 out[i][v] = in[v][i];
3547 const unsigned int n_entries,
3549 const unsigned int * offsets,
3552 const unsigned int n_chunks = n_entries / 2;
3555 for (unsigned int i = 0; i < n_chunks; ++i)
3557 __m128d u0 = in[2 * i + 0].data;
3558 __m128d u1 = in[2 * i + 1].data;
3559 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3560 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3561 _mm_storeu_pd(out + 2 * i + offsets[0],
3562 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3564 _mm_storeu_pd(out + 2 * i + offsets[1],
3565 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3569 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3570 for (unsigned int v = 0; v < 2; ++v)
3571 out[offsets[v] + i] += in[i][v];
3575 for (unsigned int i = 0; i < n_chunks; ++i)
3577 __m128d u0 = in[2 * i + 0].data;
3578 __m128d u1 = in[2 * i + 1].data;
3579 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3580 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3581 _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3582 _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3585 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3586 for (unsigned int v = 0; v < 2; ++v)
3587 out[offsets[v] + i] = in[i][v];
3599 const unsigned int n_entries,
3601 std::array<double *, 2> & out)
3605 const unsigned int n_chunks = n_entries / 2;
3608 for (unsigned int i = 0; i < n_chunks; ++i)
3610 __m128d u0 = in[2 * i + 0].data;
3611 __m128d u1 = in[2 * i + 1].data;
3612 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3613 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3614 _mm_storeu_pd(out[0] + 2 * i,
3615 _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3616 _mm_storeu_pd(out[1] + 2 * i,
3617 _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3620 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3621 for (unsigned int v = 0; v < 2; ++v)
3622 out[v][i] += in[i][v];
3626 for (unsigned int i = 0; i < n_chunks; ++i)
3628 __m128d u0 = in[2 * i + 0].data;
3629 __m128d u1 = in[2 * i + 1].data;
3630 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3631 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3632 _mm_storeu_pd(out[0] + 2 * i, res0);
3633 _mm_storeu_pd(out[1] + 2 * i, res1);
3636 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3637 for (unsigned int v = 0; v < 2; ++v)
3638 out[v][i] = in[i][v];
3686 data = _mm_set1_ps(x);
3697 return *(reinterpret_cast<float *>(&data) + comp);
3704 const float &operator[](const unsigned int comp) const
3707 return *(reinterpret_cast<const float *>(&data) + comp);
3717 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3732 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3747 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3762 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3777 load(const float *ptr)
3779 data = _mm_loadu_ps(ptr);
3790 store(float *ptr) const
3792 _mm_storeu_ps(ptr, data);
3802 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3804 _mm_stream_ps(ptr, data);
3821 gather(const float *base_ptr, const unsigned int *offsets)
3823 for (unsigned int i = 0; i < 4; ++i)
3824 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3841 scatter(const unsigned int *offsets, float *base_ptr) const
3843 for (unsigned int i = 0; i < 4; ++i)
3844 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3879 __m128 mask = _mm_set1_ps(-0.f);
3881 res.data = _mm_andnot_ps(mask, data);
3912 template <typename Number2, std::size_t width2>
3915 template <typename Number2, std::size_t width2>
3918 template <typename Number2, std::size_t width2>
3922 template <typename Number2, std::size_t width2>
3937 const unsigned int * offsets,
3940 const unsigned int n_chunks = n_entries / 4;
3941 for (unsigned int i = 0; i < n_chunks; ++i)
3943 __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
3944 __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
3945 __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
3946 __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
3947 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3948 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3949 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3950 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3951 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3952 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3953 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3954 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3958 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3959 for (unsigned int v = 0; v < 4; ++v)
3960 out[i][v] = in[offsets[v] + i];
3971 const std::array<float *, 4> &in,
3976 const unsigned int n_chunks = n_entries / 4;
3977 for (unsigned int i = 0; i < n_chunks; ++i)
3979 __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
3980 __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
3981 __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
3982 __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
3983 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3984 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3985 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3986 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3987 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3988 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3989 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3990 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3993 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3994 for (unsigned int v = 0; v < 4; ++v)
3995 out[i][v] = in[v][i];
4006 const unsigned int n_entries,
4008 const unsigned int * offsets,
4011 const unsigned int n_chunks = n_entries / 4;
4012 for (unsigned int i = 0; i < n_chunks; ++i)
4014 __m128 u0 = in[4 * i + 0].data;
4015 __m128 u1 = in[4 * i + 1].data;
4016 __m128 u2 = in[4 * i + 2].data;
4017 __m128 u3 = in[4 * i + 3].data;
4018 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4019 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4020 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4021 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4022 u0 = _mm_shuffle_ps(t0, t2, 0x88);
4023 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4024 u2 = _mm_shuffle_ps(t1, t3, 0x88);
4025 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4032 u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4033 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4034 u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4035 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4036 u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4037 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4038 u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4039 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4043 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4044 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4045 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4046 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4052 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4053 for (unsigned int v = 0; v < 4; ++v)
4054 out[offsets[v] + i] += in[i][v];
4056 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4057 for (unsigned int v = 0; v < 4; ++v)
4058 out[offsets[v] + i] = in[i][v];
4069 const unsigned int n_entries,
4071 std::array<float *, 4> & out)
4075 const unsigned int n_chunks = n_entries / 4;
4076 for (unsigned int i = 0; i < n_chunks; ++i)
4078 __m128 u0 = in[4 * i + 0].data;
4079 __m128 u1 = in[4 * i + 1].data;
4080 __m128 u2 = in[4 * i + 2].data;
4081 __m128 u3 = in[4 * i + 3].data;
4082 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4083 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4084 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4085 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4086 u0 = _mm_shuffle_ps(t0, t2, 0x88);
4087 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4088 u2 = _mm_shuffle_ps(t1, t3, 0x88);
4089 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4093 u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4094 _mm_storeu_ps(out[0] + 4 * i, u0);
4095 u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4096 _mm_storeu_ps(out[1] + 4 * i, u1);
4097 u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4098 _mm_storeu_ps(out[2] + 4 * i, u2);
4099 u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4100 _mm_storeu_ps(out[3] + 4 * i, u3);
4104 _mm_storeu_ps(out[0] + 4 * i, u0);
4105 _mm_storeu_ps(out[1] + 4 * i, u1);
4106 _mm_storeu_ps(out[2] + 4 * i, u2);
4107 _mm_storeu_ps(out[3] + 4 * i, u3);
4112 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4113 for (unsigned int v = 0; v < 4; ++v)
4114 out[v][i] += in[i][v];
4116 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4117 for (unsigned int v = 0; v < 4; ++v)
4118 out[v][i] = in[i][v];
4123 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
4125 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4166 data = vec_splats(x);
4182 return *(reinterpret_cast<double *>(&data) + comp);
4189 const double &operator[](const unsigned int comp) const
4192 return *(reinterpret_cast<const double *>(&data) + comp);
4245 load(const double *ptr)
4247 data = vec_vsx_ld(0, ptr);
4256 store(double *ptr) const
4258 vec_vsx_st(data, 0, ptr);
4274 gather(const double *base_ptr, const unsigned int *offsets)
4276 for (unsigned int i = 0; i < 2; ++i)
4277 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4284 scatter(const unsigned int *offsets, double *base_ptr) const
4286 for (unsigned int i = 0; i < 2; ++i)
4287 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4295 __vector double data;
4351 template <typename Number2, std::size_t width2>
4354 template <typename Number2, std::size_t width2>
4357 template <typename Number2, std::size_t width2>
4361 template <typename Number2, std::size_t width2>
  // VectorizedArray<float, 4> (AltiVec/VSX): assignment from a scalar,
  // element access, and load/store plus gather/scatter.

  VectorizedArray &
  operator=(const float x)
  {
    data = vec_splats(x);
    return *this;
  }

  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  void
  load(const float *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  void
  store(float *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  __vector float data;
  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
# endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && defined(__VSX__)
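// Usage sketch (illustrative only, not part of deal.II): the load/store and
// gather/scatter members shown in the specializations above read from
// contiguous resp. indexed memory. It assumes a configuration in which a
// two-lane double array is available (e.g. the VSX or SSE2 code paths);
// variable names are hypothetical.
inline void
gather_scatter_example(const double *      src,
                       const unsigned int *indices, // at least 2 entries
                       double *            dst)
{
  VectorizedArray<double, 2> v;
  v.gather(src, indices);  // v[k] = src[indices[k]]
  v = v * 2.;              // arithmetic acts lane by lane
  v.scatter(indices, dst); // dst[indices[k]] = v[k]
}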
// Relational operator == for VectorizedArray: compare all lanes.
template <typename Number, std::size_t width>
inline bool
operator==(const VectorizedArray<Number, width> &lhs,
           const VectorizedArray<Number, width> &rhs)
{
  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
    if (lhs[i] != rhs[i])
      return false;

  return true;
}
// Template headers of the overloaded arithmetic operators for VectorizedArray;
// the operator signatures and bodies that follow each header are not
// reproduced in this excerpt.

// operator+, operator-, operator* and operator/ on two VectorizedArray objects:
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>

// the same operators combining a VectorizedArray with a scalar (both argument
// orders; the <std::size_t width> variants pair a double scalar with a
// VectorizedArray<float, width>):
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>

// unary operator+ and operator-:
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
// Output operator: print all lanes separated by spaces.
template <typename Number, std::size_t width>
inline std::ostream &
operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
{
  constexpr unsigned int n = VectorizedArray<Number, width>::size();
  for (unsigned int i = 0; i < n - 1; ++i)
    out << p[i] << ' ';
  out << p[n - 1];

  return out;
}
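// Usage sketch (illustrative only, not part of deal.II): the overloaded
// operators make a VectorizedArray usable much like a scalar, and operator<<
// prints the individual lanes separated by spaces. Assumes <iostream>/<ostream>
// is available in the translation unit; names are hypothetical.
template <typename Number, std::size_t width>
void
operator_example(std::ostream &stream)
{
  VectorizedArray<Number, width> x, y;
  x = Number(3); // broadcast 3 to all lanes
  y = Number(4);

  const VectorizedArray<Number, width> z = x * y + Number(2) * x; // lane-wise
  stream << z << std::endl; // prints "18 18 18 18" for width == 4
}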
// enum class encoding the binary operations for a component-wise comparison
// of VectorizedArray data types
enum class SIMDComparison : int
{
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
  equal                 = _CMP_EQ_OQ,
  not_equal             = _CMP_NEQ_OQ,
  less_than             = _CMP_LT_OQ,
  less_than_or_equal    = _CMP_LE_OQ,
  greater_than          = _CMP_GT_OQ,
  greater_than_or_equal = _CMP_GE_OQ
#else
  equal,
  not_equal,
  less_than,
  less_than_or_equal,
  greater_than,
  greater_than_or_equal
#endif
};
// Scalar fallback of compare_and_apply_mask(): evaluates
// (left 'predicate' right) ? true_value : false_value.
template <SIMDComparison predicate, typename Number>
inline Number
compare_and_apply_mask(const Number &left,
                       const Number &right,
                       const Number &true_value,
                       const Number &false_value)
{
  bool mask;
  switch (predicate)
    {
      case SIMDComparison::equal:                 mask = (left == right); break;
      case SIMDComparison::not_equal:             mask = (left != right); break;
      case SIMDComparison::less_than:             mask = (left < right);  break;
      case SIMDComparison::less_than_or_equal:    mask = (left <= right); break;
      case SIMDComparison::greater_than:          mask = (left > right);  break;
      case SIMDComparison::greater_than_or_equal: mask = (left >= right); break;
    }

  return mask ? true_value : false_value;
}

// Width-1 wrapper that forwards to the scalar version above.
template <SIMDComparison predicate, typename Number>
inline VectorizedArray<Number, 1>
compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
                       const VectorizedArray<Number, 1> &right,
                       const VectorizedArray<Number, 1> &true_value,
                       const VectorizedArray<Number, 1> &false_value)
{
  VectorizedArray<Number, 1> result;
  result.data = compare_and_apply_mask<predicate, Number>(
    left.data, right.data, true_value.data, false_value.data);
  return result;
}
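// Usage sketch (illustrative only, not part of deal.II):
// compare_and_apply_mask() is the vectorized form of the ternary operator,
// so a lane-wise "x < lower ? lower : x" clamp can be written as follows.
// The helper name is hypothetical, and the template compiles for the
// Number/width combinations covered by the overloads in this header.
template <typename Number, std::size_t width>
VectorizedArray<Number, width>
clamp_from_below(const VectorizedArray<Number, width> &x, const Number lower)
{
  VectorizedArray<Number, width> bound;
  bound = lower; // broadcast the scalar bound to all lanes

  // lanes where x < bound receive 'bound'; all other lanes keep 'x'
  return compare_and_apply_mask<SIMDComparison::less_than>(x, bound, bound, x);
}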
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)

template <SIMDComparison predicate>
inline VectorizedArray<float, 16>
compare_and_apply_mask(const VectorizedArray<float, 16> &left,
                       const VectorizedArray<float, 16> &right,
                       const VectorizedArray<float, 16> &true_values,
                       const VectorizedArray<float, 16> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 16> result;
  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
  return result;
}

template <SIMDComparison predicate>
inline VectorizedArray<double, 8>
compare_and_apply_mask(const VectorizedArray<double, 8> &left,
                       const VectorizedArray<double, 8> &right,
                       const VectorizedArray<double, 8> &true_values,
                       const VectorizedArray<double, 8> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 8> result;
  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
  return result;
}

# endif
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)

template <SIMDComparison predicate>
inline VectorizedArray<float, 8>
compare_and_apply_mask(const VectorizedArray<float, 8> &left,
                       const VectorizedArray<float, 8> &right,
                       const VectorizedArray<float, 8> &true_values,
                       const VectorizedArray<float, 8> &false_values)
{
  const __m256 mask =
    _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 8> result;
  result.data = _mm256_or_ps(_mm256_and_ps(mask, true_values.data),
                             _mm256_andnot_ps(mask, false_values.data));
  return result;
}

template <SIMDComparison predicate>
inline VectorizedArray<double, 4>
compare_and_apply_mask(const VectorizedArray<double, 4> &left,
                       const VectorizedArray<double, 4> &right,
                       const VectorizedArray<double, 4> &true_values,
                       const VectorizedArray<double, 4> &false_values)
{
  const __m256d mask =
    _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 4> result;
  result.data = _mm256_or_pd(_mm256_and_pd(mask, true_values.data),
                             _mm256_andnot_pd(mask, false_values.data));
  return result;
}

# endif
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)

template <SIMDComparison predicate>
inline VectorizedArray<float, 4>
compare_and_apply_mask(const VectorizedArray<float, 4> &left,
                       const VectorizedArray<float, 4> &right,
                       const VectorizedArray<float, 4> &true_values,
                       const VectorizedArray<float, 4> &false_values)
{
  __m128 mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = _mm_cmpeq_ps(left.data, right.data);
        break;
      case SIMDComparison::not_equal:
        mask = _mm_cmpneq_ps(left.data, right.data);
        break;
      case SIMDComparison::less_than:
        mask = _mm_cmplt_ps(left.data, right.data);
        break;
      case SIMDComparison::less_than_or_equal:
        mask = _mm_cmple_ps(left.data, right.data);
        break;
      case SIMDComparison::greater_than:
        mask = _mm_cmpgt_ps(left.data, right.data);
        break;
      case SIMDComparison::greater_than_or_equal:
        mask = _mm_cmpge_ps(left.data, right.data);
        break;
    }

  VectorizedArray<float, 4> result;
  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
                          _mm_andnot_ps(mask, false_values.data));
  return result;
}

template <SIMDComparison predicate>
inline VectorizedArray<double, 2>
compare_and_apply_mask(const VectorizedArray<double, 2> &left,
                       const VectorizedArray<double, 2> &right,
                       const VectorizedArray<double, 2> &true_values,
                       const VectorizedArray<double, 2> &false_values)
{
  __m128d mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = _mm_cmpeq_pd(left.data, right.data);
        break;
      case SIMDComparison::not_equal:
        mask = _mm_cmpneq_pd(left.data, right.data);
        break;
      case SIMDComparison::less_than:
        mask = _mm_cmplt_pd(left.data, right.data);
        break;
      case SIMDComparison::less_than_or_equal:
        mask = _mm_cmple_pd(left.data, right.data);
        break;
      case SIMDComparison::greater_than:
        mask = _mm_cmpgt_pd(left.data, right.data);
        break;
      case SIMDComparison::greater_than_or_equal:
        mask = _mm_cmpge_pd(left.data, right.data);
        break;
    }

  VectorizedArray<double, 2> result;
  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
                          _mm_andnot_pd(mask, false_values.data));
  return result;
}

# endif
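// Note (illustrative, not part of deal.II): on SSE/AVX, where no dedicated
// mask registers exist, a comparison yields an all-ones or all-zeros pattern
// per lane, and the selection above is the bitwise blend
// (mask & true) | (~mask & false). The scalar sketch below shows that
// identity with plain integers; the helper name is hypothetical.
inline unsigned int
bitwise_blend_sketch(const bool         condition,
                     const unsigned int if_true,
                     const unsigned int if_false)
{
  // all-ones mask when the condition holds, all-zeros otherwise
  const unsigned int mask = condition ? 0xffffffffu : 0u;

  // mirrors _mm_or_ps(_mm_and_ps(mask, true), _mm_andnot_ps(mask, false))
  return (mask & if_true) | (~mask & if_false);
}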
// Entry-wise sine of a vectorized data field, computed via the scalar std::sin.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
sin(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::sin(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

// Entry-wise cosine.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
cos(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::cos(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

// Entry-wise tangent.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
tan(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::tan(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

// Entry-wise exponential.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
exp(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::exp(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

// Entry-wise natural logarithm.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
log(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::log(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

// Entry-wise square root, delegated to the vectorized implementation.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
sqrt(const ::dealii::VectorizedArray<Number, width> &x)
{
  return x.get_sqrt();
}

// Entry-wise power x^p for a fixed scalar exponent p.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
pow(const ::dealii::VectorizedArray<Number, width> &x, const Number p)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::pow(x[i], p);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

// Entry-wise absolute value, delegated to the vectorized implementation.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
abs(const ::dealii::VectorizedArray<Number, width> &x)
{
  return x.get_abs();
}

// Entry-wise maximum of two vectorized data fields.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
max(const ::dealii::VectorizedArray<Number, width> &x,
    const ::dealii::VectorizedArray<Number, width> &y)
{
  return x.get_max(y);
}

// Entry-wise minimum of two vectorized data fields.
template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
min(const ::dealii::VectorizedArray<Number, width> &x,
    const ::dealii::VectorizedArray<Number, width> &y)
{
  return x.get_min(y);
}
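// Usage sketch (illustrative only, not part of deal.II): the overloads above
// let the usual std:: math functions act lane by lane on a VectorizedArray.
// In user code that includes this header it could look as follows; the
// function name is hypothetical.
template <typename Number, std::size_t width>
::dealii::VectorizedArray<Number, width>
bounded_sine_example(const ::dealii::VectorizedArray<Number, width> &x)
{
  ::dealii::VectorizedArray<Number, width> floor_value;
  floor_value = Number(0); // broadcast 0 to all lanes

  // sin() is applied to every lane; max() then clips each lane from below
  return std::max(std::sin(x), floor_value);
}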