17#ifndef dealii_vectorization_h
18#define dealii_vectorization_h
44#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
53# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
55 "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
57# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
59 "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
64# elif defined(__ALTIVEC__)
73# include <x86intrin.h>
85template <typename Number, std::size_t width>
119 "You are trying to compare iterators into different arrays."));
131 "You are trying to compare iterators into different arrays."));
156   template <typename U = T>
157 typename std::enable_if<!std::is_same<U, const U>::value,
158 typename T::value_type>::type &
201 "You can't decrement an iterator that is already at the beginning of the range."));
222       return static_cast<std::ptrdiff_t>(lane) -
223              static_cast<ptrdiff_t>(other.lane);
249template <typename T, std::size_t width>
256 static constexpr std::size_t
388template <typename Number, std::size_t width>
407 static_assert(width == 1,
408 "You specified an illegal width that is not supported.");
596   gather(const Number *base_ptr, const unsigned int *offsets)
598     data = base_ptr[offsets[0]];
615   scatter(const unsigned int *offsets, Number *base_ptr) const
617     base_ptr[offsets[0]] = data;
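// For this generic (width == 1) specialization, gather() and scatter() reduce to a
// single indexed load and store. A minimal usage sketch (the array `values` and the
// offset table `idx` are hypothetical caller-provided data):
//
//   VectorizedArray<double, 1> v;
//   const unsigned int idx[1] = {3};
//   v.gather(values, idx);   // reads values[3] into the single lane
//   v.scatter(idx, values);  // writes the lane back to values[3]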
681   template <typename Number2, std::size_t width2>
684   template <typename Number2, std::size_t width2>
687   template <typename Number2, std::size_t width2>
691   template <typename Number2, std::size_t width2>
700template <typename Number, std::size_t width>
717template <typename Number,
735template <typename VectorizedArrayType>
740 std::is_same<VectorizedArrayType,
742 VectorizedArrayType::size()>>::value,
743 "VectorizedArrayType is not a VectorizedArray.");
745 VectorizedArrayType result = u;
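// Usage sketch for the broadcast helpers above, assuming the deal.II names shown in
// this header (the concrete SIMD width is whatever the build configuration selected):
//
//   const auto a = make_vectorized_array<double>(2.0);                  // all lanes = 2.0
//   const auto b = make_vectorized_array<VectorizedArray<float>>(1.f);  // explicit type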
762template <typename Number, std::size_t width>
765 const std::array<Number *, width> &ptrs,
766 const unsigned int offset)
768   for (unsigned int v = 0; v < width; v++)
769     out.data[v] = ptrs[v][offset];
799template <typename Number, std::size_t width>
803   const unsigned int *      offsets,
806   for (unsigned int i = 0; i < n_entries; ++i)
807     for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
808 out[i][v] = in[offsets[v] + i];
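// Example of the generic vectorized_load_and_transpose() fallback above: it gathers
// size() interleaved scalar streams into an array of VectorizedArray entries. The
// pointer `src`, the `stride`, and `n_entries` are hypothetical caller data:
//
//   constexpr std::size_t n_lanes = VectorizedArray<double>::size();
//   unsigned int offsets[n_lanes];
//   for (unsigned int v = 0; v < n_lanes; ++v)
//     offsets[v] = v * stride;
//   std::vector<VectorizedArray<double>> work(n_entries);
//   vectorized_load_and_transpose(n_entries, src, offsets, work.data());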
823template <typename Number, std::size_t width>
826   const std::array<Number *, width> &in,
829   for (unsigned int i = 0; i < n_entries; ++i)
830     for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
831 out[i][v] = in[v][i];
874template <typename Number, std::size_t width>
877 const unsigned int n_entries,
879 const unsigned int * offsets,
883     for (unsigned int i = 0; i < n_entries; ++i)
884       for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
885         out[offsets[v] + i] += in[i][v];
887     for (unsigned int i = 0; i < n_entries; ++i)
888       for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
889 out[offsets[v] + i] = in[i][v];
904template <typename Number, std::size_t width>
907 const unsigned int n_entries,
909 std::array<Number *, width> & out)
912     for (unsigned int i = 0; i < n_entries; ++i)
913       for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
914         out[v][i] += in[i][v];
916     for (unsigned int i = 0; i < n_entries; ++i)
917       for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
918 out[v][i] = in[i][v];
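// The inverse of the load-and-transpose operation: vectorized_transpose_and_store()
// writes each lane back to its scalar stream, accumulating when add_into is true and
// overwriting otherwise. Sketch with the same hypothetical offsets as above:
//
//   vectorized_transpose_and_store(/*add_into=*/true, n_entries,
//                                  work.data(), offsets, dst);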
929# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
972 data = _mm512_set1_pd(x);
983     return *(reinterpret_cast<double *>(&data) + comp);
990   const double &
      operator[](const unsigned int comp) const
993     return *(reinterpret_cast<const double *>(&data) + comp);
1008# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1023# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1037# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1052# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1067   load(const double *ptr)
1069     data = _mm512_loadu_pd(ptr);
1080   store(double *ptr) const
1082     _mm512_storeu_pd(ptr, data);
1092     Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1094     _mm512_stream_pd(ptr, data);
1111   gather(const double *base_ptr, const unsigned int *offsets)
1116     const __m256 index_val =
1117       _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1118     const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1119     data = _mm512_i32gather_pd(index, base_ptr, 8);
1136   scatter(const unsigned int *offsets, double *base_ptr) const
1138     for (unsigned int i = 0; i < 8; ++i)
1139       for (unsigned int j = i + 1; j < 8; ++j)
1140         Assert(offsets[i] != offsets[j],
1141                ExcMessage("Result of scatter undefined if two offset elements"
1142                           " point to the same position"));
1147     const __m256 index_val =
1148       _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1149     const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1150     _mm512_i32scatter_pd(base_ptr, index, data, 8);
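// Note on the AVX-512 gather/scatter above: the 32-bit offsets are fetched with an
// unaligned float load and then reinterpreted as a __m256i index register, which
// avoids any alignment requirement on the offset array before handing it to the
// hardware gather/scatter instruction (scale factor 8 = sizeof(double)).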
1187 __m512d mask = _mm512_set1_pd(-0.);
1189     res.data = reinterpret_cast<__m512d>(
1190       _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1191                           reinterpret_cast<__m512i>(data)));
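// The absolute value above relies on the usual SIMD sign-bit trick: -0.0 has only
// the sign bit set, so andnot(-0.0, x) clears the sign bit in every lane and yields
// |x| without branching. The AVX and SSE2 specializations further down use the same
// pattern with _mm256_andnot_pd / _mm_andnot_pd.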
1222   template <typename Number2, std::size_t width2>
1225   template <typename Number2, std::size_t width2>
1228   template <typename Number2, std::size_t width2>
1232   template <typename Number2, std::size_t width2>
1247 const unsigned int * offsets,
1255 const unsigned int n_chunks = n_entries / 4;
1256   for (unsigned int i = 0; i < n_chunks; ++i)
1258 __m512d t0, t1, t2, t3 = {};
1260 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1261 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1262 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1263 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1264 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1265 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1266 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1267 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1269       __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1270       __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1271 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1272 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1273       out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1274       out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1275       out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1276       out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1279     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1280       out[i].gather(in + i, offsets);
1291 const std::array<double *, 8> &in,
1294 const unsigned int n_chunks = n_entries / 4;
1295   for (unsigned int i = 0; i < n_chunks; ++i)
1297 __m512d t0, t1, t2, t3 = {};
1299 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1300 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1301 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1302 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1303 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1304 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1305 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1306 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1308       __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1309       __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1310 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1311 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1312       out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1313       out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1314       out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1315       out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1318     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1330 const unsigned int n_entries,
1332 const unsigned int * offsets,
1337 const unsigned int n_chunks = n_entries / 4;
1338 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1339 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1340   for (unsigned int i = 0; i < n_chunks; ++i)
1342 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1343 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1344 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1345 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1346       __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1347       __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1348 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1349 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1350       __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1351       __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1352 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1353 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1354       __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1355       __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1356 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1357 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1364 res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1365 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1366 res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1367 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1368 res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1369 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1370 res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1371 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1372 res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1373 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1374 res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1375 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1376 res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1377 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1378 res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1379 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1383 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1384 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1385 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1386 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1387 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1388 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1389 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1390 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1396     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1397       for (unsigned int v = 0; v < 8; ++v)
1398         out[offsets[v] + i] += in[i][v];
1400     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1401       for (unsigned int v = 0; v < 8; ++v)
1402         out[offsets[v] + i] = in[i][v];
1413 const unsigned int n_entries,
1415 std::array<double *, 8> & out)
1419 const unsigned int n_chunks = n_entries / 4;
1420 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1421 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1422   for (unsigned int i = 0; i < n_chunks; ++i)
1424 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1425 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1426 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1427 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1428       __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1429       __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1430 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1431 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1432       __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1433       __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1434 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1435 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1436       __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1437       __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1438 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1439 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1443 res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1444 _mm256_storeu_pd(out[0] + 4 * i, res0);
1445 res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1446 _mm256_storeu_pd(out[1] + 4 * i, res1);
1447 res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1448 _mm256_storeu_pd(out[2] + 4 * i, res2);
1449 res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1450 _mm256_storeu_pd(out[3] + 4 * i, res3);
1451 res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1452 _mm256_storeu_pd(out[4] + 4 * i, res4);
1453 res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1454 _mm256_storeu_pd(out[5] + 4 * i, res5);
1455 res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1456 _mm256_storeu_pd(out[6] + 4 * i, res6);
1457 res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1458 _mm256_storeu_pd(out[7] + 4 * i, res7);
1462 _mm256_storeu_pd(out[0] + 4 * i, res0);
1463 _mm256_storeu_pd(out[1] + 4 * i, res1);
1464 _mm256_storeu_pd(out[2] + 4 * i, res2);
1465 _mm256_storeu_pd(out[3] + 4 * i, res3);
1466 _mm256_storeu_pd(out[4] + 4 * i, res4);
1467 _mm256_storeu_pd(out[5] + 4 * i, res5);
1468 _mm256_storeu_pd(out[6] + 4 * i, res6);
1469 _mm256_storeu_pd(out[7] + 4 * i, res7);
1474     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1475       for (unsigned int v = 0; v < 8; ++v)
1476         out[v][i] += in[i][v];
1478     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1479       for (unsigned int v = 0; v < 8; ++v)
1480         out[v][i] = in[i][v];
1526 data = _mm512_set1_ps(x);
1537     return *(reinterpret_cast<float *>(&data) + comp);
1544   const float &
       operator[](const unsigned int comp) const
1547     return *(reinterpret_cast<const float *>(&data) + comp);
1562# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1577# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1591# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1606# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1621   load(const float *ptr)
1623     data = _mm512_loadu_ps(ptr);
1634   store(float *ptr) const
1636     _mm512_storeu_ps(ptr, data);
1646     Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1648     _mm512_stream_ps(ptr, data);
1665   gather(const float *base_ptr, const unsigned int *offsets)
1670     const __m512 index_val =
1671       _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1672     const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1673     data = _mm512_i32gather_ps(index, base_ptr, 4);
1690   scatter(const unsigned int *offsets, float *base_ptr) const
1692     for (unsigned int i = 0; i < 16; ++i)
1693       for (unsigned int j = i + 1; j < 16; ++j)
1694         Assert(offsets[i] != offsets[j],
1695                ExcMessage("Result of scatter undefined if two offset elements"
1696                           " point to the same position"));
1701     const __m512 index_val =
1702       _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1703     const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1704     _mm512_i32scatter_ps(base_ptr, index, data, 4);
1741 __m512 mask = _mm512_set1_ps(-0.f);
1743     res.data = reinterpret_cast<__m512>(
1744       _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1745                           reinterpret_cast<__m512i>(data)));
1776   template <typename Number2, std::size_t width2>
1779   template <typename Number2, std::size_t width2>
1782   template <typename Number2, std::size_t width2>
1786   template <typename Number2, std::size_t width2>
1801 const unsigned int * offsets,
1808 const unsigned int n_chunks = n_entries / 4;
1816 __m512 t0, t1, t2, t3;
1819     for (unsigned int i = 0; i < n_chunks; ++i)
1821 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1822 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1823 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1824 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1825 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1826 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1827 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1828 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1829 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1830 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1831 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1832 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1833 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1834 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1835 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1836 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1838       __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1839       __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1840 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1841 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1843       out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1844       out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1845       out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1846       out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1850     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1851       out[i].gather(in + i, offsets);
1862 const std::array<float *, 16> &in,
1867 const unsigned int n_chunks = n_entries / 4;
1869 __m512 t0, t1, t2, t3;
1872     for (unsigned int i = 0; i < n_chunks; ++i)
1874 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1875 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1876 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1877 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1878 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1879 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1880 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1881 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1882 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1883 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1884 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1885 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1886 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1887 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1888 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1889 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1891       __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1892       __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1893 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1894 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1896       out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1897       out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1898       out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1899       out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1902     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1914 const unsigned int n_entries,
1916 const unsigned int * offsets,
1919 const unsigned int n_chunks = n_entries / 4;
1920   for (unsigned int i = 0; i < n_chunks; ++i)
1922 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1923 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1925 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1927 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1928 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1929 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1930 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1931 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1933 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1934 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1935 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1936 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1937 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1938 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1939 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1940 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1941 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1942 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1943 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1944 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1945 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1946 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1947 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1948 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1955 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
1956 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1957 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
1958 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1959 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
1960 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1961 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
1962 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1963 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
1964 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1965 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
1966 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1967 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
1968 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1969 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
1970 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1971 res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
1972 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1973 res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
1974 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1975 res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
1976 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1977 res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
1978 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1979 res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
1980 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1981 res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
1982 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1983 res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
1984 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1985 res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
1986 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1990 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1991 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1992 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1993 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1994 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1995 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1996 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1997 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1998 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1999 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2000 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2001 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2002 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2003 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2004 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2005 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2011     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2012       for (unsigned int v = 0; v < 16; ++v)
2013         out[offsets[v] + i] += in[i][v];
2015     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2016       for (unsigned int v = 0; v < 16; ++v)
2017         out[offsets[v] + i] = in[i][v];
2028 const unsigned int n_entries,
2030 std::array<float *, 16> & out)
2034 const unsigned int n_chunks = n_entries / 4;
2035   for (unsigned int i = 0; i < n_chunks; ++i)
2037 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2038 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2040 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2042 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2043 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2044 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2045 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2046 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2048 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2049 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2050 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2051 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2052 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2053 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2054 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2055 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2056 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2057 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2058 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2059 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2060 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2061 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2062 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2063 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2067 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2068 _mm_storeu_ps(out[0] + 4 * i, res0);
2069 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2070 _mm_storeu_ps(out[1] + 4 * i, res1);
2071 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2072 _mm_storeu_ps(out[2] + 4 * i, res2);
2073 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2074 _mm_storeu_ps(out[3] + 4 * i, res3);
2075 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2076 _mm_storeu_ps(out[4] + 4 * i, res4);
2077 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2078 _mm_storeu_ps(out[5] + 4 * i, res5);
2079 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2080 _mm_storeu_ps(out[6] + 4 * i, res6);
2081 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2082 _mm_storeu_ps(out[7] + 4 * i, res7);
2083 res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2084 _mm_storeu_ps(out[8] + 4 * i, res8);
2085 res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2086 _mm_storeu_ps(out[9] + 4 * i, res9);
2087 res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2088 _mm_storeu_ps(out[10] + 4 * i, res10);
2089 res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2090 _mm_storeu_ps(out[11] + 4 * i, res11);
2091 res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2092 _mm_storeu_ps(out[12] + 4 * i, res12);
2093 res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2094 _mm_storeu_ps(out[13] + 4 * i, res13);
2095 res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2096 _mm_storeu_ps(out[14] + 4 * i, res14);
2097 res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2098 _mm_storeu_ps(out[15] + 4 * i, res15);
2102 _mm_storeu_ps(out[0] + 4 * i, res0);
2103 _mm_storeu_ps(out[1] + 4 * i, res1);
2104 _mm_storeu_ps(out[2] + 4 * i, res2);
2105 _mm_storeu_ps(out[3] + 4 * i, res3);
2106 _mm_storeu_ps(out[4] + 4 * i, res4);
2107 _mm_storeu_ps(out[5] + 4 * i, res5);
2108 _mm_storeu_ps(out[6] + 4 * i, res6);
2109 _mm_storeu_ps(out[7] + 4 * i, res7);
2110 _mm_storeu_ps(out[8] + 4 * i, res8);
2111 _mm_storeu_ps(out[9] + 4 * i, res9);
2112 _mm_storeu_ps(out[10] + 4 * i, res10);
2113 _mm_storeu_ps(out[11] + 4 * i, res11);
2114 _mm_storeu_ps(out[12] + 4 * i, res12);
2115 _mm_storeu_ps(out[13] + 4 * i, res13);
2116 _mm_storeu_ps(out[14] + 4 * i, res14);
2117 _mm_storeu_ps(out[15] + 4 * i, res15);
2122     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2123       for (unsigned int v = 0; v < 16; ++v)
2124         out[v][i] += in[i][v];
2126     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2127       for (unsigned int v = 0; v < 16; ++v)
2128         out[v][i] = in[i][v];
2133# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2176 data = _mm256_set1_pd(x);
2187     return *(reinterpret_cast<double *>(&data) + comp);
2194   const double &
       operator[](const unsigned int comp) const
2197     return *(reinterpret_cast<const double *>(&data) + comp);
2212# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2227# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2241# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2256# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2271   load(const double *ptr)
2273     data = _mm256_loadu_pd(ptr);
2284   store(double *ptr) const
2286     _mm256_storeu_pd(ptr, data);
2296     Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2298     _mm256_stream_pd(ptr, data);
2315   gather(const double *base_ptr, const unsigned int *offsets)
2321     const __m128 index_val =
2322       _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2323     const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2324     data = _mm256_i32gather_pd(base_ptr, index, 8);
2326     for (unsigned int i = 0; i < 4; ++i)
2327       *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2345   scatter(const unsigned int *offsets, double *base_ptr) const
2348     for (unsigned int i = 0; i < 4; ++i)
2349       base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2384 __m256d mask = _mm256_set1_pd(-0.);
2386     res.data = _mm256_andnot_pd(mask, data);
2417   template <typename Number2, std::size_t width2>
2420   template <typename Number2, std::size_t width2>
2423   template <typename Number2, std::size_t width2>
2427   template <typename Number2, std::size_t width2>
2442 const unsigned int * offsets,
2445 const unsigned int n_chunks = n_entries / 4;
2446 const double * in0 = in + offsets[0];
2447 const double * in1 = in + offsets[1];
2448 const double * in2 = in + offsets[2];
2449 const double * in3 = in + offsets[3];
2451   for (unsigned int i = 0; i < n_chunks; ++i)
2453 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2454 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2455 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2456 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2457 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2458 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2459 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2460 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2461       out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2462       out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2463       out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2464       out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2468     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2469       out[i].gather(in + i, offsets);
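// The loop above is a 4 x 4 transpose of doubles: _mm256_permute2f128_pd exchanges
// the 128-bit halves of the four input registers and the unpacklo/unpackhi pair then
// interleaves the remaining elements, so each output VectorizedArray receives one
// "row" collected from the four scalar streams. Entries past the last full chunk of
// four fall back to the element-wise gather().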
2480 const std::array<double *, 4> &in,
2485 const unsigned int n_chunks = n_entries / 4;
2486 const double * in0 = in[0];
2487 const double * in1 = in[1];
2488 const double * in2 = in[2];
2489 const double * in3 = in[3];
2491   for (unsigned int i = 0; i < n_chunks; ++i)
2493 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2494 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2495 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2496 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2497 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2498 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2499 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2500 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2501       out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2502       out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2503       out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2504       out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2507     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2519 const unsigned int n_entries,
2521 const unsigned int * offsets,
2524 const unsigned int n_chunks = n_entries / 4;
2525 double * out0 = out + offsets[0];
2526 double * out1 = out + offsets[1];
2527 double * out2 = out + offsets[2];
2528 double * out3 = out + offsets[3];
2529   for (unsigned int i = 0; i < n_chunks; ++i)
2531       __m256d u0 = in[4 * i + 0].data;
2532       __m256d u1 = in[4 * i + 1].data;
2533       __m256d u2 = in[4 * i + 2].data;
2534       __m256d u3 = in[4 * i + 3].data;
2535 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2536 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2537 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2538 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2539 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2540 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2541 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2542 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2549 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2550 _mm256_storeu_pd(out0 + 4 * i, res0);
2551 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2552 _mm256_storeu_pd(out1 + 4 * i, res1);
2553 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2554 _mm256_storeu_pd(out2 + 4 * i, res2);
2555 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2556 _mm256_storeu_pd(out3 + 4 * i, res3);
2560 _mm256_storeu_pd(out0 + 4 * i, res0);
2561 _mm256_storeu_pd(out1 + 4 * i, res1);
2562 _mm256_storeu_pd(out2 + 4 * i, res2);
2563 _mm256_storeu_pd(out3 + 4 * i, res3);
2569     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2570       for (unsigned int v = 0; v < 4; ++v)
2571         out[offsets[v] + i] += in[i][v];
2573     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2574       for (unsigned int v = 0; v < 4; ++v)
2575         out[offsets[v] + i] = in[i][v];
2586 const unsigned int n_entries,
2588 std::array<double *, 4> & out)
2592 const unsigned int n_chunks = n_entries / 4;
2593 double * out0 = out[0];
2594 double * out1 = out[1];
2595 double * out2 = out[2];
2596 double * out3 = out[3];
2597   for (unsigned int i = 0; i < n_chunks; ++i)
2599       __m256d u0 = in[4 * i + 0].data;
2600       __m256d u1 = in[4 * i + 1].data;
2601       __m256d u2 = in[4 * i + 2].data;
2602       __m256d u3 = in[4 * i + 3].data;
2603 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2604 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2605 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2606 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2607 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2608 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2609 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2610 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2617 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2618 _mm256_storeu_pd(out0 + 4 * i, res0);
2619 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2620 _mm256_storeu_pd(out1 + 4 * i, res1);
2621 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2622 _mm256_storeu_pd(out2 + 4 * i, res2);
2623 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2624 _mm256_storeu_pd(out3 + 4 * i, res3);
2628 _mm256_storeu_pd(out0 + 4 * i, res0);
2629 _mm256_storeu_pd(out1 + 4 * i, res1);
2630 _mm256_storeu_pd(out2 + 4 * i, res2);
2631 _mm256_storeu_pd(out3 + 4 * i, res3);
2637     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2638       for (unsigned int v = 0; v < 4; ++v)
2639         out[v][i] += in[i][v];
2641     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2642       for (unsigned int v = 0; v < 4; ++v)
2643         out[v][i] = in[i][v];
2689 data = _mm256_set1_ps(x);
2700     return *(reinterpret_cast<float *>(&data) + comp);
2707   const float &
       operator[](const unsigned int comp) const
2710     return *(reinterpret_cast<const float *>(&data) + comp);
2725# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2740# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2754# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2769# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2784   load(const float *ptr)
2786     data = _mm256_loadu_ps(ptr);
2797   store(float *ptr) const
2799     _mm256_storeu_ps(ptr, data);
2809     Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2811     _mm256_stream_ps(ptr, data);
2828   gather(const float *base_ptr, const unsigned int *offsets)
2834     const __m256 index_val =
2835       _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2836     const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2837     data = _mm256_i32gather_ps(base_ptr, index, 4);
2839     for (unsigned int i = 0; i < 8; ++i)
2840       *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2858   scatter(const unsigned int *offsets, float *base_ptr) const
2861     for (unsigned int i = 0; i < 8; ++i)
2862       base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2897 __m256 mask = _mm256_set1_ps(-0.f);
2899     res.data = _mm256_andnot_ps(mask, data);
2930   template <typename Number2, std::size_t width2>
2933   template <typename Number2, std::size_t width2>
2936   template <typename Number2, std::size_t width2>
2940   template <typename Number2, std::size_t width2>
2955 const unsigned int * offsets,
2958 const unsigned int n_chunks = n_entries / 4;
2959   for (unsigned int i = 0; i < n_chunks; ++i)
2963 __m256 t0, t1, t2, t3 = {};
2964 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
2965 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
2966 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
2967 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
2968 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
2969 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
2970 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
2971 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
2973       __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2974       __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2975 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2976 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2977       out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2978       out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2979       out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2980       out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2984     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2985       out[i].gather(in + i, offsets);
2996 const std::array<float *, 8> &in,
3001 const unsigned int n_chunks = n_entries / 4;
3002   for (unsigned int i = 0; i < n_chunks; ++i)
3004 __m256 t0, t1, t2, t3 = {};
3005 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3006 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3007 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3008 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3009 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3010 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3011 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3012 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3014       __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3015       __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3016 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3017 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3018       out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3019       out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3020       out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3021       out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3024     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3036 const unsigned int n_entries,
3038 const unsigned int * offsets,
3041 const unsigned int n_chunks = n_entries / 4;
3042   for (unsigned int i = 0; i < n_chunks; ++i)
3044       __m256 u0 = in[4 * i + 0].data;
3045       __m256 u1 = in[4 * i + 1].data;
3046       __m256 u2 = in[4 * i + 2].data;
3047       __m256 u3 = in[4 * i + 3].data;
3048 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3049 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3050 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3051 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3052 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3053 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3054 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3055 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3056 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3057 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3058 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3059 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3060 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3061 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3062 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3063 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3070 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3071 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3072 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3073 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3074 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3075 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3076 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3077 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3078 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3079 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3080 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3081 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3082 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3083 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3084 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3085 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3089 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3090 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3091 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3092 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3093 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3094 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3095 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3096 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3102     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3103       for (unsigned int v = 0; v < 8; ++v)
3104         out[offsets[v] + i] += in[i][v];
3106     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3107       for (unsigned int v = 0; v < 8; ++v)
3108         out[offsets[v] + i] = in[i][v];
3119 const unsigned int n_entries,
3121 std::array<float *, 8> & out)
3125 const unsigned int n_chunks = n_entries / 4;
3126   for (unsigned int i = 0; i < n_chunks; ++i)
3128       __m256 u0 = in[4 * i + 0].data;
3129       __m256 u1 = in[4 * i + 1].data;
3130       __m256 u2 = in[4 * i + 2].data;
3131       __m256 u3 = in[4 * i + 3].data;
3132 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3133 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3134 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3135 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3136 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3137 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3138 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3139 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3140 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3141 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3142 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3143 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3144 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3145 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3146 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3147 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3151 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3152 _mm_storeu_ps(out[0] + 4 * i, res0);
3153 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3154 _mm_storeu_ps(out[1] + 4 * i, res1);
3155 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3156 _mm_storeu_ps(out[2] + 4 * i, res2);
3157 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3158 _mm_storeu_ps(out[3] + 4 * i, res3);
3159 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3160 _mm_storeu_ps(out[4] + 4 * i, res4);
3161 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3162 _mm_storeu_ps(out[5] + 4 * i, res5);
3163 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3164 _mm_storeu_ps(out[6] + 4 * i, res6);
3165 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3166 _mm_storeu_ps(out[7] + 4 * i, res7);
3170 _mm_storeu_ps(out[0] + 4 * i, res0);
3171 _mm_storeu_ps(out[1] + 4 * i, res1);
3172 _mm_storeu_ps(out[2] + 4 * i, res2);
3173 _mm_storeu_ps(out[3] + 4 * i, res3);
3174 _mm_storeu_ps(out[4] + 4 * i, res4);
3175 _mm_storeu_ps(out[5] + 4 * i, res5);
3176 _mm_storeu_ps(out[6] + 4 * i, res6);
3177 _mm_storeu_ps(out[7] + 4 * i, res7);
3182     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3183       for (unsigned int v = 0; v < 8; ++v)
3184         out[v][i] += in[i][v];
3186     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3187       for (unsigned int v = 0; v < 8; ++v)
3188         out[v][i] = in[i][v];
3193# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3236 data = _mm_set1_pd(x);
3247     return *(reinterpret_cast<double *>(&data) + comp);
3254   const double &
       operator[](const unsigned int comp) const
3257     return *(reinterpret_cast<const double *>(&data) + comp);
3267# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3282# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3297# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3312# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3327   load(const double *ptr)
3329     data = _mm_loadu_pd(ptr);
3340   store(double *ptr) const
3342     _mm_storeu_pd(ptr, data);
3352     Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3354     _mm_stream_pd(ptr, data);
3371   gather(const double *base_ptr, const unsigned int *offsets)
3373     for (unsigned int i = 0; i < 2; ++i)
3374       *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3391   scatter(const unsigned int *offsets, double *base_ptr) const
3393     for (unsigned int i = 0; i < 2; ++i)
3394       base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3430 __m128d mask = _mm_set1_pd(-0.);
3432     res.data = _mm_andnot_pd(mask, data);
3463   template <typename Number2, std::size_t width2>
3466   template <typename Number2, std::size_t width2>
3469   template <typename Number2, std::size_t width2>
3473   template <typename Number2, std::size_t width2>
3488 const unsigned int * offsets,
3491 const unsigned int n_chunks = n_entries / 2;
3492   for (unsigned int i = 0; i < n_chunks; ++i)
3494 __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3495 __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3496       out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3497       out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3501     for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3502       for (unsigned int v = 0; v < 2; ++v)
3503 out[i][v] = in[offsets[v] + i];
3514 const std::array<double *, 2> &in,
3519 const unsigned int n_chunks = n_entries / 2;
3520   for (unsigned int i = 0; i < n_chunks; ++i)
3522 __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3523 __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3524       out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3525       out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3528     for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3529       for (unsigned int v = 0; v < 2; ++v)
3530 out[i][v] = in[v][i];
3541 const unsigned int n_entries,
3543 const unsigned int * offsets,
3546 const unsigned int n_chunks = n_entries / 2;
3549       for (unsigned int i = 0; i < n_chunks; ++i)
3551           __m128d u0 = in[2 * i + 0].data;
3552           __m128d u1 = in[2 * i + 1].data;
3553 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3554 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3555 _mm_storeu_pd(out + 2 * i + offsets[0],
3556 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3558 _mm_storeu_pd(out + 2 * i + offsets[1],
3559 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3563       for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3564         for (unsigned int v = 0; v < 2; ++v)
3565           out[offsets[v] + i] += in[i][v];
3569       for (unsigned int i = 0; i < n_chunks; ++i)
3571           __m128d u0 = in[2 * i + 0].data;
3572           __m128d u1 = in[2 * i + 1].data;
3573 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3574 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3575 _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3576 _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3579       for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3580         for (unsigned int v = 0; v < 2; ++v)
3581 out[offsets[v] + i] = in[i][v];
3593 const unsigned int n_entries,
3595 std::array<double *, 2> & out)
3599 const unsigned int n_chunks = n_entries / 2;
3602       for (unsigned int i = 0; i < n_chunks; ++i)
3604           __m128d u0 = in[2 * i + 0].data;
3605           __m128d u1 = in[2 * i + 1].data;
3606 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3607 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3608 _mm_storeu_pd(out[0] + 2 * i,
3609 _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3610 _mm_storeu_pd(out[1] + 2 * i,
3611 _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3614       for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3615         for (unsigned int v = 0; v < 2; ++v)
3616           out[v][i] += in[i][v];
3620       for (unsigned int i = 0; i < n_chunks; ++i)
3622           __m128d u0 = in[2 * i + 0].data;
3623           __m128d u1 = in[2 * i + 1].data;
3624 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3625 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3626 _mm_storeu_pd(out[0] + 2 * i, res0);
3627 _mm_storeu_pd(out[1] + 2 * i, res1);
3630       for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3631         for (unsigned int v = 0; v < 2; ++v)
3632 out[v][i] = in[i][v];
3680 data = _mm_set1_ps(x);
3691     return *(reinterpret_cast<float *>(&data) + comp);
3698   const float &
       operator[](const unsigned int comp) const
3701     return *(reinterpret_cast<const float *>(&data) + comp);
3711# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3726# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3741# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3756# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3771   load(const float *ptr)
3773     data = _mm_loadu_ps(ptr);
3784   store(float *ptr) const
3786     _mm_storeu_ps(ptr, data);
3796     Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3798     _mm_stream_ps(ptr, data);
3815   gather(const float *base_ptr, const unsigned int *offsets)
3817     for (unsigned int i = 0; i < 4; ++i)
3818       *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3835   scatter(const unsigned int *offsets, float *base_ptr) const
3837     for (unsigned int i = 0; i < 4; ++i)
3838       base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3873 __m128 mask = _mm_set1_ps(-0.f);
3875     res.data = _mm_andnot_ps(mask, data);
3906   template <typename Number2, std::size_t width2>
3909   template <typename Number2, std::size_t width2>
3912   template <typename Number2, std::size_t width2>
3916   template <typename Number2, std::size_t width2>
3931 const unsigned int * offsets,
3934 const unsigned int n_chunks = n_entries / 4;
3935   for (unsigned int i = 0; i < n_chunks; ++i)
3937 __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
3938 __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
3939 __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
3940 __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
3941       __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3942       __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3943 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3944 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3945       out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3946       out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3947       out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3948       out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3952     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3953       for (unsigned int v = 0; v < 4; ++v)
3954 out[i][v] = in[offsets[v] + i];
3965 const std::array<float *, 4> &in,
3970 const unsigned int n_chunks = n_entries / 4;
3971   for (unsigned int i = 0; i < n_chunks; ++i)
3973 __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
3974 __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
3975 __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
3976 __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
3977       __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3978       __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3979 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3980 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3981       out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3982       out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3983       out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3984       out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3987     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3988       for (unsigned int v = 0; v < 4; ++v)
3989 out[i][v] = in[v][i];
4000 const unsigned int n_entries,
4002 const unsigned int * offsets,
4005 const unsigned int n_chunks = n_entries / 4;
4006   for (unsigned int i = 0; i < n_chunks; ++i)
4008       __m128 u0 = in[4 * i + 0].data;
4009       __m128 u1 = in[4 * i + 1].data;
4010       __m128 u2 = in[4 * i + 2].data;
4011       __m128 u3 = in[4 * i + 3].data;
4012 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4013 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4014 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4015 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4016 u0 = _mm_shuffle_ps(t0, t2, 0x88);
4017 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4018 u2 = _mm_shuffle_ps(t1, t3, 0x88);
4019 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4026 u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4027 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4028 u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4029 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4030 u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4031 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4032 u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4033 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4037 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4038 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4039 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4040 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4046     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4047       for (unsigned int v = 0; v < 4; ++v)
4048         out[offsets[v] + i] += in[i][v];
4050     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4051       for (unsigned int v = 0; v < 4; ++v)
4052         out[offsets[v] + i] = in[i][v];
4063 const unsigned int n_entries,
4065 std::array<float *, 4> & out)
4069 const unsigned int n_chunks = n_entries / 4;
4070   for (unsigned int i = 0; i < n_chunks; ++i)
4072       __m128 u0 = in[4 * i + 0].data;
4073       __m128 u1 = in[4 * i + 1].data;
4074       __m128 u2 = in[4 * i + 2].data;
4075       __m128 u3 = in[4 * i + 3].data;
4076 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4077 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4078 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4079 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4080 u0 = _mm_shuffle_ps(t0, t2, 0x88);
4081 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4082 u2 = _mm_shuffle_ps(t1, t3, 0x88);
4083 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4087 u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4088 _mm_storeu_ps(out[0] + 4 * i, u0);
4089 u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4090 _mm_storeu_ps(out[1] + 4 * i, u1);
4091 u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4092 _mm_storeu_ps(out[2] + 4 * i, u2);
4093 u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4094 _mm_storeu_ps(out[3] + 4 * i, u3);
4098 _mm_storeu_ps(out[0] + 4 * i, u0);
4099 _mm_storeu_ps(out[1] + 4 * i, u1);
4100 _mm_storeu_ps(out[2] + 4 * i, u2);
4101 _mm_storeu_ps(out[3] + 4 * i, u3);
4106     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4107       for (unsigned int v = 0; v < 4; ++v)
4108         out[v][i] += in[i][v];
4110     for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4111       for (unsigned int v = 0; v < 4; ++v)
4112         out[v][i] = in[i][v];
4119# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4160 data = vec_splats(x);
4176     return *(reinterpret_cast<double *>(&data) + comp);
4183   const double &
       operator[](const unsigned int comp) const
4186     return *(reinterpret_cast<const double *>(&data) + comp);
4239   load(const double *ptr)
4241     data = vec_vsx_ld(0, ptr);
4250   store(double *ptr) const
4252     vec_vsx_st(data, 0, ptr);
4268   gather(const double *base_ptr, const unsigned int *offsets)
4270     for (unsigned int i = 0; i < 2; ++i)
4271       *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4278   scatter(const unsigned int *offsets, double *base_ptr) const
4280     for (unsigned int i = 0; i < 2; ++i)
4281       base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4289   __vector double data;
4345   template <typename Number2, std::size_t width2>
4348   template <typename Number2, std::size_t width2>
4351   template <typename Number2, std::size_t width2>
4355   template <typename Number2, std::size_t width2>
4401 data = vec_splats(x);
4417 return *(reinterpret_cast<float *>(&data) + comp);
4424 const float & operator[](const unsigned int comp) const
4427 return *(reinterpret_cast<const float *>(&data) + comp);
4480 load(const float *ptr)
4482 data = vec_vsx_ld(0, ptr);
4491 store(float *ptr) const
4493 vec_vsx_st(data, 0, ptr);
4509 gather(const float *base_ptr, const unsigned int *offsets)
4511 for (unsigned int i = 0; i < 4; ++i)
4512 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4519 scatter(const unsigned int *offsets, float *base_ptr) const
4521 for (unsigned int i = 0; i < 4; ++i)
4522 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4530 __vector float data;
4586 template <typename Number2, std::size_t width2>
4589 template <typename Number2, std::size_t width2>
4592 template <typename Number2, std::size_t width2>
4596 template <typename Number2, std::size_t width2>
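// Usage sketch for the gather()/scatter() members (illustration only; the indices
// and array sizes are made up, and VectorizedArray<double, 2> requires a build with
// 128-bit vectorization, e.g. the VSX specialization above or SSE2).
#include <deal.II/base/vectorization.h>

#include <vector>

void gather_scatter_demo()
{
  using namespace dealii;

  std::vector<double> src(8, 1.0), dst(8, 0.0);

  VectorizedArray<double, 2> v;
  const unsigned int         offsets[2] = {0, 3}; // one index per lane

  v.gather(src.data(), offsets);  // v[0] = src[0], v[1] = src[3]
  v = v + 1.0;                    // element-wise update of both lanes
  v.scatter(offsets, dst.data()); // dst[0] = v[0], dst[3] = v[1]
}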
4618template <typename Number, std::size_t width>
4623 for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4624 if (lhs[i] != rhs[i])
4636template <typename Number, std::size_t width>
4650template <typename Number, std::size_t width>
4664template <typename Number, std::size_t width>
4678template <typename Number, std::size_t width>
4693template <typename Number, std::size_t width>
4709template <std::size_t width>
4723template <typename Number, std::size_t width>
4738template <std::size_t width>
4751template <typename Number, std::size_t width>
4767template <std::size_t width>
4781template <typename Number, std::size_t width>
4797template <std::size_t width>
4811template <typename Number, std::size_t width>
4827template <std::size_t width>
4841template <typename Number, std::size_t width>
4856template <std::size_t width>
4869template <typename Number, std::size_t width>
4885template <std::size_t width>
4899template <typename Number, std::size_t width>
4915template <std::size_t width>
4928template <typename Number, std::size_t width>
4940template <typename Number, std::size_t width>
4954template <typename Number, std::size_t width>
4955inline std::ostream &
4959 for (unsigned int i = 0; i < n - 1; ++i)
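// Usage sketch for the element-wise operators and the stream output declared above
// (default-width arrays; how many lanes are printed depends on the build).
#include <deal.II/base/vectorization.h>

#include <iostream>

void operator_demo()
{
  using namespace dealii;

  const auto a = make_vectorized_array(3.0); // broadcast 3.0 to every lane
  const auto b = make_vectorized_array(4.0);

  const VectorizedArray<double> c = 2.0 * a + b / a - 1.0; // applied lane by lane
  std::cout << c << std::endl; // prints all lanes separated by spaces

  if (c == c) // operator== is true only if *all* lanes compare equal
    std::cout << "every lane agrees" << std::endl;
}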
4983#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5064template <SIMDComparison predicate, typename Number>
5067 const Number &right,
5068 const Number &true_value,
5069 const Number &false_value)
5075 mask = (left == right);
5078 mask = (left != right);
5081 mask = (left < right);
5084 mask = (left <= right);
5087 mask = (left > right);
5090 mask = (left >= right);
5094 return mask ? true_value : false_value;
5102template <SIMDComparison predicate, typename Number>
5110 result.data = compare_and_apply_mask<predicate, Number>(left.data,
5120# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5122template <SIMDComparison predicate>
5129 const __mmask16 mask =
5130 _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5132 result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5138template <SIMDComparison predicate>
5145 const __mmask16 mask =
5146 _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5148 result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5154# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5156template <SIMDComparison predicate>
5164 _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5167 result.data = _mm256_or_ps(_mm256_and_ps(mask, true_values.data),
5168 _mm256_andnot_ps(mask, false_values.data));
5173template <SIMDComparison predicate>
5181 _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5184 result.data = _mm256_or_pd(_mm256_and_pd(mask, true_values.data),
5185 _mm256_andnot_pd(mask, false_values.data));
5191# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5193template <SIMDComparison predicate>
5204 mask = _mm_cmpeq_ps(left.data, right.data);
5207 mask = _mm_cmpneq_ps(left.data, right.data);
5210 mask = _mm_cmplt_ps(left.data, right.data);
5213 mask = _mm_cmple_ps(left.data, right.data);
5216 mask = _mm_cmpgt_ps(left.data, right.data);
5219 mask = _mm_cmpge_ps(left.data, right.data);
5224 result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5225 _mm_andnot_ps(mask, false_values.data));
5231template <SIMDComparison predicate>
5242 mask = _mm_cmpeq_pd(left.data, right.data);
5245 mask = _mm_cmpneq_pd(left.data, right.data);
5248 mask = _mm_cmplt_pd(left.data, right.data);
5251 mask = _mm_cmple_pd(left.data, right.data);
5254 mask = _mm_cmpgt_pd(left.data, right.data);
5257 mask = _mm_cmpge_pd(left.data, right.data);
5262 result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5263 _mm_andnot_pd(mask, false_values.data));
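// Usage sketch for compare_and_apply_mask(): a branch-free, lane-wise
// "x < lower ? lower : x". The clamp itself and the use of the SIMDComparison::less_than
// enumerator are illustrative assumptions, not part of the header.
#include <deal.II/base/vectorization.h>

dealii::VectorizedArray<double>
clamp_below(const dealii::VectorizedArray<double> &x, const double lower)
{
  dealii::VectorizedArray<double> bound;
  bound = lower; // broadcast the scalar bound to all lanes

  // For every lane: pick 'bound' where x < bound holds, otherwise keep x.
  return dealii::compare_and_apply_mask<dealii::SIMDComparison::less_than>(x,
                                                                           bound,
                                                                           bound,
                                                                           x);
}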
5289 template <typename Number, std::size_t width>
5290 inline ::VectorizedArray<Number, width>
5291 sin(const ::VectorizedArray<Number, width> &x)
5299 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5316 template <typename Number, std::size_t width>
5317 inline ::VectorizedArray<Number, width>
5318 cos(const ::VectorizedArray<Number, width> &x)
5321 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5338 template <typename Number, std::size_t width>
5339 inline ::VectorizedArray<Number, width>
5340 tan(const ::VectorizedArray<Number, width> &x)
5343 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5360 template <typename Number, std::size_t width>
5361 inline ::VectorizedArray<Number, width>
5362 exp(const ::VectorizedArray<Number, width> &x)
5365 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5382 template <typename Number, std::size_t width>
5383 inline ::VectorizedArray<Number, width>
5384 log(const ::VectorizedArray<Number, width> &x)
5387 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5404 template <typename Number, std::size_t width>
5405 inline ::VectorizedArray<Number, width>
5406 sqrt(const ::VectorizedArray<Number, width> &x)
5408 return x.get_sqrt();
5420 template <typename Number, std::size_t width>
5421 inline ::VectorizedArray<Number, width>
5422 pow(const ::VectorizedArray<Number, width> &x, const Number p)
5425 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5443 template <typename Number, std::size_t width>
5444 inline ::VectorizedArray<Number, width>
5445 pow(const ::VectorizedArray<Number, width> &x,
5446 const ::VectorizedArray<Number, width> &p)
5449 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5466 template <typename Number, std::size_t width>
5467 inline ::VectorizedArray<Number, width>
5468 abs(const ::VectorizedArray<Number, width> &x)
5482 template <typename Number, std::size_t width>
5483 inline ::VectorizedArray<Number, width>
5484 max(const ::VectorizedArray<Number, width> &x,
5485 const ::VectorizedArray<Number, width> &y)
5487 return x.get_max(y);
5499 template <typename Number, std::size_t width>
5500 inline ::VectorizedArray<Number, width>
5501 min(const ::VectorizedArray<Number, width> &x,
5502 const ::VectorizedArray<Number, width> &y)
5504 return x.get_min(y);
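// Usage sketch for the std:: overloads above: every call maps the scalar function
// over the lanes (or forwards to a dedicated member such as get_sqrt()/get_max()).
// The particular function evaluated here is an arbitrary example.
#include <deal.II/base/vectorization.h>

dealii::VectorizedArray<double>
smooth(const dealii::VectorizedArray<double> &x)
{
  const auto r = std::sqrt(std::abs(x));
  return std::max(std::sin(r), std::cos(r)) + std::pow(x, 2.0);
}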
VectorizedArrayIterator< const T > begin() const
VectorizedArrayIterator< const T > end() const
static constexpr std::size_t size()
VectorizedArrayIterator< T > end()
VectorizedArrayIterator< T > begin()
VectorizedArrayIterator< T > & operator+=(const std::size_t offset)
VectorizedArrayIterator< T > & operator=(const VectorizedArrayIterator< T > &other)=default
VectorizedArrayIterator< T > & operator--()
VectorizedArrayIterator< T > & operator++()
std::ptrdiff_t operator-(const VectorizedArrayIterator< T > &other) const
bool operator==(const VectorizedArrayIterator< T > &other) const
VectorizedArrayIterator(T &data, const std::size_t lane)
const T::value_type & operator*() const
std::enable_if<!std::is_same< U, const U >::value, typename T::value_type >::type & operator*()
bool operator!=(const VectorizedArrayIterator< T > &other) const
VectorizedArrayIterator< T > operator+(const std::size_t &offset) const
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &u)
VectorizedArray< float, width > operator+(const VectorizedArray< float, width > &v, const double u)
VectorizedArray & operator/=(const VectorizedArray &vec)
void gather(const Number *base_ptr, const unsigned int *offsets)
void vectorized_load_and_transpose(const unsigned int n_entries, const Number *in, const unsigned int *offsets, VectorizedArray< Number, width > *out)
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArrayType make_vectorized_array(const typename VectorizedArrayType::value_type &u)
VectorizedArray< Number, width > operator/(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArray get_abs() const
VectorizedArray< float, width > operator/(const VectorizedArray< float, width > &v, const double u)
VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &x)
VectorizedArray< Number, width > max(const ::VectorizedArray< Number, width > &x, const ::VectorizedArray< Number, width > &y)
VectorizedArray< Number, width > log(const ::VectorizedArray< Number, width > &x)
VectorizedArray< Number, width > operator*(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArray< Number, width > exp(const ::VectorizedArray< Number, width > &x)
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &v, const Number &u)
Number & operator[](const unsigned int comp)
VectorizedArray< float, width > operator-(const double u, const VectorizedArray< float, width > &v)
VectorizedArray< Number, width > operator+(const Number &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &u)
VectorizedArray()=default
bool operator==(const VectorizedArray< Number, width > &lhs, const VectorizedArray< Number, width > &rhs)
VectorizedArray< Number, width > tan(const ::VectorizedArray< Number, width > &x)
VectorizedArray(const Number scalar)
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray< float, width > operator*(const VectorizedArray< float, width > &v, const double u)
VectorizedArray & operator*=(const VectorizedArray &vec)
VectorizedArray get_max(const VectorizedArray &other) const
const Number & operator[](const unsigned int comp) const
VectorizedArray< Number, width > min(const ::VectorizedArray< Number, width > &x, const ::VectorizedArray< Number, width > &y)
VectorizedArray get_min(const VectorizedArray &other) const
VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &x, const Number p)
VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &x, const ::VectorizedArray< Number, width > &p)
VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &x)
VectorizedArray< float, width > operator-(const VectorizedArray< float, width > &v, const double u)
void store(Number *ptr) const
void scatter(const unsigned int *offsets, Number *base_ptr) const
VectorizedArray< Number, width > operator-(const Number &u, const VectorizedArray< Number, width > &v)
void load(const Number *ptr)
VectorizedArray< Number, width > operator*(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray & operator-=(const VectorizedArray &vec)
VectorizedArray< float, width > operator+(const double u, const VectorizedArray< float, width > &v)
VectorizedArray< Number, width > operator*(const Number &u, const VectorizedArray< Number, width > &v)
VectorizedArray get_sqrt() const
VectorizedArray< Number, width > operator/(const Number &u, const VectorizedArray< Number, width > &v)
static const unsigned int n_array_elements
VectorizedArray & operator+=(const VectorizedArray &vec)
VectorizedArray & operator=(const Number scalar)
VectorizedArray< Number, width > make_vectorized_array(const Number &u)
VectorizedArray< Number, width > operator/(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > cos(const ::VectorizedArray< Number, width > &x)
VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &x)
void streaming_store(Number *ptr) const
void vectorized_transpose_and_store(const bool add_into, const unsigned int n_entries, const VectorizedArray< Number, width > *in, const unsigned int *offsets, Number *out)
VectorizedArray< float, width > operator/(const double u, const VectorizedArray< float, width > &v)
VectorizedArray< float, width > operator*(const double u, const VectorizedArray< float, width > &v)
#define DEAL_II_ALWAYS_INLINE
#define DEAL_II_DEPRECATED
#define DEAL_II_NAMESPACE_OPEN
#define DEAL_II_NAMESPACE_CLOSE
#define Assert(cond, exc)
#define AssertIndexRange(index, range)
static ::ExceptionBase & ExcMessage(std::string arg1)
::VectorizedArray< Number, width > log(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > exp(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > tan(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > min(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > max(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > cos(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &, const Number p)
::VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &)
typename T::value_type value_type
std::ptrdiff_t difference_type
random_access_iterator_tag iterator_category
void gather(VectorizedArray< Number, width > &out, const std::array< Number *, width > &ptrs, const unsigned int offset)
Number compare_and_apply_mask(const Number &left, const Number &right, const Number &true_value, const Number &false_value)