17#ifndef dealii_vectorization_h
18#define dealii_vectorization_h
44#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
53# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
55 "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
57# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
59 "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
64# elif defined(__ALTIVEC__)
73# include <x86intrin.h>
85template <
typename Number, std::
size_t w
idth>
119 "You are trying to compare iterators into different arrays."));
131 "You are trying to compare iterators into different arrays."));
145 const typename T::value_type &
157 template <
typename U = T>
158 typename std::enable_if<!std::is_same<U, const U>::value,
159 typename T::value_type>::type &
202 "You can't decrement an iterator that is already at the beginning of the range."));
223 return static_cast<std::ptrdiff_t
>(
lane) -
224 static_cast<ptrdiff_t
>(other.
lane);
250template <
typename T, std::
size_t w
idth>
262 template <
typename U>
265 auto i0 = this->
begin();
266 auto i1 = list.begin();
268 for (; i1 != list.end(); ++i0, ++i1)
273 "Initializer list exceeds size of this VectorizedArray object."));
278 for (; i0 != this->
end(); ++i0)
287 static constexpr std::size_t
419template <
typename Number, std::
size_t w
idth>
429 static_assert(width == 1,
430 "You specified an illegal width that is not supported.");
449 template <
typename U>
628 gather(
const Number *base_ptr,
const unsigned int *offsets)
630 data = base_ptr[offsets[0]];
647 scatter(
const unsigned int *offsets, Number *base_ptr)
const
649 base_ptr[offsets[0]] =
data;
713 template <
typename Number2, std::
size_t w
idth2>
716 template <
typename Number2, std::
size_t w
idth2>
719 template <
typename Number2, std::
size_t w
idth2>
723 template <
typename Number2, std::
size_t w
idth2>
743template <
typename Number,
761template <
typename VectorizedArrayType>
766 std::is_same<VectorizedArrayType,
768 VectorizedArrayType::size()>>::value,
769 "VectorizedArrayType is not a VectorizedArray.");
771 VectorizedArrayType result = u;
788template <
typename Number, std::
size_t w
idth>
791 const std::array<Number *, width> &ptrs,
792 const unsigned int offset)
794 for (
unsigned int v = 0; v < width; ++v)
795 out.
data[v] = ptrs[v][offset];
825template <
typename Number, std::
size_t w
idth>
829 const unsigned int * offsets,
832 for (
unsigned int i = 0; i < n_entries; ++i)
833 for (
unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
834 out[i][v] = in[offsets[v] + i];
849template <
typename Number, std::
size_t w
idth>
852 const std::array<Number *, width> &in,
855 for (
unsigned int i = 0; i < n_entries; ++i)
856 for (
unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
857 out[i][v] = in[v][i];
900template <
typename Number, std::
size_t w
idth>
903 const unsigned int n_entries,
905 const unsigned int * offsets,
909 for (
unsigned int i = 0; i < n_entries; ++i)
910 for (
unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
911 out[offsets[v] + i] += in[i][v];
913 for (
unsigned int i = 0; i < n_entries; ++i)
914 for (
unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
915 out[offsets[v] + i] = in[i][v];
930template <
typename Number, std::
size_t w
idth>
933 const unsigned int n_entries,
935 std::array<Number *, width> & out)
938 for (
unsigned int i = 0; i < n_entries; ++i)
939 for (
unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
940 out[v][i] += in[i][v];
942 for (
unsigned int i = 0; i < n_entries; ++i)
943 for (
unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
944 out[v][i] = in[i][v];
955# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
987 template <
typename U>
999 data = _mm512_set1_pd(x);
1011 return *(
reinterpret_cast<double *
>(&
data) + comp);
1022 return *(
reinterpret_cast<const double *
>(&
data) + comp);
1037# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1052# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1066# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1081# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1096 load(
const double *ptr)
1098 data = _mm512_loadu_pd(ptr);
1109 store(
double *ptr)
const
1111 _mm512_storeu_pd(ptr,
data);
1122 Assert(
reinterpret_cast<std::size_t
>(ptr) % 64 == 0,
1124 _mm512_stream_pd(ptr,
data);
1141 gather(
const double *base_ptr,
const unsigned int *offsets)
1146 const __m256 index_val =
1147 _mm256_loadu_ps(
reinterpret_cast<const float *
>(offsets));
1148 const __m256i
index = *
reinterpret_cast<const __m256i *
>(&index_val);
1154 __mmask8
mask = 0xFF;
1156 data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
1173 scatter(
const unsigned int *offsets,
double *base_ptr)
const
1175 for (
unsigned int i = 0; i < 8; ++i)
1176 for (
unsigned int j = i + 1; j < 8; ++j)
1177 Assert(offsets[i] != offsets[j],
1178 ExcMessage(
"Result of scatter undefined if two offset elements"
1179 " point to the same position"));
1184 const __m256 index_val =
1185 _mm256_loadu_ps(
reinterpret_cast<const float *
>(offsets));
1186 const __m256i
index = *
reinterpret_cast<const __m256i *
>(&index_val);
1187 _mm512_i32scatter_pd(base_ptr, index,
data, 8);
1224 __m512d
mask = _mm512_set1_pd(-0.);
1226 res.
data =
reinterpret_cast<__m512d
>(
1227 _mm512_andnot_epi64(
reinterpret_cast<__m512i
>(mask),
1228 reinterpret_cast<__m512i
>(
data)));
1259 template <
typename Number2, std::
size_t w
idth2>
1262 template <
typename Number2, std::
size_t w
idth2>
1265 template <
typename Number2, std::
size_t w
idth2>
1269 template <
typename Number2, std::
size_t w
idth2>
1284 const unsigned int * offsets,
1292 const unsigned int n_chunks = n_entries / 4;
1293 for (
unsigned int i = 0; i < n_chunks; ++i)
1295 __m512d t0, t1, t2, t3 = {};
1297 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1298 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1299 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1300 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1301 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1302 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1303 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1304 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1306 __m512d
v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1307 __m512d
v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1308 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1309 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1310 out[4 * i + 0].
data = _mm512_unpacklo_pd(
v0, v2);
1311 out[4 * i + 1].
data = _mm512_unpackhi_pd(
v0, v2);
1312 out[4 * i + 2].
data = _mm512_unpacklo_pd(
v1, v3);
1313 out[4 * i + 3].
data = _mm512_unpackhi_pd(
v1, v3);
1316 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1317 out[i].
gather(in + i, offsets);
1328 const std::array<double *, 8> &in,
1331 const unsigned int n_chunks = n_entries / 4;
1332 for (
unsigned int i = 0; i < n_chunks; ++i)
1334 __m512d t0, t1, t2, t3 = {};
1336 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1337 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1338 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1339 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1340 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1341 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1342 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1343 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1345 __m512d
v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1346 __m512d
v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1347 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1348 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1349 out[4 * i + 0].
data = _mm512_unpacklo_pd(
v0, v2);
1350 out[4 * i + 1].
data = _mm512_unpackhi_pd(
v0, v2);
1351 out[4 * i + 2].
data = _mm512_unpacklo_pd(
v1, v3);
1352 out[4 * i + 3].
data = _mm512_unpackhi_pd(
v1, v3);
1355 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1367 const unsigned int n_entries,
1369 const unsigned int * offsets,
1374 const unsigned int n_chunks = n_entries / 4;
1375 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1376 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1377 for (
unsigned int i = 0; i < n_chunks; ++i)
1379 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1380 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1381 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1382 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1383 __m512d
v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1384 __m512d
v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1385 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1386 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1387 __m256d res0 = _mm512_extractf64x4_pd(
v0, 0);
1388 __m256d res4 = _mm512_extractf64x4_pd(
v0, 1);
1389 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1390 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1391 __m256d res2 = _mm512_extractf64x4_pd(
v1, 0);
1392 __m256d res6 = _mm512_extractf64x4_pd(
v1, 1);
1393 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1394 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1401 res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1402 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1403 res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1404 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1405 res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1406 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1407 res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1408 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1409 res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1410 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1411 res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1412 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1413 res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1414 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1415 res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1416 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1420 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1421 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1422 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1423 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1424 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1425 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1426 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1427 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1433 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1434 for (
unsigned int v = 0; v < 8; ++v)
1435 out[offsets[v] + i] += in[i][v];
1437 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1438 for (
unsigned int v = 0; v < 8; ++v)
1439 out[offsets[v] + i] = in[i][v];
1450 const unsigned int n_entries,
1452 std::array<double *, 8> & out)
1456 const unsigned int n_chunks = n_entries / 4;
1457 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1458 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1459 for (
unsigned int i = 0; i < n_chunks; ++i)
1461 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1462 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1463 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1464 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1465 __m512d
v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1466 __m512d
v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1467 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1468 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1469 __m256d res0 = _mm512_extractf64x4_pd(
v0, 0);
1470 __m256d res4 = _mm512_extractf64x4_pd(
v0, 1);
1471 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1472 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1473 __m256d res2 = _mm512_extractf64x4_pd(
v1, 0);
1474 __m256d res6 = _mm512_extractf64x4_pd(
v1, 1);
1475 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1476 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1480 res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1481 _mm256_storeu_pd(out[0] + 4 * i, res0);
1482 res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1483 _mm256_storeu_pd(out[1] + 4 * i, res1);
1484 res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1485 _mm256_storeu_pd(out[2] + 4 * i, res2);
1486 res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1487 _mm256_storeu_pd(out[3] + 4 * i, res3);
1488 res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1489 _mm256_storeu_pd(out[4] + 4 * i, res4);
1490 res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1491 _mm256_storeu_pd(out[5] + 4 * i, res5);
1492 res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1493 _mm256_storeu_pd(out[6] + 4 * i, res6);
1494 res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1495 _mm256_storeu_pd(out[7] + 4 * i, res7);
1499 _mm256_storeu_pd(out[0] + 4 * i, res0);
1500 _mm256_storeu_pd(out[1] + 4 * i, res1);
1501 _mm256_storeu_pd(out[2] + 4 * i, res2);
1502 _mm256_storeu_pd(out[3] + 4 * i, res3);
1503 _mm256_storeu_pd(out[4] + 4 * i, res4);
1504 _mm256_storeu_pd(out[5] + 4 * i, res5);
1505 _mm256_storeu_pd(out[6] + 4 * i, res6);
1506 _mm256_storeu_pd(out[7] + 4 * i, res7);
1511 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1512 for (
unsigned int v = 0; v < 8; ++v)
1513 out[v][i] += in[i][v];
1515 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1516 for (
unsigned int v = 0; v < 8; ++v)
1517 out[v][i] = in[i][v];
1552 template <
typename U>
1564 data = _mm512_set1_ps(x);
1576 return *(
reinterpret_cast<float *
>(&
data) + comp);
1587 return *(
reinterpret_cast<const float *
>(&
data) + comp);
1602# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1617# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1631# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1646# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1661 load(
const float *ptr)
1663 data = _mm512_loadu_ps(ptr);
1674 store(
float *ptr)
const
1676 _mm512_storeu_ps(ptr,
data);
1687 Assert(
reinterpret_cast<std::size_t
>(ptr) % 64 == 0,
1689 _mm512_stream_ps(ptr,
data);
1706 gather(
const float *base_ptr,
const unsigned int *offsets)
1711 const __m512 index_val =
1712 _mm512_loadu_ps(
reinterpret_cast<const float *
>(offsets));
1713 const __m512i
index = *
reinterpret_cast<const __m512i *
>(&index_val);
1719 __mmask16
mask = 0xFFFF;
1721 data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
1738 scatter(
const unsigned int *offsets,
float *base_ptr)
const
1740 for (
unsigned int i = 0; i < 16; ++i)
1741 for (
unsigned int j = i + 1; j < 16; ++j)
1742 Assert(offsets[i] != offsets[j],
1743 ExcMessage(
"Result of scatter undefined if two offset elements"
1744 " point to the same position"));
1749 const __m512 index_val =
1750 _mm512_loadu_ps(
reinterpret_cast<const float *
>(offsets));
1751 const __m512i
index = *
reinterpret_cast<const __m512i *
>(&index_val);
1752 _mm512_i32scatter_ps(base_ptr, index,
data, 4);
1789 __m512
mask = _mm512_set1_ps(-0.f);
1791 res.
data =
reinterpret_cast<__m512
>(
1792 _mm512_andnot_epi32(
reinterpret_cast<__m512i
>(mask),
1793 reinterpret_cast<__m512i
>(
data)));
1824 template <
typename Number2, std::
size_t w
idth2>
1827 template <
typename Number2, std::
size_t w
idth2>
1830 template <
typename Number2, std::
size_t w
idth2>
1834 template <
typename Number2, std::
size_t w
idth2>
1849 const unsigned int * offsets,
1856 const unsigned int n_chunks = n_entries / 4;
1864 __m512 t0, t1, t2, t3;
1867 for (
unsigned int i = 0; i < n_chunks; ++i)
1869 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1870 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1871 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1872 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1873 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1874 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1875 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1876 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1877 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1878 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1879 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1880 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1881 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1882 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1883 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1884 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1886 __m512
v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1887 __m512
v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1888 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1889 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1891 out[4 * i + 0].
data = _mm512_shuffle_ps(
v0, v2, 0x88);
1892 out[4 * i + 1].
data = _mm512_shuffle_ps(
v0, v2, 0xdd);
1893 out[4 * i + 2].
data = _mm512_shuffle_ps(
v1, v3, 0x88);
1894 out[4 * i + 3].
data = _mm512_shuffle_ps(
v1, v3, 0xdd);
1898 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1899 out[i].
gather(in + i, offsets);
1910 const std::array<float *, 16> &in,
1915 const unsigned int n_chunks = n_entries / 4;
1917 __m512 t0, t1, t2, t3;
1920 for (
unsigned int i = 0; i < n_chunks; ++i)
1922 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1923 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1924 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1925 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1926 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1927 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1928 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1929 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1930 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1931 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1932 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1933 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1934 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1935 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1936 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1937 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1939 __m512
v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1940 __m512
v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1941 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1942 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1944 out[4 * i + 0].
data = _mm512_shuffle_ps(
v0, v2, 0x88);
1945 out[4 * i + 1].
data = _mm512_shuffle_ps(
v0, v2, 0xdd);
1946 out[4 * i + 2].
data = _mm512_shuffle_ps(
v1, v3, 0x88);
1947 out[4 * i + 3].
data = _mm512_shuffle_ps(
v1, v3, 0xdd);
1950 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1962 const unsigned int n_entries,
1964 const unsigned int * offsets,
1967 const unsigned int n_chunks = n_entries / 4;
1968 for (
unsigned int i = 0; i < n_chunks; ++i)
1970 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1971 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1973 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1975 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1976 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1977 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1978 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1979 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1981 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1982 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1983 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1984 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1985 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1986 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1987 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1988 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1989 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1990 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1991 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1992 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1993 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1994 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1995 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1996 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2003 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2004 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2005 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2006 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2007 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2008 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2009 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2010 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2011 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2012 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2013 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2014 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2015 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2016 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2017 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2018 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2019 res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
2020 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2021 res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
2022 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2023 res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
2024 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2025 res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
2026 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2027 res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
2028 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2029 res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
2030 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2031 res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
2032 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2033 res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
2034 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2038 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2039 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2040 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2041 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2042 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2043 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2044 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2045 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2046 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2047 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2048 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2049 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2050 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2051 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2052 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2053 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2059 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2060 for (
unsigned int v = 0; v < 16; ++v)
2061 out[offsets[v] + i] += in[i][v];
2063 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2064 for (
unsigned int v = 0; v < 16; ++v)
2065 out[offsets[v] + i] = in[i][v];
2076 const unsigned int n_entries,
2078 std::array<float *, 16> & out)
2082 const unsigned int n_chunks = n_entries / 4;
2083 for (
unsigned int i = 0; i < n_chunks; ++i)
2085 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2086 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2088 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2090 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2091 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2092 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2093 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2094 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2096 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2097 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2098 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2099 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2100 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2101 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2102 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2103 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2104 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2105 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2106 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2107 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2108 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2109 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2110 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2111 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2115 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2116 _mm_storeu_ps(out[0] + 4 * i, res0);
2117 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2118 _mm_storeu_ps(out[1] + 4 * i, res1);
2119 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2120 _mm_storeu_ps(out[2] + 4 * i, res2);
2121 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2122 _mm_storeu_ps(out[3] + 4 * i, res3);
2123 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2124 _mm_storeu_ps(out[4] + 4 * i, res4);
2125 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2126 _mm_storeu_ps(out[5] + 4 * i, res5);
2127 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2128 _mm_storeu_ps(out[6] + 4 * i, res6);
2129 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2130 _mm_storeu_ps(out[7] + 4 * i, res7);
2131 res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2132 _mm_storeu_ps(out[8] + 4 * i, res8);
2133 res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2134 _mm_storeu_ps(out[9] + 4 * i, res9);
2135 res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2136 _mm_storeu_ps(out[10] + 4 * i, res10);
2137 res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2138 _mm_storeu_ps(out[11] + 4 * i, res11);
2139 res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2140 _mm_storeu_ps(out[12] + 4 * i, res12);
2141 res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2142 _mm_storeu_ps(out[13] + 4 * i, res13);
2143 res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2144 _mm_storeu_ps(out[14] + 4 * i, res14);
2145 res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2146 _mm_storeu_ps(out[15] + 4 * i, res15);
2150 _mm_storeu_ps(out[0] + 4 * i, res0);
2151 _mm_storeu_ps(out[1] + 4 * i, res1);
2152 _mm_storeu_ps(out[2] + 4 * i, res2);
2153 _mm_storeu_ps(out[3] + 4 * i, res3);
2154 _mm_storeu_ps(out[4] + 4 * i, res4);
2155 _mm_storeu_ps(out[5] + 4 * i, res5);
2156 _mm_storeu_ps(out[6] + 4 * i, res6);
2157 _mm_storeu_ps(out[7] + 4 * i, res7);
2158 _mm_storeu_ps(out[8] + 4 * i, res8);
2159 _mm_storeu_ps(out[9] + 4 * i, res9);
2160 _mm_storeu_ps(out[10] + 4 * i, res10);
2161 _mm_storeu_ps(out[11] + 4 * i, res11);
2162 _mm_storeu_ps(out[12] + 4 * i, res12);
2163 _mm_storeu_ps(out[13] + 4 * i, res13);
2164 _mm_storeu_ps(out[14] + 4 * i, res14);
2165 _mm_storeu_ps(out[15] + 4 * i, res15);
2170 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2171 for (
unsigned int v = 0; v < 16; ++v)
2172 out[v][i] += in[i][v];
2174 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2175 for (
unsigned int v = 0; v < 16; ++v)
2176 out[v][i] = in[i][v];
2181# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2213 template <
typename U>
2225 data = _mm256_set1_pd(x);
2237 return *(
reinterpret_cast<double *
>(&
data) + comp);
2248 return *(
reinterpret_cast<const double *
>(&
data) + comp);
2263# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2278# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2292# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2307# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2322 load(
const double *ptr)
2324 data = _mm256_loadu_pd(ptr);
2335 store(
double *ptr)
const
2337 _mm256_storeu_pd(ptr,
data);
2348 Assert(
reinterpret_cast<std::size_t
>(ptr) % 32 == 0,
2350 _mm256_stream_pd(ptr,
data);
2367 gather(
const double *base_ptr,
const unsigned int *offsets)
2373 const __m128 index_val =
2374 _mm_loadu_ps(
reinterpret_cast<const float *
>(offsets));
2375 const __m128i
index = *
reinterpret_cast<const __m128i *
>(&index_val);
2380 __m256d zero = _mm256_setzero_pd();
2381 __m256d
mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);
2383 data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2385 for (
unsigned int i = 0; i < 4; ++i)
2386 *(
reinterpret_cast<double *
>(&
data) + i) = base_ptr[offsets[i]];
2404 scatter(
const unsigned int *offsets,
double *base_ptr)
const
2407 for (
unsigned int i = 0; i < 4; ++i)
2408 base_ptr[offsets[i]] = *(
reinterpret_cast<const double *
>(&
data) + i);
2443 __m256d
mask = _mm256_set1_pd(-0.);
2445 res.
data = _mm256_andnot_pd(mask,
data);
2476 template <
typename Number2, std::
size_t w
idth2>
2479 template <
typename Number2, std::
size_t w
idth2>
2482 template <
typename Number2, std::
size_t w
idth2>
2486 template <
typename Number2, std::
size_t w
idth2>
2501 const unsigned int * offsets,
2504 const unsigned int n_chunks = n_entries / 4;
2505 const double * in0 = in + offsets[0];
2506 const double * in1 = in + offsets[1];
2507 const double * in2 = in + offsets[2];
2508 const double * in3 = in + offsets[3];
2510 for (
unsigned int i = 0; i < n_chunks; ++i)
2512 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2513 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2514 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2515 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2516 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2517 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2518 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2519 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2520 out[4 * i + 0].
data = _mm256_unpacklo_pd(t0, t1);
2521 out[4 * i + 1].
data = _mm256_unpackhi_pd(t0, t1);
2522 out[4 * i + 2].
data = _mm256_unpacklo_pd(t2, t3);
2523 out[4 * i + 3].
data = _mm256_unpackhi_pd(t2, t3);
2527 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2528 out[i].
gather(in + i, offsets);
2539 const std::array<double *, 4> &in,
2544 const unsigned int n_chunks = n_entries / 4;
2545 const double * in0 = in[0];
2546 const double * in1 = in[1];
2547 const double * in2 = in[2];
2548 const double * in3 = in[3];
2550 for (
unsigned int i = 0; i < n_chunks; ++i)
2552 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2553 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2554 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2555 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2556 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2557 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2558 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2559 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2560 out[4 * i + 0].
data = _mm256_unpacklo_pd(t0, t1);
2561 out[4 * i + 1].
data = _mm256_unpackhi_pd(t0, t1);
2562 out[4 * i + 2].
data = _mm256_unpacklo_pd(t2, t3);
2563 out[4 * i + 3].
data = _mm256_unpackhi_pd(t2, t3);
2566 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2578 const unsigned int n_entries,
2580 const unsigned int * offsets,
2583 const unsigned int n_chunks = n_entries / 4;
2584 double * out0 = out + offsets[0];
2585 double * out1 = out + offsets[1];
2586 double * out2 = out + offsets[2];
2587 double * out3 = out + offsets[3];
2588 for (
unsigned int i = 0; i < n_chunks; ++i)
2590 __m256d u0 = in[4 * i + 0].
data;
2591 __m256d u1 = in[4 * i + 1].
data;
2592 __m256d u2 = in[4 * i + 2].
data;
2593 __m256d u3 = in[4 * i + 3].
data;
2594 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2595 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2596 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2597 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2598 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2599 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2600 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2601 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2608 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2609 _mm256_storeu_pd(out0 + 4 * i, res0);
2610 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2611 _mm256_storeu_pd(out1 + 4 * i, res1);
2612 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2613 _mm256_storeu_pd(out2 + 4 * i, res2);
2614 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2615 _mm256_storeu_pd(out3 + 4 * i, res3);
2619 _mm256_storeu_pd(out0 + 4 * i, res0);
2620 _mm256_storeu_pd(out1 + 4 * i, res1);
2621 _mm256_storeu_pd(out2 + 4 * i, res2);
2622 _mm256_storeu_pd(out3 + 4 * i, res3);
2628 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2629 for (
unsigned int v = 0; v < 4; ++v)
2630 out[offsets[v] + i] += in[i][v];
2632 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2633 for (
unsigned int v = 0; v < 4; ++v)
2634 out[offsets[v] + i] = in[i][v];
2645 const unsigned int n_entries,
2647 std::array<double *, 4> & out)
2651 const unsigned int n_chunks = n_entries / 4;
2652 double * out0 = out[0];
2653 double * out1 = out[1];
2654 double * out2 = out[2];
2655 double * out3 = out[3];
2656 for (
unsigned int i = 0; i < n_chunks; ++i)
2658 __m256d u0 = in[4 * i + 0].
data;
2659 __m256d u1 = in[4 * i + 1].
data;
2660 __m256d u2 = in[4 * i + 2].
data;
2661 __m256d u3 = in[4 * i + 3].
data;
2662 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2663 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2664 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2665 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2666 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2667 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2668 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2669 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2676 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2677 _mm256_storeu_pd(out0 + 4 * i, res0);
2678 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2679 _mm256_storeu_pd(out1 + 4 * i, res1);
2680 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2681 _mm256_storeu_pd(out2 + 4 * i, res2);
2682 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2683 _mm256_storeu_pd(out3 + 4 * i, res3);
2687 _mm256_storeu_pd(out0 + 4 * i, res0);
2688 _mm256_storeu_pd(out1 + 4 * i, res1);
2689 _mm256_storeu_pd(out2 + 4 * i, res2);
2690 _mm256_storeu_pd(out3 + 4 * i, res3);
2696 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2697 for (
unsigned int v = 0; v < 4; ++v)
2698 out[v][i] += in[i][v];
2700 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2701 for (
unsigned int v = 0; v < 4; ++v)
2702 out[v][i] = in[i][v];
2737 template <
typename U>
2749 data = _mm256_set1_ps(x);
2761 return *(
reinterpret_cast<float *
>(&
data) + comp);
2772 return *(
reinterpret_cast<const float *
>(&
data) + comp);
2787# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2802# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2816# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2831# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2846 load(
const float *ptr)
2848 data = _mm256_loadu_ps(ptr);
2859 store(
float *ptr)
const
2861 _mm256_storeu_ps(ptr,
data);
2872 Assert(
reinterpret_cast<std::size_t
>(ptr) % 32 == 0,
2874 _mm256_stream_ps(ptr,
data);
2891 gather(
const float *base_ptr,
const unsigned int *offsets)
2897 const __m256 index_val =
2898 _mm256_loadu_ps(
reinterpret_cast<const float *
>(offsets));
2899 const __m256i
index = *
reinterpret_cast<const __m256i *
>(&index_val);
2904 __m256 zero = _mm256_setzero_ps();
2905 __m256
mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);
2907 data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
2909 for (
unsigned int i = 0; i < 8; ++i)
2910 *(
reinterpret_cast<float *
>(&
data) + i) = base_ptr[offsets[i]];
2928 scatter(
const unsigned int *offsets,
float *base_ptr)
const
2931 for (
unsigned int i = 0; i < 8; ++i)
2932 base_ptr[offsets[i]] = *(
reinterpret_cast<const float *
>(&
data) + i);
2967 __m256
mask = _mm256_set1_ps(-0.f);
2969 res.
data = _mm256_andnot_ps(mask,
data);
3000 template <
typename Number2, std::
size_t w
idth2>
3003 template <
typename Number2, std::
size_t w
idth2>
3006 template <
typename Number2, std::
size_t w
idth2>
3010 template <
typename Number2, std::
size_t w
idth2>
3025 const unsigned int * offsets,
3028 const unsigned int n_chunks = n_entries / 4;
3029 for (
unsigned int i = 0; i < n_chunks; ++i)
3033 __m256 t0, t1, t2, t3 = {};
3034 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3035 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3036 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3037 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3038 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3039 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3040 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3041 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3043 __m256
v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3044 __m256
v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3045 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3046 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3047 out[4 * i + 0].
data = _mm256_shuffle_ps(
v0, v2, 0x88);
3048 out[4 * i + 1].
data = _mm256_shuffle_ps(
v0, v2, 0xdd);
3049 out[4 * i + 2].
data = _mm256_shuffle_ps(
v1, v3, 0x88);
3050 out[4 * i + 3].
data = _mm256_shuffle_ps(
v1, v3, 0xdd);
3054 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3055 out[i].
gather(in + i, offsets);
3066 const std::array<float *, 8> &in,
3071 const unsigned int n_chunks = n_entries / 4;
3072 for (
unsigned int i = 0; i < n_chunks; ++i)
3074 __m256 t0, t1, t2, t3 = {};
3075 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3076 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3077 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3078 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3079 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3080 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3081 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3082 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3084 __m256
v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3085 __m256
v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3086 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3087 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3088 out[4 * i + 0].
data = _mm256_shuffle_ps(
v0, v2, 0x88);
3089 out[4 * i + 1].
data = _mm256_shuffle_ps(
v0, v2, 0xdd);
3090 out[4 * i + 2].
data = _mm256_shuffle_ps(
v1, v3, 0x88);
3091 out[4 * i + 3].
data = _mm256_shuffle_ps(
v1, v3, 0xdd);
3094 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3106 const unsigned int n_entries,
3108 const unsigned int * offsets,
3111 const unsigned int n_chunks = n_entries / 4;
3112 for (
unsigned int i = 0; i < n_chunks; ++i)
3114 __m256 u0 = in[4 * i + 0].
data;
3115 __m256 u1 = in[4 * i + 1].
data;
3116 __m256 u2 = in[4 * i + 2].
data;
3117 __m256 u3 = in[4 * i + 3].
data;
3118 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3119 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3120 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3121 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3122 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3123 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3124 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3125 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3126 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3127 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3128 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3129 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3130 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3131 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3132 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3133 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3140 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3141 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3142 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3143 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3144 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3145 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3146 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3147 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3148 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3149 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3150 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3151 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3152 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3153 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3154 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3155 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3159 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3160 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3161 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3162 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3163 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3164 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3165 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3166 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3172 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3173 for (
unsigned int v = 0; v < 8; ++v)
3174 out[offsets[v] + i] += in[i][v];
3176 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3177 for (
unsigned int v = 0; v < 8; ++v)
3178 out[offsets[v] + i] = in[i][v];
3189 const unsigned int n_entries,
3191 std::array<float *, 8> & out)
3195 const unsigned int n_chunks = n_entries / 4;
3196 for (
unsigned int i = 0; i < n_chunks; ++i)
3198 __m256 u0 = in[4 * i + 0].
data;
3199 __m256 u1 = in[4 * i + 1].
data;
3200 __m256 u2 = in[4 * i + 2].
data;
3201 __m256 u3 = in[4 * i + 3].
data;
3202 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3203 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3204 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3205 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3206 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3207 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3208 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3209 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3210 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3211 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3212 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3213 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3214 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3215 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3216 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3217 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3221 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3222 _mm_storeu_ps(out[0] + 4 * i, res0);
3223 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3224 _mm_storeu_ps(out[1] + 4 * i, res1);
3225 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3226 _mm_storeu_ps(out[2] + 4 * i, res2);
3227 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3228 _mm_storeu_ps(out[3] + 4 * i, res3);
3229 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3230 _mm_storeu_ps(out[4] + 4 * i, res4);
3231 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3232 _mm_storeu_ps(out[5] + 4 * i, res5);
3233 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3234 _mm_storeu_ps(out[6] + 4 * i, res6);
3235 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3236 _mm_storeu_ps(out[7] + 4 * i, res7);
3240 _mm_storeu_ps(out[0] + 4 * i, res0);
3241 _mm_storeu_ps(out[1] + 4 * i, res1);
3242 _mm_storeu_ps(out[2] + 4 * i, res2);
3243 _mm_storeu_ps(out[3] + 4 * i, res3);
3244 _mm_storeu_ps(out[4] + 4 * i, res4);
3245 _mm_storeu_ps(out[5] + 4 * i, res5);
3246 _mm_storeu_ps(out[6] + 4 * i, res6);
3247 _mm_storeu_ps(out[7] + 4 * i, res7);
3252 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3253 for (
unsigned int v = 0; v < 8; ++v)
3254 out[v][i] += in[i][v];
3256 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3257 for (
unsigned int v = 0; v < 8; ++v)
3258 out[v][i] = in[i][v];
3263# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3295 template <
typename U>
3307 data = _mm_set1_pd(x);
3319 return *(
reinterpret_cast<double *
>(&
data) + comp);
3330 return *(
reinterpret_cast<const double *
>(&
data) + comp);
3340# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3355# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3370# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3385# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3400 load(
const double *ptr)
3402 data = _mm_loadu_pd(ptr);
3413 store(
double *ptr)
const
3415 _mm_storeu_pd(ptr,
data);
3426 Assert(
reinterpret_cast<std::size_t
>(ptr) % 16 == 0,
3428 _mm_stream_pd(ptr,
data);
3445 gather(
const double *base_ptr,
const unsigned int *offsets)
3447 for (
unsigned int i = 0; i < 2; ++i)
3448 *(
reinterpret_cast<double *
>(&
data) + i) = base_ptr[offsets[i]];
3465 scatter(
const unsigned int *offsets,
double *base_ptr)
const
3467 for (
unsigned int i = 0; i < 2; ++i)
3468 base_ptr[offsets[i]] = *(
reinterpret_cast<const double *
>(&
data) + i);
3504 __m128d
mask = _mm_set1_pd(-0.);
3506 res.
data = _mm_andnot_pd(mask,
data);
3537 template <
typename Number2, std::
size_t w
idth2>
3540 template <
typename Number2, std::
size_t w
idth2>
3543 template <
typename Number2, std::
size_t w
idth2>
3547 template <
typename Number2, std::
size_t w
idth2>
3562 const unsigned int * offsets,
3565 const unsigned int n_chunks = n_entries / 2;
3566 for (
unsigned int i = 0; i < n_chunks; ++i)
3568 __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3569 __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3570 out[2 * i + 0].
data = _mm_unpacklo_pd(u0, u1);
3571 out[2 * i + 1].
data = _mm_unpackhi_pd(u0, u1);
3575 for (
unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3576 for (
unsigned int v = 0; v < 2; ++v)
3577 out[i][v] = in[offsets[v] + i];
3588 const std::array<double *, 2> &in,
3593 const unsigned int n_chunks = n_entries / 2;
3594 for (
unsigned int i = 0; i < n_chunks; ++i)
3596 __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3597 __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3598 out[2 * i + 0].
data = _mm_unpacklo_pd(u0, u1);
3599 out[2 * i + 1].
data = _mm_unpackhi_pd(u0, u1);
3602 for (
unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3603 for (
unsigned int v = 0; v < 2; ++v)
3604 out[i][v] = in[v][i];
3615 const unsigned int n_entries,
3617 const unsigned int * offsets,
3620 const unsigned int n_chunks = n_entries / 2;
3623 for (
unsigned int i = 0; i < n_chunks; ++i)
3625 __m128d u0 = in[2 * i + 0].
data;
3626 __m128d u1 = in[2 * i + 1].
data;
3627 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3628 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3629 _mm_storeu_pd(out + 2 * i + offsets[0],
3630 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3632 _mm_storeu_pd(out + 2 * i + offsets[1],
3633 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3637 for (
unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3638 for (
unsigned int v = 0; v < 2; ++v)
3639 out[offsets[v] + i] += in[i][v];
3643 for (
unsigned int i = 0; i < n_chunks; ++i)
3645 __m128d u0 = in[2 * i + 0].
data;
3646 __m128d u1 = in[2 * i + 1].
data;
3647 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3648 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3649 _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3650 _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3653 for (
unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3654 for (
unsigned int v = 0; v < 2; ++v)
3655 out[offsets[v] + i] = in[i][v];
3667 const unsigned int n_entries,
3669 std::array<double *, 2> & out)
3673 const unsigned int n_chunks = n_entries / 2;
3676 for (
unsigned int i = 0; i < n_chunks; ++i)
3678 __m128d u0 = in[2 * i + 0].
data;
3679 __m128d u1 = in[2 * i + 1].
data;
3680 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3681 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3682 _mm_storeu_pd(out[0] + 2 * i,
3683 _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3684 _mm_storeu_pd(out[1] + 2 * i,
3685 _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3688 for (
unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3689 for (
unsigned int v = 0; v < 2; ++v)
3690 out[v][i] += in[i][v];
3694 for (
unsigned int i = 0; i < n_chunks; ++i)
3696 __m128d u0 = in[2 * i + 0].
data;
3697 __m128d u1 = in[2 * i + 1].
data;
3698 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3699 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3700 _mm_storeu_pd(out[0] + 2 * i, res0);
3701 _mm_storeu_pd(out[1] + 2 * i, res1);
3704 for (
unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3705 for (
unsigned int v = 0; v < 2; ++v)
3706 out[v][i] = in[i][v];
3746 template <
typename U>
3755 data = _mm_set1_ps(x);
3767 return *(
reinterpret_cast<float *
>(&
data) + comp);
3778 return *(
reinterpret_cast<const float *
>(&
data) + comp);
3788# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3803# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3818# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3833# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3848 load(
const float *ptr)
3850 data = _mm_loadu_ps(ptr);
3861 store(
float *ptr)
const
3863 _mm_storeu_ps(ptr,
data);
3874 Assert(
reinterpret_cast<std::size_t
>(ptr) % 16 == 0,
3876 _mm_stream_ps(ptr,
data);
3893 gather(
const float *base_ptr,
const unsigned int *offsets)
3895 for (
unsigned int i = 0; i < 4; ++i)
3896 *(
reinterpret_cast<float *
>(&
data) + i) = base_ptr[offsets[i]];
3913 scatter(
const unsigned int *offsets,
float *base_ptr)
const
3915 for (
unsigned int i = 0; i < 4; ++i)
3916 base_ptr[offsets[i]] = *(
reinterpret_cast<const float *
>(&
data) + i);
3951 __m128
mask = _mm_set1_ps(-0.f);
3953 res.
data = _mm_andnot_ps(mask,
data);
3984 template <
typename Number2, std::
size_t w
idth2>
3987 template <
typename Number2, std::
size_t w
idth2>
3990 template <
typename Number2, std::
size_t w
idth2>
3994 template <
typename Number2, std::
size_t w
idth2>
4009 const unsigned int * offsets,
4012 const unsigned int n_chunks = n_entries / 4;
4013 for (
unsigned int i = 0; i < n_chunks; ++i)
4015 __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
4016 __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
4017 __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
4018 __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
4019 __m128
v0 = _mm_shuffle_ps(u0, u1, 0x44);
4020 __m128
v1 = _mm_shuffle_ps(u0, u1, 0xee);
4021 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4022 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4023 out[4 * i + 0].
data = _mm_shuffle_ps(
v0, v2, 0x88);
4024 out[4 * i + 1].
data = _mm_shuffle_ps(
v0, v2, 0xdd);
4025 out[4 * i + 2].
data = _mm_shuffle_ps(
v1, v3, 0x88);
4026 out[4 * i + 3].
data = _mm_shuffle_ps(
v1, v3, 0xdd);
4030 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4031 for (
unsigned int v = 0; v < 4; ++v)
4032 out[i][v] = in[offsets[v] + i];
4043 const std::array<float *, 4> &in,
4048 const unsigned int n_chunks = n_entries / 4;
4049 for (
unsigned int i = 0; i < n_chunks; ++i)
4051 __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
4052 __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
4053 __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
4054 __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
4055 __m128
v0 = _mm_shuffle_ps(u0, u1, 0x44);
4056 __m128
v1 = _mm_shuffle_ps(u0, u1, 0xee);
4057 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4058 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4059 out[4 * i + 0].
data = _mm_shuffle_ps(
v0, v2, 0x88);
4060 out[4 * i + 1].
data = _mm_shuffle_ps(
v0, v2, 0xdd);
4061 out[4 * i + 2].
data = _mm_shuffle_ps(
v1, v3, 0x88);
4062 out[4 * i + 3].
data = _mm_shuffle_ps(
v1, v3, 0xdd);
4065 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4066 for (
unsigned int v = 0; v < 4; ++v)
4067 out[i][v] = in[v][i];
4078 const unsigned int n_entries,
4080 const unsigned int * offsets,
4083 const unsigned int n_chunks = n_entries / 4;
4084 for (
unsigned int i = 0; i < n_chunks; ++i)
4086 __m128 u0 = in[4 * i + 0].
data;
4087 __m128 u1 = in[4 * i + 1].
data;
4088 __m128 u2 = in[4 * i + 2].
data;
4089 __m128 u3 = in[4 * i + 3].
data;
4090 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4091 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4092 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4093 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4094 u0 = _mm_shuffle_ps(t0, t2, 0x88);
4095 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4096 u2 = _mm_shuffle_ps(t1, t3, 0x88);
4097 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4104 u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4105 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4106 u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4107 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4108 u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4109 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4110 u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4111 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4115 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4116 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4117 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4118 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4124 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4125 for (
unsigned int v = 0; v < 4; ++v)
4126 out[offsets[v] + i] += in[i][v];
4128 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4129 for (
unsigned int v = 0; v < 4; ++v)
4130 out[offsets[v] + i] = in[i][v];
4141 const unsigned int n_entries,
4143 std::array<float *, 4> & out)
4147 const unsigned int n_chunks = n_entries / 4;
4148 for (
unsigned int i = 0; i < n_chunks; ++i)
4150 __m128 u0 = in[4 * i + 0].
data;
4151 __m128 u1 = in[4 * i + 1].
data;
4152 __m128 u2 = in[4 * i + 2].
data;
4153 __m128 u3 = in[4 * i + 3].
data;
4154 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4155 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4156 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4157 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4158 u0 = _mm_shuffle_ps(t0, t2, 0x88);
4159 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4160 u2 = _mm_shuffle_ps(t1, t3, 0x88);
4161 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4165 u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4166 _mm_storeu_ps(out[0] + 4 * i, u0);
4167 u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4168 _mm_storeu_ps(out[1] + 4 * i, u1);
4169 u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4170 _mm_storeu_ps(out[2] + 4 * i, u2);
4171 u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4172 _mm_storeu_ps(out[3] + 4 * i, u3);
4176 _mm_storeu_ps(out[0] + 4 * i, u0);
4177 _mm_storeu_ps(out[1] + 4 * i, u1);
4178 _mm_storeu_ps(out[2] + 4 * i, u2);
4179 _mm_storeu_ps(out[3] + 4 * i, u3);
4184 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4185 for (
unsigned int v = 0; v < 4; ++v)
4186 out[v][i] += in[i][v];
4188 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4189 for (
unsigned int v = 0; v < 4; ++v)
4190 out[v][i] = in[i][v];
4197# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4227 template <
typename U>
4239 data = vec_splats(x);
4256 return *(
reinterpret_cast<double *
>(&
data) + comp);
4267 return *(
reinterpret_cast<const double *
>(&
data) + comp);
4320 load(const double *ptr)
4322 data = vec_vsx_ld(0, ptr);
4331 store(double *ptr) const
4333 vec_vsx_st(data, 0, ptr);
4351 gather(const double *base_ptr, const unsigned int *offsets)
4353 for (unsigned int i = 0; i < 2; ++i)
4354 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4362 scatter(const unsigned int *offsets, double *base_ptr) const
4364 for (unsigned int i = 0; i < 2; ++i)
4365 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4373 __vector double data;
4429 template <typename Number2, std::size_t width2>
4432 template <typename Number2, std::size_t width2>
4435 template <typename Number2, std::size_t width2>
4439 template <typename Number2, std::size_t width2>
4474 template <typename U>
4486 data = vec_splats(x);
4503 return *(reinterpret_cast<float *>(&data) + comp);
4514 return *(reinterpret_cast<const float *>(&data) + comp);
4567 load(const float *ptr)
4569 data = vec_vsx_ld(0, ptr);
4578 store(float *ptr) const
4580 vec_vsx_st(data, 0, ptr);
4598 gather(const float *base_ptr, const unsigned int *offsets)
4600 for (unsigned int i = 0; i < 4; ++i)
4601 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4609 scatter(const unsigned int *offsets, float *base_ptr) const
4611 for (unsigned int i = 0; i < 4; ++i)
4612 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4620 __vector float data;
4676 template <typename Number2, std::size_t width2>
4679 template <typename Number2, std::size_t width2>
4682 template <typename Number2, std::size_t width2>
4686 template <typename Number2, std::size_t width2>
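// Semantics sketch (illustrative, not part of the header): on VSX, gather()
// and scatter() of the two-lane double specialization are plain scalar loops,
// so the calls below behave like the commented assignments. Names are
// hypothetical.
inline void
example_vsx_gather_scatter(const double *src, double *dst)
{
  const unsigned int         offsets[2] = {0, 5};
  VectorizedArray<double, 2> v;
  v.gather(src, offsets);  // v[0] = src[0]; v[1] = src[5];
  v.scatter(offsets, dst); // dst[0] = v[0]; dst[5] = v[1];
}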
4708template <typename Number, std::size_t width>
4713 for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4714 if (lhs[i] != rhs[i])
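// Note (illustrative): operator== above returns true only if every lane of
// lhs equals the corresponding lane of rhs; a single differing lane makes the
// comparison fail. For example:
//
//   VectorizedArray<double, 2> a = 1., b = 1.;
//   b[1] = 2.;
//   const bool same = (a == b); // false, lane 1 differs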
4726template <typename Number, std::size_t width>
4740template <typename Number, std::size_t width>
4754template <typename Number, std::size_t width>
4768template <typename Number, std::size_t width>
4783template <typename Number, std::size_t width>
4799template <std::size_t width>
4813template <typename Number, std::size_t width>
4828template <std::size_t width>
4841template <typename Number, std::size_t width>
4857template <std::size_t width>
4871template <typename Number, std::size_t width>
4887template <std::size_t width>
4901template <typename Number, std::size_t width>
4917template <std::size_t width>
4931template <typename Number, std::size_t width>
4946template <std::size_t width>
4959template <typename Number, std::size_t width>
4975template <std::size_t width>
4989template <typename Number, std::size_t width>
5005template <std::size_t width>
5018template <typename Number, std::size_t width>
5030template <typename Number, std::size_t width>
5044template <typename Number, std::size_t width>
5045inline std::ostream &
5049 for (unsigned int i = 0; i < n - 1; ++i)
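// Usage sketch (illustrative): operator<< prints the lanes one after the
// other, separated by single spaces, e.g.
//
//   VectorizedArray<float, 4> v = 1.5f;
//   std::cout << v; // prints "1.5 1.5 1.5 1.5"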
5073#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5154template <SIMDComparison predicate, typename Number>
5157 const Number &right,
5158 const Number &true_value,
5159 const Number &false_value)
5165 mask = (left == right);
5168 mask = (left != right);
5171 mask = (left < right);
5174 mask = (left <= right);
5177 mask = (left > right);
5180 mask = (left >= right);
5184 return mask ? true_value : false_value;
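// Usage sketch (illustrative, not part of the header): the scalar fallback of
// compare_and_apply_mask() evaluates the predicate on two plain numbers and
// returns one of the two candidate values, e.g. (assuming the enumerator
// SIMDComparison::less_than):
//
//   const double r =
//     compare_and_apply_mask<SIMDComparison::less_than>(1.0, 2.0, 10.0, 20.0);
//   // r == 10.0 because 1.0 < 2.0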
5192template <SIMDComparison predicate, typename Number>
5200 result.data = compare_and_apply_mask<predicate, Number>(left.data,
5210# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5212template <SIMDComparison predicate>
5219 const __mmask16 mask =
5220 _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5222 result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5228template <SIMDComparison predicate>
5235 const __mmask16 mask =
5236 _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5238 result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5244# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5246template <SIMDComparison predicate>
5254 _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5257 result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
5262template <SIMDComparison predicate>
5270 _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5273 result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
5279# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5281template <SIMDComparison predicate>
5312 result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5313 _mm_andnot_ps(mask, false_values.data));
5319template <SIMDComparison predicate>
5350 result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5351 _mm_andnot_pd(mask, false_values.data));
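// Semantics sketch (illustrative): each SIMD overload above performs the same
// selection lane by lane, i.e. it is equivalent to
//
//   for (unsigned int v = 0; v < result.size(); ++v)
//     result[v] = compare(left[v], right[v]) ? true_values[v] : false_values[v];
//
// where compare() applies the chosen SIMDComparison predicate; only the
// masking/blending intrinsics used to implement it differ between AVX-512,
// AVX, and SSE2.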
5362 template <typename T>
5368 template <typename T, std::size_t width>
5393 template <typename Number, std::size_t width>
5394 inline ::VectorizedArray<Number, width>
5395 sin(const ::VectorizedArray<Number, width> &x)
5403 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5407 out.load(&values[0]);
5420 template <typename Number, std::size_t width>
5421 inline ::VectorizedArray<Number, width>
5422 cos(const ::VectorizedArray<Number, width> &x)
5425 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5429 out.load(&values[0]);
5442 template <typename Number, std::size_t width>
5443 inline ::VectorizedArray<Number, width>
5444 tan(const ::VectorizedArray<Number, width> &x)
5447 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5451 out.load(&values[0]);
5464 template <typename Number, std::size_t width>
5465 inline ::VectorizedArray<Number, width>
5466 exp(const ::VectorizedArray<Number, width> &x)
5469 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5473 out.load(&values[0]);
5486 template <typename Number, std::size_t width>
5487 inline ::VectorizedArray<Number, width>
5488 log(const ::VectorizedArray<Number, width> &x)
5491 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5495 out.load(&values[0]);
5508 template <typename Number, std::size_t width>
5509 inline ::VectorizedArray<Number, width>
5510 sqrt(const ::VectorizedArray<Number, width> &x)
5512 return x.get_sqrt();
5524 template <typename Number, std::size_t width>
5525 inline ::VectorizedArray<Number, width>
5526 pow(const ::VectorizedArray<Number, width> &x, const Number p)
5529 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5533 out.load(&values[0]);
5547 template <typename Number, std::size_t width>
5548 inline ::VectorizedArray<Number, width>
5549 pow(const ::VectorizedArray<Number, width> &x,
5550 const ::VectorizedArray<Number, width> &p)
5553 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5557 out.load(&values[0]);
5570 template <typename Number, std::size_t width>
5571 inline ::VectorizedArray<Number, width>
5572 abs(const ::VectorizedArray<Number, width> &x)
5586 template <typename Number, std::size_t width>
5587 inline ::VectorizedArray<Number, width>
5588 max(const ::VectorizedArray<Number, width> &x,
5589 const ::VectorizedArray<Number, width> &y)
5591 return x.get_max(y);
5603 template <typename Number, std::size_t width>
5604 inline ::VectorizedArray<Number, width>
5605 min(const ::VectorizedArray<Number, width> &x,
5606 const ::VectorizedArray<Number, width> &y)
5608 return x.get_min(y);
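// Usage sketch (illustrative, not part of the header): with the overloads
// above, the usual std:: math functions act lane-wise on VectorizedArray, e.g.
//
//   VectorizedArray<double> x = 4.0;
//   const auto r = std::sqrt(x) + std::sin(x); // per lane: sqrt(4.0) + sin(4.0)
//   const auto m = std::max(x, 2.0 * x);       // lane-wise maximum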