#ifndef dealii_vectorization_h
#define dealii_vectorization_h

#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
#    error \
      "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#  endif
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
#    error \
      "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#  endif
#endif
#elif defined(__ALTIVEC__)
#  include <altivec.h>
#elif defined(__ARM_NEON)
#  include <arm_neon.h>
#elif defined(__x86_64__)
#  include <x86intrin.h>
#endif
// forward declaration
template <typename Number, std::size_t width>
class VectorizedArray;
122 "You are trying to compare iterators into different arrays."));
134 "You are trying to compare iterators into different arrays."));
  constexpr const typename T::value_type &
  operator*() const
  {
    return (*data)[lane];
  }

  // The non-const dereference is only available when the underlying array
  // type is itself non-const:
  template <typename U = T>
  constexpr std::enable_if_t<!std::is_same_v<U, const U>,
                             typename T::value_type> &
  operator*()
  {
    return (*data)[lane];
  }

  constexpr VectorizedArrayIterator<T> &
  operator--()
  {
    Assert(
      lane > 0,
      ExcMessage(
        "You can't decrement an iterator that is already at the beginning of the range."));
    --lane;
    return *this;
  }

  constexpr std::ptrdiff_t
  operator-(const VectorizedArrayIterator<T> &other) const
  {
    return static_cast<std::ptrdiff_t>(lane) -
           static_cast<std::ptrdiff_t>(other.lane);
  }

private:
  // The array this iterator points into, and the currently selected lane:
  T          *data;
  std::size_t lane;
};
template <typename VectorizedArrayType, std::size_t width>
class VectorizedArrayBase
{
public:
  /**
   * Initialize the lanes from an initializer list; lanes not covered by the
   * list are zero-filled.
   */
  template <typename U>
  constexpr VectorizedArrayBase(const std::initializer_list<U> &list)
  {
    const unsigned int n_initializers = list.size();
    Assert(n_initializers <= size(),
           ExcMessage("The initializer list must have at most "
                      "as many elements as the vector length."));

    std::copy_n(list.begin(), n_initializers, this->begin());

    if (n_initializers < size())
      std::fill(this->begin() + n_initializers, this->end(), 0.0);
  }
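  /**
   * Usage sketch for the initializer-list constructor above (illustrative,
   * hypothetical values): lanes not covered by the list are zero-filled.
   *
   * @code
   *   VectorizedArray<double, 4> a = {1., 2.};
   *   // a[0] == 1., a[1] == 2., a[2] == 0., a[3] == 0.
   * @endcode
   */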
  static constexpr std::size_t
  size()
  {
    return width;
  }

  constexpr VectorizedArrayIterator<VectorizedArrayType>
  begin()
  {
    return VectorizedArrayIterator<VectorizedArrayType>(
      static_cast<VectorizedArrayType &>(*this), 0);
  }

  constexpr VectorizedArrayIterator<const VectorizedArrayType>
  begin() const
  {
    return VectorizedArrayIterator<const VectorizedArrayType>(
      static_cast<const VectorizedArrayType &>(*this), 0);
  }

  constexpr VectorizedArrayIterator<VectorizedArrayType>
  end()
  {
    return VectorizedArrayIterator<VectorizedArrayType>(
      static_cast<VectorizedArrayType &>(*this), width);
  }

  constexpr VectorizedArrayIterator<const VectorizedArrayType>
  end() const
  {
    return VectorizedArrayIterator<const VectorizedArrayType>(
      static_cast<const VectorizedArrayType &>(*this), width);
  }

  /**
   * Dot product between this vector and @p v, summed over all lanes.
   */
  auto
  dot_product(const VectorizedArrayType &v) const
  {
    VectorizedArrayType p = static_cast<const VectorizedArrayType &>(*this);
    p *= v;
    return p.sum();
  }
};
template <typename Number, std::size_t width>
class VectorizedArray
  : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
{
public:
  using value_type = Number;

  VectorizedArray(const Number scalar)
  {
    static_assert(width == 1,
                  "You specified an illegal width that is not supported.");
    this->operator=(scalar);
  }

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<Number, width>, 1>(list)
  {
    static_assert(width == 1,
                  "You specified an illegal width that is not supported.");
  }

  // load() and store() accept any scalar type convertible to Number:
  template <typename OtherNumber>
  void
  load(const OtherNumber *ptr)
  {
    data = *ptr;
  }

  template <typename OtherNumber>
  void
  store(OtherNumber *ptr) const
  {
    *ptr = data;
  }
  void
  gather(const Number *base_ptr, const unsigned int *offsets)
  {
    data = base_ptr[offsets[0]];
  }

  void
  scatter(const unsigned int *offsets, Number *base_ptr) const
  {
    base_ptr[offsets[0]] = data;
  }
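  /**
   * Sketch of the fallback gather/scatter semantics above (illustrative,
   * hypothetical buffers): only offsets[0] is read for this width-1 case.
   *
   * @code
   *   double             src[4]     = {0., 10., 20., 30.};
   *   const unsigned int offsets[1] = {2};
   *   VectorizedArray<double, 1> v;
   *   v.gather(src, offsets);  // v.data == 20.
   *   v.scatter(offsets, src); // writes v.data back to src[2]
   * @endcode
   */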
  /**
   * The actual data field: a single scalar for this fallback implementation.
   */
  Number data;

  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
template <typename Number,
          std::size_t width =
            internal::VectorizedArrayWidthSpecifier<Number>::max_width>
inline VectorizedArray<Number, width>
make_vectorized_array(const Number &u)
{
  VectorizedArray<Number, width> result = u;
  return result;
}

template <typename VectorizedArrayType>
inline VectorizedArrayType
make_vectorized_array(const typename VectorizedArrayType::value_type &u)
{
  static_assert(
    std::is_same_v<VectorizedArrayType,
                   VectorizedArray<typename VectorizedArrayType::value_type,
                                   VectorizedArrayType::size()>>,
    "VectorizedArrayType is not a VectorizedArray.");

  VectorizedArrayType result = u;
  return result;
}
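/**
 * Usage sketch for the two make_vectorized_array() overloads above
 * (illustrative): both broadcast the scalar to every lane.
 *
 * @code
 *   const auto v = make_vectorized_array(1.5); // VectorizedArray<double>
 *   const auto w =
 *     make_vectorized_array<VectorizedArray<float>>(2.0f); // explicit type
 * @endcode
 */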
template <typename Number, std::size_t width>
inline void
gather(VectorizedArray<Number, width>    &out,
       const std::array<Number *, width> &ptrs,
       const unsigned int                 offset)
{
  for (unsigned int v = 0; v < width; ++v)
    out.data[v] = ptrs[v][offset];
}
template <typename Number, std::size_t width>
inline void
vectorized_load_and_transpose(const unsigned int              n_entries,
                              const Number                   *in,
                              const unsigned int             *offsets,
                              VectorizedArray<Number, width> *out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
      out[i][v] = in[offsets[v] + i];
}
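/**
 * Semantics sketch (illustrative): with width w == 2, offsets == {0, 8} and
 * n_entries == 3, the loops above produce
 *
 * @code
 *   out[0] = {in[0], in[8]};
 *   out[1] = {in[1], in[9]};
 *   out[2] = {in[2], in[10]};
 * @endcode
 *
 * i.e. a transpose of w scalar input streams into vectorized form.
 */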
template <typename Number, std::size_t width>
inline void
vectorized_load_and_transpose(const unsigned int                 n_entries,
                              const std::array<Number *, width> &in,
                              VectorizedArray<Number, width>    *out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
      out[i][v] = in[v][i];
}
template <typename Number, std::size_t width>
inline void
vectorized_transpose_and_store(const bool                            add_into,
                               const unsigned int                    n_entries,
                               const VectorizedArray<Number, width> *in,
                               const unsigned int                   *offsets,
                               Number                               *out)
{
  if (add_into)
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[offsets[v] + i] = in[i][v];
}

template <typename Number, std::size_t width>
inline void
vectorized_transpose_and_store(const bool                            add_into,
                               const unsigned int                    n_entries,
                               const VectorizedArray<Number, width> *in,
                               std::array<Number *, width>          &out)
{
  if (add_into)
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[v][i] = in[i][v];
}
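/**
 * Round-trip sketch (illustrative, hypothetical buffer sizes):
 * vectorized_transpose_and_store() inverts vectorized_load_and_transpose();
 * add_into == false overwrites, add_into == true accumulates.
 *
 * @code
 *   constexpr unsigned int w = VectorizedArray<double>::size();
 *   std::vector<double>       scalars(8 * w);
 *   std::vector<unsigned int> offsets(w);
 *   for (unsigned int v = 0; v < w; ++v)
 *     offsets[v] = 8 * v;
 *
 *   std::vector<VectorizedArray<double>> tmp(8);
 *   vectorized_load_and_transpose(8, scalars.data(), offsets.data(),
 *                                 tmp.data());
 *   vectorized_transpose_and_store(false, 8, tmp.data(), offsets.data(),
 *                                  scalars.data()); // scalars unchanged
 * @endcode
 */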
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ARM_NEON)

/**
 * Specialization for double and ARM Neon.
 */
template <>
class VectorizedArray<double, 2>
  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
{
public:
  using value_type = double;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
  {}

  VectorizedArray &
  operator=(const double x) &
  {
    data = vdupq_n_f64(x);
    return *this;
  }

  // Assigning a scalar to a temporary does not make sense; deleted.
  VectorizedArray &
  operator=(const double scalar) && = delete;

  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  void
  load(const double *ptr)
  {
    data = vld1q_f64(ptr);
  }

  void
  load(const float *ptr)
  {
    for (unsigned int i = 0; i < 2; ++i)
      data[i] = ptr[i];
  }

  void
  store(double *ptr) const
  {
    vst1q_f64(ptr, data);
  }

  void
  store(float *ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      ptr[i] = data[i];
  }

  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    vst1q_f64(ptr, data);
  }

  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  double
  sum() const
  {
    return vaddvq_f64(data);
  }

  mutable float64x2_t data;
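  /**
   * Standalone sketch of the reduction used in sum() above with raw NEON
   * intrinsics (assumes an AArch64 target and <arm_neon.h>):
   *
   * @code
   *   double horizontal_sum(const double *p) // p points to two doubles
   *   {
   *     const float64x2_t v = vld1q_f64(p);
   *     return vaddvq_f64(v); // across-vector add: p[0] + p[1]
   *   }
   * @endcode
   */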
  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
/**
 * Specialization for float and ARM Neon.
 */
template <>
class VectorizedArray<float, 4>
  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
{
public:
  using value_type = float;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
  {}

  VectorizedArray &
  operator=(const float x) &
  {
    data = vdupq_n_f32(x);
    return *this;
  }

  VectorizedArray &
  operator=(const float scalar) && = delete;

  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  void
  load(const float *ptr)
  {
    data = vld1q_f32(ptr);
  }

  void
  store(float *ptr) const
  {
    vst1q_f32(ptr, data);
  }

  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    vst1q_f32(ptr, data);
  }

  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  float
  sum() const
  {
    return vaddvq_f32(data);
  }

  mutable float32x4_t data;

  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};

#endif // DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && __ARM_NEON
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)

/**
 * Specialization for double and SSE2.
 */
template <>
class VectorizedArray<double, 2>
  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
{
public:
  using value_type = double;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
  {}

  VectorizedArray &
  operator=(const double x) &
  {
    data = _mm_set1_pd(x);
    return *this;
  }

  VectorizedArray &
  operator=(const double scalar) && = delete;

  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  // The compound assignment operators +=, -=, *=, and /= are each guarded by
  // DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS: plain vector arithmetic
  // (data += vec.data) where the compiler supports it, intrinsics such as
  // _mm_add_pd(data, vec.data) otherwise.

  void
  load(const double *ptr)
  {
    data = _mm_loadu_pd(ptr);
  }

  void
  load(const float *ptr)
  {
    for (unsigned int i = 0; i < 2; ++i)
      data[i] = ptr[i];
  }

  void
  store(double *ptr) const
  {
    _mm_storeu_pd(ptr, data);
  }

  void
  store(float *ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      ptr[i] = data[i];
  }

  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    _mm_stream_pd(ptr, data);
  }

  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  double
  sum() const
  {
    __m128d t1 = _mm_unpackhi_pd(data, data);
    __m128d t2 = _mm_add_pd(data, t1);
    return _mm_cvtsd_f64(t2);
  }

  mutable __m128d data;

private:
  VectorizedArray
  get_abs() const
  {
    // -0. has only the sign bit set
    __m128d mask = _mm_set1_pd(-0.);
    VectorizedArray res;
    res.data = _mm_andnot_pd(mask, data);
    return res;
  }
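  /**
   * Why this works: -0. has only the sign bit set, so the andnot clears the
   * sign bit of every lane -- a branch-free fabs(). Standalone sketch with
   * raw SSE2 intrinsics:
   *
   * @code
   *   __m128d abs_pd(const __m128d x)
   *   {
   *     const __m128d sign_mask = _mm_set1_pd(-0.); // only sign bits set
   *     return _mm_andnot_pd(sign_mask, x);         // clear sign bits
   *   }
   * @endcode
   */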
  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int          n_entries,
                              const double               *in,
                              const unsigned int         *offsets,
                              VectorizedArray<double, 2> *out)
{
  const unsigned int n_chunks = n_entries / 2;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128d u0          = _mm_loadu_pd(in + 2 * i + offsets[0]);
      __m128d u1          = _mm_loadu_pd(in + 2 * i + offsets[1]);
      out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
      out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
    }

  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 2; ++v)
      out[i][v] = in[offsets[v] + i];
}

template <>
inline void
vectorized_load_and_transpose(const unsigned int             n_entries,
                              const std::array<double *, 2> &in,
                              VectorizedArray<double, 2>    *out)
{
  const unsigned int n_chunks = n_entries / 2;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128d u0          = _mm_loadu_pd(in[0] + 2 * i);
      __m128d u1          = _mm_loadu_pd(in[1] + 2 * i);
      out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
      out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
    }

  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 2; ++v)
      out[i][v] = in[v][i];
}
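/**
 * Worked example of one 2x2 chunk in the kernels above (illustrative): if
 * the two input streams hold {a0, a1} and {b0, b1}, then
 *
 * @code
 *   _mm_unpacklo_pd(u0, u1); // {a0, b0} -> out[2 * i + 0]
 *   _mm_unpackhi_pd(u0, u1); // {a1, b1} -> out[2 * i + 1]
 * @endcode
 *
 * which is exactly the 2x2 transpose of the two streams.
 */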
template <>
inline void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 2> *in,
                               const unsigned int               *offsets,
                               double                           *out)
{
  const unsigned int n_chunks = n_entries / 2;
  if (add_into)
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out + 2 * i + offsets[0],
                        _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
                                   res0));
          _mm_storeu_pd(out + 2 * i + offsets[1],
                        _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
                                   res1));
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[offsets[v] + i] += in[i][v];
    }
  else
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out + 2 * i + offsets[0], res0);
          _mm_storeu_pd(out + 2 * i + offsets[1], res1);
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[offsets[v] + i] = in[i][v];
    }
}

template <>
inline void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 2> *in,
                               std::array<double *, 2>          &out)
{
  const unsigned int n_chunks = n_entries / 2;
  if (add_into)
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out[0] + 2 * i,
                        _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
          _mm_storeu_pd(out[1] + 2 * i,
                        _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[v][i] += in[i][v];
    }
  else
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out[0] + 2 * i, res0);
          _mm_storeu_pd(out[1] + 2 * i, res1);
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[v][i] = in[i][v];
    }
}
/**
 * Specialization for float and SSE2.
 */
template <>
class VectorizedArray<float, 4>
  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
{
public:
  using value_type = float;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
  {}

  VectorizedArray &
  operator=(const float x) &
  {
    data = _mm_set1_ps(x);
    return *this;
  }

  VectorizedArray &
  operator=(const float scalar) && = delete;

  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  // As for the double specialization, +=, -=, *=, and /= are guarded by
  // DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS (vector arithmetic vs.
  // _mm_add_ps and friends).

  void
  load(const float *ptr)
  {
    data = _mm_loadu_ps(ptr);
  }

  void
  store(float *ptr) const
  {
    _mm_storeu_ps(ptr, data);
  }

  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    _mm_stream_ps(ptr, data);
  }

  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  float
  sum() const
  {
    __m128 t1 = _mm_movehl_ps(data, data);
    __m128 t2 = _mm_add_ps(data, t1);
    __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
    __m128 t4 = _mm_add_ss(t2, t3);
    return _mm_cvtss_f32(t4);
  }

  mutable __m128 data;
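  /**
   * Reduction sketch for sum() above: with data == {d0, d1, d2, d3},
   *
   * @code
   *   t1 = _mm_movehl_ps(data, data); // {d2, d3, d2, d3}
   *   t2 = _mm_add_ps(data, t1);      // {d0+d2, d1+d3, ...}
   *   t3 = _mm_shuffle_ps(t2, t2, 1); // lane 0 now holds d1+d3
   *   t4 = _mm_add_ss(t2, t3);        // lane 0 holds d0+d1+d2+d3
   * @endcode
   *
   * so the full sum is formed in two vector additions plus one scalar add.
   */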
private:
  VectorizedArray
  get_abs() const
  {
    // -0.f has only the sign bit set
    __m128 mask = _mm_set1_ps(-0.f);
    VectorizedArray res;
    res.data = _mm_andnot_ps(mask, data);
    return res;
  }

  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int         n_entries,
                              const float               *in,
                              const unsigned int        *offsets,
                              VectorizedArray<float, 4> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0           = _mm_loadu_ps(in + 4 * i + offsets[0]);
      __m128 u1           = _mm_loadu_ps(in + 4 * i + offsets[1]);
      __m128 u2           = _mm_loadu_ps(in + 4 * i + offsets[2]);
      __m128 u3           = _mm_loadu_ps(in + 4 * i + offsets[3]);
      __m128 v0           = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 v1           = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 v2           = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 v3           = _mm_shuffle_ps(u2, u3, 0xee);
      out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 4; ++v)
      out[i][v] = in[offsets[v] + i];
}
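/**
 * Shuffle-mask cheat sheet for the 4x4 transpose above, with input rows
 * u0 = {a0..a3}, u1 = {b0..b3}, u2 = {c0..c3}, u3 = {d0..d3}:
 *
 * @code
 *   0x44: low  halves -> v0 = {a0, a1, b0, b1}, v2 = {c0, c1, d0, d1}
 *   0xee: high halves -> v1 = {a2, a3, b2, b3}, v3 = {c2, c3, d2, d3}
 *   0x88: even lanes  -> out[4 * i + 0] = {a0, b0, c0, d0}
 *   0xdd: odd  lanes  -> out[4 * i + 1] = {a1, b1, c1, d1}
 * @endcode
 *
 * (out[4 * i + 2] and out[4 * i + 3] follow from v1/v3 the same way.)
 */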
template <>
inline void
vectorized_load_and_transpose(const unsigned int            n_entries,
                              const std::array<float *, 4> &in,
                              VectorizedArray<float, 4>    *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0           = _mm_loadu_ps(in[0] + 4 * i);
      __m128 u1           = _mm_loadu_ps(in[1] + 4 * i);
      __m128 u2           = _mm_loadu_ps(in[2] + 4 * i);
      __m128 u3           = _mm_loadu_ps(in[3] + 4 * i);
      __m128 v0           = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 v1           = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 v2           = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 v3           = _mm_shuffle_ps(u2, u3, 0xee);
      out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 4; ++v)
      out[i][v] = in[v][i];
}
template <>
inline void
vectorized_transpose_and_store(const bool                       add_into,
                               const unsigned int               n_entries,
                               const VectorizedArray<float, 4> *in,
                               const unsigned int              *offsets,
                               float                           *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0 = in[4 * i + 0].data;
      __m128 u1 = in[4 * i + 1].data;
      __m128 u2 = in[4 * i + 2].data;
      __m128 u3 = in[4 * i + 3].data;
      __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
      u0        = _mm_shuffle_ps(t0, t2, 0x88);
      u1        = _mm_shuffle_ps(t0, t2, 0xdd);
      u2        = _mm_shuffle_ps(t1, t3, 0x88);
      u3        = _mm_shuffle_ps(t1, t3, 0xdd);

      if (add_into)
        {
          u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
          _mm_storeu_ps(out + 4 * i + offsets[0], u0);
          u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
          _mm_storeu_ps(out + 4 * i + offsets[1], u1);
          u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
          _mm_storeu_ps(out + 4 * i + offsets[2], u2);
          u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
          _mm_storeu_ps(out + 4 * i + offsets[3], u3);
        }
      else
        {
          _mm_storeu_ps(out + 4 * i + offsets[0], u0);
          _mm_storeu_ps(out + 4 * i + offsets[1], u1);
          _mm_storeu_ps(out + 4 * i + offsets[2], u2);
          _mm_storeu_ps(out + 4 * i + offsets[3], u3);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] = in[i][v];
}

template <>
inline void
vectorized_transpose_and_store(const bool                       add_into,
                               const unsigned int               n_entries,
                               const VectorizedArray<float, 4> *in,
                               std::array<float *, 4>          &out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0 = in[4 * i + 0].data;
      __m128 u1 = in[4 * i + 1].data;
      __m128 u2 = in[4 * i + 2].data;
      __m128 u3 = in[4 * i + 3].data;
      __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
      u0        = _mm_shuffle_ps(t0, t2, 0x88);
      u1        = _mm_shuffle_ps(t0, t2, 0xdd);
      u2        = _mm_shuffle_ps(t1, t3, 0x88);
      u3        = _mm_shuffle_ps(t1, t3, 0xdd);

      if (add_into)
        {
          u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
          _mm_storeu_ps(out[0] + 4 * i, u0);
          u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
          _mm_storeu_ps(out[1] + 4 * i, u1);
          u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
          _mm_storeu_ps(out[2] + 4 * i, u2);
          u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
          _mm_storeu_ps(out[3] + 4 * i, u3);
        }
      else
        {
          _mm_storeu_ps(out[0] + 4 * i, u0);
          _mm_storeu_ps(out[1] + 4 * i, u1);
          _mm_storeu_ps(out[2] + 4 * i, u2);
          _mm_storeu_ps(out[3] + 4 * i, u3);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] = in[i][v];
}

#endif // DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && __SSE2__
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)

/**
 * Specialization for double and AVX.
 */
template <>
class VectorizedArray<double, 4>
  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
{
public:
  using value_type = double;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 4>, 4>(list)
  {}

  VectorizedArray &
  operator=(const double x) &
  {
    data = _mm256_set1_pd(x);
    return *this;
  }

  VectorizedArray &
  operator=(const double scalar) && = delete;

  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  // +=, -=, *=, and /= are guarded by
  // DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS (vector arithmetic vs.
  // _mm256_add_pd and friends).

  void
  load(const double *ptr)
  {
    data = _mm256_loadu_pd(ptr);
  }

  void
  load(const float *ptr)
  {
    data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
  }

  void
  store(double *ptr) const
  {
    _mm256_storeu_pd(ptr, data);
  }

  void
  store(float *ptr) const
  {
    _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
  }

  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
           ExcMessage("Memory not aligned"));
    _mm256_stream_pd(ptr, data);
  }

  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
#  if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
    // load the four 32-bit offsets via a float load and reinterpret them
    const __m128 index_val =
      _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);

    // a mask with all bits set (zero == zero in every lane)
    __m256d zero = _mm256_setzero_pd();
    __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);

    data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
#  else
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
#  endif
  }
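  /**
   * Note on the AVX2 path above: comparing zero with itself via _CMP_EQ_OQ
   * yields all-ones in every lane, so the masked gather loads all four lanes
   * unconditionally; the final argument 8 is the scale in bytes, since the
   * offsets index doubles rather than bytes.
   */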
  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    // no scatter operation in AVX/AVX2, so fall back to scalar stores
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  double
  sum() const
  {
    VectorizedArray<double, 2> t1;
    t1.data = _mm_add_pd(this->get_lower(), this->get_upper());
    return t1.sum();
  }

  /**
   * Return the lower 128 bits (lanes 0 and 1).
   */
  __m128d
  get_lower() const
  {
    return _mm256_castpd256_pd128(data);
  }

  /**
   * Return the upper 128 bits (lanes 2 and 3).
   */
  __m128d
  get_upper() const
  {
    return _mm256_extractf128_pd(data, 1);
  }

  mutable __m256d data;
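  /**
   * Standalone sketch of the split-and-reduce idea used by sum() above with
   * raw AVX/SSE2 intrinsics:
   *
   * @code
   *   double avx_sum4(const __m256d v)
   *   {
   *     const __m128d lo = _mm256_castpd256_pd128(v);   // lanes 0, 1
   *     const __m128d hi = _mm256_extractf128_pd(v, 1); // lanes 2, 3
   *     const __m128d s  = _mm_add_pd(lo, hi);          // {v0+v2, v1+v3}
   *     return _mm_cvtsd_f64(_mm_add_pd(s, _mm_unpackhi_pd(s, s)));
   *   }
   * @endcode
   */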
private:
  VectorizedArray
  get_abs() const
  {
    __m256d mask = _mm256_set1_pd(-0.);
    VectorizedArray res;
    res.data = _mm256_andnot_pd(mask, data);
    return res;
  }

  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int          n_entries,
                              const double               *in,
                              const unsigned int         *offsets,
                              VectorizedArray<double, 4> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  const double      *in0      = in + offsets[0];
  const double      *in1      = in + offsets[1];
  const double      *in2      = in + offsets[2];
  const double      *in3      = in + offsets[3];

  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0          = _mm256_loadu_pd(in0 + 4 * i);
      __m256d u1          = _mm256_loadu_pd(in1 + 4 * i);
      __m256d u2          = _mm256_loadu_pd(in2 + 4 * i);
      __m256d u3          = _mm256_loadu_pd(in3 + 4 * i);
      __m256d t0          = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1          = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2          = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3          = _mm256_permute2f128_pd(u1, u3, 0x31);
      out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
      out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
      out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
      out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    out[i].gather(in + i, offsets);
}

template <>
inline void
vectorized_load_and_transpose(const unsigned int             n_entries,
                              const std::array<double *, 4> &in,
                              VectorizedArray<double, 4>    *out)
{
  const unsigned int n_chunks = n_entries / 4;
  const double      *in0      = in[0];
  const double      *in1      = in[1];
  const double      *in2      = in[2];
  const double      *in3      = in[3];

  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0          = _mm256_loadu_pd(in0 + 4 * i);
      __m256d u1          = _mm256_loadu_pd(in1 + 4 * i);
      __m256d u2          = _mm256_loadu_pd(in2 + 4 * i);
      __m256d u3          = _mm256_loadu_pd(in3 + 4 * i);
      __m256d t0          = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1          = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2          = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3          = _mm256_permute2f128_pd(u1, u3, 0x31);
      out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
      out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
      out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
      out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 4; ++v)
      out[i][v] = in[v][i];
}
template <>
inline void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 4> *in,
                               const unsigned int               *offsets,
                               double                           *out)
{
  const unsigned int n_chunks = n_entries / 4;
  double            *out0     = out + offsets[0];
  double            *out1     = out + offsets[1];
  double            *out2     = out + offsets[2];
  double            *out3     = out + offsets[3];
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0   = in[4 * i + 0].data;
      __m256d u1   = in[4 * i + 1].data;
      __m256d u2   = in[4 * i + 2].data;
      __m256d u3   = in[4 * i + 3].data;
      __m256d t0   = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1   = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2   = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3   = _mm256_permute2f128_pd(u1, u3, 0x31);
      __m256d res0 = _mm256_unpacklo_pd(t0, t1);
      __m256d res1 = _mm256_unpackhi_pd(t0, t1);
      __m256d res2 = _mm256_unpacklo_pd(t2, t3);
      __m256d res3 = _mm256_unpackhi_pd(t2, t3);

      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
          _mm256_storeu_pd(out0 + 4 * i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
      else
        {
          _mm256_storeu_pd(out0 + 4 * i, res0);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] = in[i][v];
}

template <>
inline void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 4> *in,
                               std::array<double *, 4>          &out)
{
  const unsigned int n_chunks = n_entries / 4;
  double            *out0     = out[0];
  double            *out1     = out[1];
  double            *out2     = out[2];
  double            *out3     = out[3];
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0   = in[4 * i + 0].data;
      __m256d u1   = in[4 * i + 1].data;
      __m256d u2   = in[4 * i + 2].data;
      __m256d u3   = in[4 * i + 3].data;
      __m256d t0   = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1   = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2   = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3   = _mm256_permute2f128_pd(u1, u3, 0x31);
      __m256d res0 = _mm256_unpacklo_pd(t0, t1);
      __m256d res1 = _mm256_unpackhi_pd(t0, t1);
      __m256d res2 = _mm256_unpacklo_pd(t2, t3);
      __m256d res3 = _mm256_unpackhi_pd(t2, t3);

      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
          _mm256_storeu_pd(out0 + 4 * i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
      else
        {
          _mm256_storeu_pd(out0 + 4 * i, res0);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] = in[i][v];
}
/**
 * Specialization for float and AVX.
 */
template <>
class VectorizedArray<float, 8>
  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
{
public:
  using value_type = float;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 8>, 8>(list)
  {}

  VectorizedArray &
  operator=(const float x) &
  {
    data = _mm256_set1_ps(x);
    return *this;
  }

  VectorizedArray &
  operator=(const float scalar) && = delete;

  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  // +=, -=, *=, and /= are guarded by
  // DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS (vector arithmetic vs.
  // _mm256_add_ps and friends).

  void
  load(const float *ptr)
  {
    data = _mm256_loadu_ps(ptr);
  }

  void
  store(float *ptr) const
  {
    _mm256_storeu_ps(ptr, data);
  }

  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
           ExcMessage("Memory not aligned"));
    _mm256_stream_ps(ptr, data);
  }

  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
#  if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
    // load the eight 32-bit offsets via a float load and reinterpret them
    const __m256 index_val =
      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);

    // a mask with all bits set (zero == zero in every lane)
    __m256 zero = _mm256_setzero_ps();
    __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);

    data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
#  else
    for (unsigned int i = 0; i < 8; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
#  endif
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    // no scatter operation in AVX/AVX2, so fall back to scalar stores
    for (unsigned int i = 0; i < 8; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  float
  sum() const
  {
    VectorizedArray<float, 4> t1;
    t1.data = _mm_add_ps(this->get_lower(), this->get_upper());
    return t1.sum();
  }

  /**
   * Return the lower 128 bits (lanes 0-3).
   */
  __m128
  get_lower() const
  {
    return _mm256_castps256_ps128(data);
  }

  /**
   * Return the upper 128 bits (lanes 4-7).
   */
  __m128
  get_upper() const
  {
    return _mm256_extractf128_ps(data, 1);
  }

  mutable __m256 data;

private:
  VectorizedArray
  get_abs() const
  {
    __m256 mask = _mm256_set1_ps(-0.f);
    VectorizedArray res;
    res.data = _mm256_andnot_ps(mask, data);
    return res;
  }

  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int         n_entries,
                              const float               *in,
                              const unsigned int        *offsets,
                              VectorizedArray<float, 8> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      // initialize one variable so the first insertf128 has a defined source
      __m256 t0, t1, t2, t3 = {};
      t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
      t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
      t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
      t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
      t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
      t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);

      __m256 v0           = _mm256_shuffle_ps(t0, t1, 0x44);
      __m256 v1           = _mm256_shuffle_ps(t0, t1, 0xee);
      __m256 v2           = _mm256_shuffle_ps(t2, t3, 0x44);
      __m256 v3           = _mm256_shuffle_ps(t2, t3, 0xee);
      out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    out[i].gather(in + i, offsets);
}

template <>
inline void
vectorized_load_and_transpose(const unsigned int            n_entries,
                              const std::array<float *, 8> &in,
                              VectorizedArray<float, 8>    *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256 t0, t1, t2, t3 = {};
      t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
      t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
      t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
      t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
      t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
      t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);

      __m256 v0           = _mm256_shuffle_ps(t0, t1, 0x44);
      __m256 v1           = _mm256_shuffle_ps(t0, t1, 0xee);
      __m256 v2           = _mm256_shuffle_ps(t2, t3, 0x44);
      __m256 v3           = _mm256_shuffle_ps(t2, t3, 0xee);
      out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 8; ++v)
      out[i][v] = in[v][i];
}
template <>
inline void
vectorized_transpose_and_store(const bool                       add_into,
                               const unsigned int               n_entries,
                               const VectorizedArray<float, 8> *in,
                               const unsigned int              *offsets,
                               float                           *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256 u0 = in[4 * i + 0].data;
      __m256 u1 = in[4 * i + 1].data;
      __m256 u2 = in[4 * i + 2].data;
      __m256 u3 = in[4 * i + 3].data;
      __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
      __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
      __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
      __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
      u0        = _mm256_shuffle_ps(t0, t2, 0x88);
      u1        = _mm256_shuffle_ps(t0, t2, 0xdd);
      u2        = _mm256_shuffle_ps(t1, t3, 0x88);
      u3        = _mm256_shuffle_ps(t1, t3, 0xdd);
      __m128 res0 = _mm256_extractf128_ps(u0, 0);
      __m128 res4 = _mm256_extractf128_ps(u0, 1);
      __m128 res1 = _mm256_extractf128_ps(u1, 0);
      __m128 res5 = _mm256_extractf128_ps(u1, 1);
      __m128 res2 = _mm256_extractf128_ps(u2, 0);
      __m128 res6 = _mm256_extractf128_ps(u2, 1);
      __m128 res3 = _mm256_extractf128_ps(u3, 0);
      __m128 res7 = _mm256_extractf128_ps(u3, 1);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
          _mm_storeu_ps(out + 4 * i + offsets[0], res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
          _mm_storeu_ps(out + 4 * i + offsets[1], res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
          _mm_storeu_ps(out + 4 * i + offsets[2], res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
          _mm_storeu_ps(out + 4 * i + offsets[3], res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
          _mm_storeu_ps(out + 4 * i + offsets[4], res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
          _mm_storeu_ps(out + 4 * i + offsets[5], res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
          _mm_storeu_ps(out + 4 * i + offsets[6], res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
          _mm_storeu_ps(out + 4 * i + offsets[7], res7);
        }
      else
        {
          _mm_storeu_ps(out + 4 * i + offsets[0], res0);
          _mm_storeu_ps(out + 4 * i + offsets[1], res1);
          _mm_storeu_ps(out + 4 * i + offsets[2], res2);
          _mm_storeu_ps(out + 4 * i + offsets[3], res3);
          _mm_storeu_ps(out + 4 * i + offsets[4], res4);
          _mm_storeu_ps(out + 4 * i + offsets[5], res5);
          _mm_storeu_ps(out + 4 * i + offsets[6], res6);
          _mm_storeu_ps(out + 4 * i + offsets[7], res7);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] = in[i][v];
}

template <>
inline void
vectorized_transpose_and_store(const bool                       add_into,
                               const unsigned int               n_entries,
                               const VectorizedArray<float, 8> *in,
                               std::array<float *, 8>          &out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256 u0 = in[4 * i + 0].data;
      __m256 u1 = in[4 * i + 1].data;
      __m256 u2 = in[4 * i + 2].data;
      __m256 u3 = in[4 * i + 3].data;
      __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
      __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
      __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
      __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
      u0        = _mm256_shuffle_ps(t0, t2, 0x88);
      u1        = _mm256_shuffle_ps(t0, t2, 0xdd);
      u2        = _mm256_shuffle_ps(t1, t3, 0x88);
      u3        = _mm256_shuffle_ps(t1, t3, 0xdd);
      __m128 res0 = _mm256_extractf128_ps(u0, 0);
      __m128 res4 = _mm256_extractf128_ps(u0, 1);
      __m128 res1 = _mm256_extractf128_ps(u1, 0);
      __m128 res5 = _mm256_extractf128_ps(u1, 1);
      __m128 res2 = _mm256_extractf128_ps(u2, 0);
      __m128 res6 = _mm256_extractf128_ps(u2, 1);
      __m128 res3 = _mm256_extractf128_ps(u3, 0);
      __m128 res7 = _mm256_extractf128_ps(u3, 1);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
          _mm_storeu_ps(out[0] + 4 * i, res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
          _mm_storeu_ps(out[7] + 4 * i, res7);
        }
      else
        {
          _mm_storeu_ps(out[0] + 4 * i, res0);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          _mm_storeu_ps(out[7] + 4 * i, res7);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] = in[i][v];
}

#endif // DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && __AVX__
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)

/**
 * Specialization for double and AVX-512.
 */
template <>
class VectorizedArray<double, 8>
  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
{
public:
  using value_type = double;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
  {}

  VectorizedArray &
  operator=(const double x) &
  {
    data = _mm512_set1_pd(x);
    return *this;
  }

  VectorizedArray &
  operator=(const double scalar) && = delete;

  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  // +=, -=, *=, and /= are guarded by
  // DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS (vector arithmetic vs.
  // _mm512_add_pd and friends).

  void
  load(const double *ptr)
  {
    data = _mm512_loadu_pd(ptr);
  }

  void
  load(const float *ptr)
  {
    data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
  }

  void
  store(double *ptr) const
  {
    _mm512_storeu_pd(ptr, data);
  }

  void
  store(float *ptr) const
  {
    _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
  }

  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
           ExcMessage("Memory not aligned"));
    _mm512_stream_pd(ptr, data);
  }

  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
#  ifdef DEAL_II_USE_VECTORIZATION_GATHER
    // load the eight 32-bit offsets via a float load and reinterpret them
    const __m256 index_val =
      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);

    // a mask with all eight bits set selects every lane
    const __m512d zero = {};
    __mmask8      mask = 0xFF;

    data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
#  else
    for (unsigned int i = 0; i < 8; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
#  endif
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
#  ifdef DEAL_II_USE_VECTORIZATION_GATHER
    for (unsigned int i = 0; i < 8; ++i)
      for (unsigned int j = i + 1; j < 8; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    const __m256 index_val =
      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
    _mm512_i32scatter_pd(base_ptr, index, data, 8);
#  else
    for (unsigned int i = 0; i < 8; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
#  endif
  }
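  /**
   * Sketch of the case the Assert above rejects (hypothetical offsets): the
   * result of a hardware scatter is declared undefined here when two offsets
   * coincide, so duplicates are rejected in debug mode.
   *
   * @code
   *   const unsigned int offsets[8] = {0, 1, 2, 3, 4, 5, 6, 0}; // 0 twice
   *   // v.scatter(offsets, base_ptr); // triggers the Assert in debug mode
   * @endcode
   */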
  double
  sum() const
  {
    VectorizedArray<double, 4> t1;
    t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());
    return t1.sum();
  }

  /**
   * Return the lower 256 bits (lanes 0-3).
   */
  __m256d
  get_lower() const
  {
    return _mm512_castpd512_pd256(data);
  }

  /**
   * Return the upper 256 bits (lanes 4-7).
   */
  __m256d
  get_upper() const
  {
    return _mm512_extractf64x4_pd(data, 1);
  }

  mutable __m512d data;

private:
  VectorizedArray
  get_abs() const
  {
    // -0. has only the sign bit set; clear it with an integer andnot
    __m512d mask = _mm512_set1_pd(-0.);
    VectorizedArray res;
    res.data = reinterpret_cast<__m512d>(
      _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
                          reinterpret_cast<__m512i>(data)));
    return res;
  }
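  /**
   * Note: plain AVX-512F provides no _mm512_andnot_pd (that variant needs
   * AVX-512DQ), which is why the sign-bit mask is applied in the integer
   * domain via _mm512_andnot_epi64 and bit-casts; the effect is the same
   * branch-free fabs() as in the SSE2/AVX specializations above.
   */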
  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int          n_entries,
                              const double               *in,
                              const unsigned int         *offsets,
                              VectorizedArray<double, 8> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512d t0, t1, t2, t3 = {};

      t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
      t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
      t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
      t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
      t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
      t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);

      __m512d v0          = _mm512_shuffle_f64x2(t0, t2, 0x88);
      __m512d v1          = _mm512_shuffle_f64x2(t0, t2, 0xdd);
      __m512d v2          = _mm512_shuffle_f64x2(t1, t3, 0x88);
      __m512d v3          = _mm512_shuffle_f64x2(t1, t3, 0xdd);
      out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
      out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
      out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
      out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    out[i].gather(in + i, offsets);
}

template <>
inline void
vectorized_load_and_transpose(const unsigned int             n_entries,
                              const std::array<double *, 8> &in,
                              VectorizedArray<double, 8>    *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512d t0, t1, t2, t3 = {};

      t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
      t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
      t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
      t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
      t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
      t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);

      __m512d v0          = _mm512_shuffle_f64x2(t0, t2, 0x88);
      __m512d v1          = _mm512_shuffle_f64x2(t0, t2, 0xdd);
      __m512d v2          = _mm512_shuffle_f64x2(t1, t3, 0x88);
      __m512d v3          = _mm512_shuffle_f64x2(t1, t3, 0xdd);
      out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
      out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
      out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
      out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 8; ++v)
      out[i][v] = in[v][i];
}
template <>
inline void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 8> *in,
                               const unsigned int               *offsets,
                               double                           *out)
{
  const unsigned int n_chunks = n_entries / 4;
  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
      __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
      __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
      __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
      __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
      __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
      __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
      __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
      __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
      __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
      __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
      __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
      __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
      __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
      __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
      __m256d res7 = _mm512_extractf64x4_pd(v3, 1);

      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]),
                               res0);
          _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]),
                               res1);
          _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]),
                               res2);
          _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]),
                               res3);
          _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
          res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]),
                               res4);
          _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
          res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]),
                               res5);
          _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
          res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]),
                               res6);
          _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
          res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]),
                               res7);
          _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
        }
      else
        {
          _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
          _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
          _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
          _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
          _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
          _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
          _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
          _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] = in[i][v];
}

template <>
inline void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 8> *in,
                               std::array<double *, 8>          &out)
{
  const unsigned int n_chunks = n_entries / 4;
  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
      __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
      __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
      __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
      __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
      __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
      __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
      __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
      __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
      __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
      __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
      __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
      __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
      __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
      __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
      __m256d res7 = _mm512_extractf64x4_pd(v3, 1);

      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
          _mm256_storeu_pd(out[0] + 4 * i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
          _mm256_storeu_pd(out[1] + 4 * i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
          _mm256_storeu_pd(out[2] + 4 * i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
          _mm256_storeu_pd(out[3] + 4 * i, res3);
          res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
          _mm256_storeu_pd(out[4] + 4 * i, res4);
          res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
          _mm256_storeu_pd(out[5] + 4 * i, res5);
          res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
          _mm256_storeu_pd(out[6] + 4 * i, res6);
          res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
          _mm256_storeu_pd(out[7] + 4 * i, res7);
        }
      else
        {
          _mm256_storeu_pd(out[0] + 4 * i, res0);
          _mm256_storeu_pd(out[1] + 4 * i, res1);
          _mm256_storeu_pd(out[2] + 4 * i, res2);
          _mm256_storeu_pd(out[3] + 4 * i, res3);
          _mm256_storeu_pd(out[4] + 4 * i, res4);
          _mm256_storeu_pd(out[5] + 4 * i, res5);
          _mm256_storeu_pd(out[6] + 4 * i, res6);
          _mm256_storeu_pd(out[7] + 4 * i, res7);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] = in[i][v];
}
/**
 * Specialization for float and AVX-512.
 */
template <>
class VectorizedArray<float, 16>
  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
{
public:
  using value_type = float;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
  {}

  VectorizedArray &
  operator=(const float x) &
  {
    data = _mm512_set1_ps(x);
    return *this;
  }

  VectorizedArray &
  operator=(const float scalar) && = delete;

  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  // +=, -=, *=, and /= are guarded by
  // DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS (vector arithmetic vs.
  // _mm512_add_ps and friends).

  void
  load(const float *ptr)
  {
    data = _mm512_loadu_ps(ptr);
  }

  void
  store(float *ptr) const
  {
    _mm512_storeu_ps(ptr, data);
  }

  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
           ExcMessage("Memory not aligned"));
    _mm512_stream_ps(ptr, data);
  }

  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
#  ifdef DEAL_II_USE_VECTORIZATION_GATHER
    // load the sixteen 32-bit offsets via a float load and reinterpret them
    const __m512 index_val =
      _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);

    // a mask with all sixteen bits set selects every lane
    const __m512 zero = {};
    __mmask16    mask = 0xFFFF;

    data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
#  else
    for (unsigned int i = 0; i < 16; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
#  endif
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
#  ifdef DEAL_II_USE_VECTORIZATION_GATHER
    for (unsigned int i = 0; i < 16; ++i)
      for (unsigned int j = i + 1; j < 16; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    const __m512 index_val =
      _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
    _mm512_i32scatter_ps(base_ptr, index, data, 4);
#  else
    for (unsigned int i = 0; i < 16; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
#  endif
  }

  float
  sum() const
  {
    VectorizedArray<float, 8> t1;
    t1.data = _mm256_add_ps(this->get_lower(), this->get_upper());
    return t1.sum();
  }

  /**
   * Return the lower 256 bits (lanes 0-7).
   */
  __m256
  get_lower() const
  {
    return _mm512_castps512_ps256(data);
  }

  /**
   * Return the upper 256 bits (lanes 8-15).
   */
  __m256
  get_upper() const
  {
    return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1));
  }

  mutable __m512 data;

private:
  VectorizedArray
  get_abs() const
  {
    // -0.f has only the sign bit set; clear it with an integer andnot
    __m512 mask = _mm512_set1_ps(-0.f);
    VectorizedArray res;
    res.data = reinterpret_cast<__m512>(
      _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
                          reinterpret_cast<__m512i>(data)));
    return res;
  }

  // Make a few functions friends.
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
template <>
inline void
vectorized_load_and_transpose(const unsigned int          n_entries,
                              const float                *in,
                              const unsigned int         *offsets,
                              VectorizedArray<float, 16> *out)
{
  const unsigned int n_chunks = n_entries / 4;

  // zero-initialize t3 so the first insertf32x4 has a defined source
  __m512 t0, t1, t2, t3 = {};

  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
      t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
      t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
      t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
      t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
      t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
      t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
      t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
      t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
      t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
      t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
      t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);

      __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
      __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
      __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
      __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);

      out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    out[i].gather(in + i, offsets);
}

template <>
inline void
vectorized_load_and_transpose(const unsigned int             n_entries,
                              const std::array<float *, 16> &in,
                              VectorizedArray<float, 16>    *out)
{
  const unsigned int n_chunks = n_entries / 4;

  __m512 t0, t1, t2, t3 = {};

  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
      t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
      t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
      t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
      t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
      t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
      t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
      t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
      t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
      t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
      t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
      t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);

      __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
      __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
      __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
      __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);

      out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 16; ++v)
      out[i][v] = in[v][i];
}
4908 const unsigned int n_entries,
                               const unsigned int               *offsets,
                               float                            *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      // transpose four 16-wide registers into sixteen 4-wide chunks
      __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
      __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
      __m512 t2 =
        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
      __m512 t3 =
        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
      __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
      __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
      __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
      __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);

      __m128 res0  = _mm512_extractf32x4_ps(u0, 0);
      __m128 res4  = _mm512_extractf32x4_ps(u0, 1);
      __m128 res8  = _mm512_extractf32x4_ps(u0, 2);
      __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
      __m128 res1  = _mm512_extractf32x4_ps(u1, 0);
      __m128 res5  = _mm512_extractf32x4_ps(u1, 1);
      __m128 res9  = _mm512_extractf32x4_ps(u1, 2);
      __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
      __m128 res2  = _mm512_extractf32x4_ps(u2, 0);
      __m128 res6  = _mm512_extractf32x4_ps(u2, 1);
      __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
      __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
      __m128 res3  = _mm512_extractf32x4_ps(u3, 0);
      __m128 res7  = _mm512_extractf32x4_ps(u3, 1);
      __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
      __m128 res15 = _mm512_extractf32x4_ps(u3, 3);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
          _mm_storeu_ps(out + 4 * i + offsets[0], res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
          _mm_storeu_ps(out + 4 * i + offsets[1], res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
          _mm_storeu_ps(out + 4 * i + offsets[2], res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
          _mm_storeu_ps(out + 4 * i + offsets[3], res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
          _mm_storeu_ps(out + 4 * i + offsets[4], res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
          _mm_storeu_ps(out + 4 * i + offsets[5], res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
          _mm_storeu_ps(out + 4 * i + offsets[6], res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
          _mm_storeu_ps(out + 4 * i + offsets[7], res7);
          res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
          _mm_storeu_ps(out + 4 * i + offsets[8], res8);
          res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
          _mm_storeu_ps(out + 4 * i + offsets[9], res9);
          res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
          _mm_storeu_ps(out + 4 * i + offsets[10], res10);
          res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
          _mm_storeu_ps(out + 4 * i + offsets[11], res11);
          res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
          _mm_storeu_ps(out + 4 * i + offsets[12], res12);
          res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
          _mm_storeu_ps(out + 4 * i + offsets[13], res13);
          res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
          _mm_storeu_ps(out + 4 * i + offsets[14], res14);
          res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
          _mm_storeu_ps(out + 4 * i + offsets[15], res15);
        }
      else
        {
          _mm_storeu_ps(out + 4 * i + offsets[0], res0);
          _mm_storeu_ps(out + 4 * i + offsets[1], res1);
          _mm_storeu_ps(out + 4 * i + offsets[2], res2);
          _mm_storeu_ps(out + 4 * i + offsets[3], res3);
          _mm_storeu_ps(out + 4 * i + offsets[4], res4);
          _mm_storeu_ps(out + 4 * i + offsets[5], res5);
          _mm_storeu_ps(out + 4 * i + offsets[6], res6);
          _mm_storeu_ps(out + 4 * i + offsets[7], res7);
          _mm_storeu_ps(out + 4 * i + offsets[8], res8);
          _mm_storeu_ps(out + 4 * i + offsets[9], res9);
          _mm_storeu_ps(out + 4 * i + offsets[10], res10);
          _mm_storeu_ps(out + 4 * i + offsets[11], res11);
          _mm_storeu_ps(out + 4 * i + offsets[12], res12);
          _mm_storeu_ps(out + 4 * i + offsets[13], res13);
          _mm_storeu_ps(out + 4 * i + offsets[14], res14);
          _mm_storeu_ps(out + 4 * i + offsets[15], res15);
        }
    }

  // scalar remainder loop for the last n_entries % 4 entries
  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[offsets[v] + i] = in[i][v];
}
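// Illustrative usage sketch (not part of the header): transpose four
// 16-lane arrays back into 16 scattered float streams. The buffer names,
// sizes, and offsets below are hypothetical.
//
//   VectorizedArray<float, 16> values[4]; // 4 entries per lane
//   float                      storage[16 * 4]; // 16 streams of 4 floats
//   unsigned int               offsets[16];
//   for (unsigned int v = 0; v < 16; ++v)
//     offsets[v] = 4 * v; // stream v starts at index 4*v
//   // write (rather than accumulate into) the transposed data:
//   vectorized_transpose_and_store(false, 4, values, offsets, storage);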
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<float, 16> *in,
                               std::array<float *, 16>          &out)
{
  // see the comments in the vectorized_transpose_and_store() above
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
      __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
      __m512 t2 =
        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
      __m512 t3 =
        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
      __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
      __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
      __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
      __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);

      __m128 res0  = _mm512_extractf32x4_ps(u0, 0);
      __m128 res4  = _mm512_extractf32x4_ps(u0, 1);
      __m128 res8  = _mm512_extractf32x4_ps(u0, 2);
      __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
      __m128 res1  = _mm512_extractf32x4_ps(u1, 0);
      __m128 res5  = _mm512_extractf32x4_ps(u1, 1);
      __m128 res9  = _mm512_extractf32x4_ps(u1, 2);
      __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
      __m128 res2  = _mm512_extractf32x4_ps(u2, 0);
      __m128 res6  = _mm512_extractf32x4_ps(u2, 1);
      __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
      __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
      __m128 res3  = _mm512_extractf32x4_ps(u3, 0);
      __m128 res7  = _mm512_extractf32x4_ps(u3, 1);
      __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
      __m128 res15 = _mm512_extractf32x4_ps(u3, 3);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
          _mm_storeu_ps(out[0] + 4 * i, res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
          _mm_storeu_ps(out[7] + 4 * i, res7);
          res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
          _mm_storeu_ps(out[8] + 4 * i, res8);
          res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
          _mm_storeu_ps(out[9] + 4 * i, res9);
          res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
          _mm_storeu_ps(out[10] + 4 * i, res10);
          res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
          _mm_storeu_ps(out[11] + 4 * i, res11);
          res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
          _mm_storeu_ps(out[12] + 4 * i, res12);
          res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
          _mm_storeu_ps(out[13] + 4 * i, res13);
          res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
          _mm_storeu_ps(out[14] + 4 * i, res14);
          res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
          _mm_storeu_ps(out[15] + 4 * i, res15);
        }
      else
        {
          _mm_storeu_ps(out[0] + 4 * i, res0);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          _mm_storeu_ps(out[7] + 4 * i, res7);
          _mm_storeu_ps(out[8] + 4 * i, res8);
          _mm_storeu_ps(out[9] + 4 * i, res9);
          _mm_storeu_ps(out[10] + 4 * i, res10);
          _mm_storeu_ps(out[11] + 4 * i, res11);
          _mm_storeu_ps(out[12] + 4 * i, res12);
          _mm_storeu_ps(out[13] + 4 * i, res13);
          _mm_storeu_ps(out[14] + 4 * i, res14);
          _mm_storeu_ps(out[15] + 4 * i, res15);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[v][i] = in[i][v];
}
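// Illustrative sketch (not part of the header): the std::array overload
// takes 16 independent destination pointers instead of a common base
// pointer plus offsets. All names below are hypothetical.
//
//   VectorizedArray<float, 16> values[4];
//   std::vector<float>      streams[16]; // 16 separately owned buffers
//   std::array<float *, 16> dst;
//   for (unsigned int v = 0; v < 16; ++v)
//     {
//       streams[v].resize(4);
//       dst[v] = streams[v].data();
//     }
//   vectorized_transpose_and_store(true, 4, values, dst); // accumulate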
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
   defined(__VSX__)

template <>
class VectorizedArray<double, 2>
  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
{
public:
  using value_type = double;

  VectorizedArray() = default;

  VectorizedArray(const double scalar)
  {
    this->operator=(scalar);
  }

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
  {}

  // broadcast a scalar to both lanes
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const double x) &
  {
    data = vec_splats(x);
    return *this;
  }

  // assigning a scalar to a temporary is most likely a bug, so delete it
  VectorizedArray &
  operator=(const double scalar) && = delete;

  DEAL_II_ALWAYS_INLINE
  double &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  void
  load(const double *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(double *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  // the actual VSX vector register holding the two doubles
  __vector double data;
  // make the std:: math wrappers friends so they can access 'data' directly
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
template <>
class VectorizedArray<float, 4>
  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
{
public:
  using value_type = float;

  VectorizedArray() = default;

  VectorizedArray(const float scalar)
  {
    this->operator=(scalar);
  }

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
  {}

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const float x) &
  {
    data = vec_splats(x);
    return *this;
  }

  VectorizedArray &
  operator=(const float scalar) && = delete;

  DEAL_II_ALWAYS_INLINE
  float &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  void
  load(const float *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(float *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  // the actual VSX vector register holding the four floats
  __vector float data;

  // make the std:: math wrappers friends, as above
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};

# endif // __ALTIVEC__ && __VSX__
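// Illustrative sketch (not part of the header): the VSX specializations are
// used like any other VectorizedArray. Buffer names are hypothetical.
//
//   double       src[4] = {1., 2., 3., 4.};
//   unsigned int idx[2] = {0, 3};
//   VectorizedArray<double, 2> v;
//   v.gather(src, idx); // v = {1., 4.}
//   v = v * v;          // lane-wise square
//   double dst[2];
//   v.store(dst);       // dst = {1., 16.}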
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE bool
operator==(const VectorizedArray<Number, width> &lhs,
           const VectorizedArray<Number, width> &rhs)
{
  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
    if (lhs[i] != rhs[i])
      return false;

  return true;
}
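// Illustrative sketch (not from the header): operator== returns a single
// bool and is true only if *all* lanes agree.
//
//   VectorizedArray<float, 4> a = 1.0f, b = 1.0f; // broadcast constructors
//   b[2] = 7.0f;
//   const bool same = (a == b); // false: lane 2 differs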
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator+(const VectorizedArray<Number, width> &u,
          const VectorizedArray<Number, width> &v)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp += v;
}

template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator-(const VectorizedArray<Number, width> &u,
          const VectorizedArray<Number, width> &v)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp -= v;
}

template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator*(const VectorizedArray<Number, width> &u,
          const VectorizedArray<Number, width> &v)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp *= v;
}

template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator/(const VectorizedArray<Number, width> &u,
          const VectorizedArray<Number, width> &v)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp /= v;
}

// addition of a scalar (expanded to all lanes), in the four combinations of
// scalar/vector operand order and matching/mixed float/double types
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator+(const VectorizedArray<Number, width> &v, const Number &u)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp += v;
}
template <std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
operator+(const VectorizedArray<float, width> &v, const double u)
{
  VectorizedArray<float, width> tmp = static_cast<float>(u);
  return tmp += v;
}
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator+(const Number &u, const VectorizedArray<Number, width> &v)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp += v;
}
template <std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
operator+(const double u, const VectorizedArray<float, width> &v)
{
  VectorizedArray<float, width> tmp = static_cast<float>(u);
  return tmp += v;
}

// subtraction involving a broadcast scalar
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator-(const VectorizedArray<Number, width> &v, const Number &u)
{
  VectorizedArray<Number, width> tmp = u;
  return v - tmp;
}
template <std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
operator-(const VectorizedArray<float, width> &v, const double u)
{
  VectorizedArray<float, width> tmp = static_cast<float>(u);
  return v - tmp;
}
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator-(const Number &u, const VectorizedArray<Number, width> &v)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp -= v;
}
template <std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
operator-(const double u, const VectorizedArray<float, width> &v)
{
  VectorizedArray<float, width> tmp = static_cast<float>(u);
  return tmp -= v;
}

// multiplication by a broadcast scalar
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator*(const VectorizedArray<Number, width> &v, const Number &u)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp *= v;
}
template <std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
operator*(const VectorizedArray<float, width> &v, const double u)
{
  VectorizedArray<float, width> tmp = static_cast<float>(u);
  return tmp *= v;
}
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator*(const Number &u, const VectorizedArray<Number, width> &v)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp *= v;
}
template <std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
operator*(const double u, const VectorizedArray<float, width> &v)
{
  VectorizedArray<float, width> tmp = static_cast<float>(u);
  return tmp *= v;
}

// division involving a broadcast scalar
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator/(const VectorizedArray<Number, width> &v, const Number &u)
{
  VectorizedArray<Number, width> tmp = u;
  return v / tmp;
}
template <std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
operator/(const VectorizedArray<float, width> &v, const double u)
{
  VectorizedArray<float, width> tmp = static_cast<float>(u);
  return v / tmp;
}
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator/(const Number &u, const VectorizedArray<Number, width> &v)
{
  VectorizedArray<Number, width> tmp = u;
  return tmp /= v;
}
template <std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
operator/(const double u, const VectorizedArray<float, width> &v)
{
  VectorizedArray<float, width> tmp = static_cast<float>(u);
  return tmp /= v;
}

// unary plus: returns the argument unchanged
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator+(const VectorizedArray<Number, width> &u)
{
  return u;
}

// unary minus: negate by subtracting from zero
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
operator-(const VectorizedArray<Number, width> &u)
{
  VectorizedArray<Number, width> tmp = Number();
  return tmp -= u;
}

// output one entry per lane, separated by spaces
template <typename Number, std::size_t width>
inline std::ostream &
operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
{
  constexpr unsigned int n = VectorizedArray<Number, width>::size();
  for (unsigned int i = 0; i < n - 1; ++i)
    out << p[i] << ' ';
  out << p[n - 1];

  return out;
}
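// Illustrative sketch (not from the header): the free operators make a
// VectorizedArray behave like a scalar in expressions.
//
//   #include <iostream>
//   VectorizedArray<double, 2> a = 3.0, b = 4.0; // broadcast constructors
//   VectorizedArray<double, 2> c = 2.0 * a + b / 2.0 - a;
//   std::cout << c << '\n'; // prints "5 5" (all lanes identical here)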
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
enum class SIMDComparison : int
{
  equal                 = _CMP_EQ_OQ,
  not_equal             = _CMP_NEQ_OQ,
  less_than             = _CMP_LT_OQ,
  less_than_or_equal    = _CMP_LE_OQ,
  greater_than          = _CMP_GT_OQ,
  greater_than_or_equal = _CMP_GE_OQ
};
#else
enum class SIMDComparison : int
{
  equal,
  not_equal,
  less_than,
  less_than_or_equal,
  greater_than,
  greater_than_or_equal
};
#endif


template <SIMDComparison predicate, typename Number>
DEAL_II_ALWAYS_INLINE inline Number
compare_and_apply_mask(const Number &left,
                       const Number &right,
                       const Number &true_value,
                       const Number &false_value)
{
  bool mask = false;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = (left == right);
        break;
      case SIMDComparison::not_equal:
        mask = (left != right);
        break;
      case SIMDComparison::less_than:
        mask = (left < right);
        break;
      case SIMDComparison::less_than_or_equal:
        mask = (left <= right);
        break;
      case SIMDComparison::greater_than:
        mask = (left > right);
        break;
      case SIMDComparison::greater_than_or_equal:
        mask = (left >= right);
        break;
    }

  return mask ? true_value : false_value;
}
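// Illustrative sketch (not from the header): select a value based on a
// comparison; with scalar arguments this is equivalent to a ternary.
//
//   double x = 3.0, bound = 2.0;
//   // clamp from above, i.e. (x > bound) ? bound : x
//   double clamped =
//     compare_and_apply_mask<SIMDComparison::greater_than>(x, bound, bound, x);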
template <SIMDComparison predicate, typename Number>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
                       const VectorizedArray<Number, 1> &right,
                       const VectorizedArray<Number, 1> &true_value,
                       const VectorizedArray<Number, 1> &false_value)
{
  VectorizedArray<Number, 1> result;
  result.data = compare_and_apply_mask<predicate, Number>(left.data,
                                                          right.data,
                                                          true_value.data,
                                                          false_value.data);
  return result;
}
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
compare_and_apply_mask(const VectorizedArray<float, 16> &left,
                       const VectorizedArray<float, 16> &right,
                       const VectorizedArray<float, 16> &true_values,
                       const VectorizedArray<float, 16> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 16> result;
  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
  return result;
}

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
compare_and_apply_mask(const VectorizedArray<double, 8> &left,
                       const VectorizedArray<double, 8> &right,
                       const VectorizedArray<double, 8> &true_values,
                       const VectorizedArray<double, 8> &false_values)
{
  // eight double lanes only need an 8-bit mask
  const __mmask8 mask =
    _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 8> result;
  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
  return result;
}

# endif
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
compare_and_apply_mask(const VectorizedArray<float, 8> &left,
                       const VectorizedArray<float, 8> &right,
                       const VectorizedArray<float, 8> &true_values,
                       const VectorizedArray<float, 8> &false_values)
{
  const __m256 mask =
    _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));

  VectorizedArray<float, 8> result;
  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
  return result;
}

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
compare_and_apply_mask(const VectorizedArray<double, 4> &left,
                       const VectorizedArray<double, 4> &right,
                       const VectorizedArray<double, 4> &true_values,
                       const VectorizedArray<double, 4> &false_values)
{
  const __m256d mask =
    _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));

  VectorizedArray<double, 4> result;
  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
  return result;
}

# endif
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
compare_and_apply_mask(const VectorizedArray<float, 4> &left,
                       const VectorizedArray<float, 4> &right,
                       const VectorizedArray<float, 4> &true_values,
                       const VectorizedArray<float, 4> &false_values)
{
  // SSE2 has no immediate-predicate compare, so dispatch to the dedicated
  // comparison intrinsics
  __m128 mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = _mm_cmpeq_ps(left.data, right.data); break;
      case SIMDComparison::not_equal:
        mask = _mm_cmpneq_ps(left.data, right.data); break;
      case SIMDComparison::less_than:
        mask = _mm_cmplt_ps(left.data, right.data); break;
      case SIMDComparison::less_than_or_equal:
        mask = _mm_cmple_ps(left.data, right.data); break;
      case SIMDComparison::greater_than:
        mask = _mm_cmpgt_ps(left.data, right.data); break;
      case SIMDComparison::greater_than_or_equal:
        mask = _mm_cmpge_ps(left.data, right.data); break;
    }

  VectorizedArray<float, 4> result;
  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
                          _mm_andnot_ps(mask, false_values.data));
  return result;
}

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
compare_and_apply_mask(const VectorizedArray<double, 2> &left,
                       const VectorizedArray<double, 2> &right,
                       const VectorizedArray<double, 2> &true_values,
                       const VectorizedArray<double, 2> &false_values)
{
  __m128d mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = _mm_cmpeq_pd(left.data, right.data); break;
      case SIMDComparison::not_equal:
        mask = _mm_cmpneq_pd(left.data, right.data); break;
      case SIMDComparison::less_than:
        mask = _mm_cmplt_pd(left.data, right.data); break;
      case SIMDComparison::less_than_or_equal:
        mask = _mm_cmple_pd(left.data, right.data); break;
      case SIMDComparison::greater_than:
        mask = _mm_cmpgt_pd(left.data, right.data); break;
      case SIMDComparison::greater_than_or_equal:
        mask = _mm_cmpge_pd(left.data, right.data); break;
    }

  VectorizedArray<double, 2> result;
  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
                          _mm_andnot_pd(mask, false_values.data));
  return result;
}

# endif
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ARM_NEON)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
compare_and_apply_mask(const VectorizedArray<float, 4> &left,
                       const VectorizedArray<float, 4> &right,
                       const VectorizedArray<float, 4> &true_values,
                       const VectorizedArray<float, 4> &false_values)
{
  uint32x4_t mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = vceqq_f32(left.data, right.data); break;
      case SIMDComparison::not_equal:
        mask = vmvnq_u32(vceqq_f32(left.data, right.data)); break;
      case SIMDComparison::less_than:
        mask = vcltq_f32(left.data, right.data); break;
      case SIMDComparison::less_than_or_equal:
        mask = vcleq_f32(left.data, right.data); break;
      case SIMDComparison::greater_than:
        mask = vcgtq_f32(left.data, right.data); break;
      case SIMDComparison::greater_than_or_equal:
        mask = vcgeq_f32(left.data, right.data); break;
    }

  VectorizedArray<float, 4> result;
  result.data = vreinterpretq_f32_u32(vorrq_u32(
    vandq_u32(mask, vreinterpretq_u32_f32(true_values.data)),
    vandq_u32(vmvnq_u32(mask), vreinterpretq_u32_f32(false_values.data))));
  return result;
}

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
compare_and_apply_mask(const VectorizedArray<double, 2> &left,
                       const VectorizedArray<double, 2> &right,
                       const VectorizedArray<double, 2> &true_values,
                       const VectorizedArray<double, 2> &false_values)
{
  uint64x2_t mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = vceqq_f64(left.data, right.data); break;
      case SIMDComparison::not_equal:
        // there is no 64-bit bitwise-not, so invert the equality mask
        // through the 32-bit view
        mask = vreinterpretq_u64_u32(
          vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(left.data, right.data))));
        break;
      case SIMDComparison::less_than:
        mask = vcltq_f64(left.data, right.data); break;
      case SIMDComparison::less_than_or_equal:
        mask = vcleq_f64(left.data, right.data); break;
      case SIMDComparison::greater_than:
        mask = vcgtq_f64(left.data, right.data); break;
      case SIMDComparison::greater_than_or_equal:
        mask = vcgeq_f64(left.data, right.data); break;
    }

  VectorizedArray<double, 2> result;
  result.data = vreinterpretq_f64_u64(vorrq_u64(
    vandq_u64(mask, vreinterpretq_u64_f64(true_values.data)),
    vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(mask))),
              vreinterpretq_u64_f64(false_values.data))));
  return result;
}

# endif
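// Illustrative sketch (not from the header): the vectorized overloads apply
// the predicate lane by lane, e.g. a lane-wise maximum:
//
//   VectorizedArray<double, 2> a, b;
//   a[0] = 1.; a[1] = 5.;
//   b[0] = 4.; b[1] = 2.;
//   const auto m =
//     compare_and_apply_mask<SIMDComparison::greater_than>(a, b, a, b);
//   // m[0] == 4., m[1] == 5.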
namespace internal
{
  // traits to treat plain scalars and VectorizedArray objects uniformly in
  // generic code
  template <typename T>
  struct VectorizedArrayTrait
  {
    using value_type = T;
    static constexpr std::size_t width() { return 1; }
    static constexpr std::size_t stride() { return 1; }
  };

  template <typename T, std::size_t width_>
  struct VectorizedArrayTrait<VectorizedArray<T, width_>>
  {
    using value_type = T;
    static constexpr std::size_t width() { return width_; }
    static constexpr std::size_t stride() { return 1; }
  };
} // namespace internal
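// Illustrative sketch (not from the header, and assuming the trait interface
// shown above): generic code can ask how many lanes a number type carries,
// whether it is a plain scalar or a vectorized type.
//
//   template <typename Number>
//   constexpr std::size_t lanes()
//   {
//     return internal::VectorizedArrayTrait<Number>::width();
//   }
//   static_assert(lanes<double>() == 1);
//   // lanes<VectorizedArray<double>>() equals the compiled-in SIMD width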
// The remainder of this file overloads the elementary mathematical functions
// from the standard library for VectorizedArray; each overload acts lane by
// lane.
namespace std
{
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  sin(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::sin(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  cos(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::cos(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  tan(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::tan(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  acos(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::acos(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  asin(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::asin(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  atan(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::atan(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  cosh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::cosh(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  sinh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::sinh(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  tanh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::tanh(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  acosh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::acosh(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  asinh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::asinh(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  atanh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::atanh(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  exp(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::exp(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  log(const ::dealii::VectorizedArray<Number, width> &x)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::log(x[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }
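  // Illustrative sketch (not from the header): these std:: overloads let
  // generic numerical code run unchanged for vectorized types.
  //
  //   template <typename Number>
  //   Number gaussian(const Number &x)
  //   {
  //     return std::exp(-x * x); // scalar or lane-wise, depending on Number
  //   }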
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  sqrt(const ::dealii::VectorizedArray<Number, width> &x)
  {
    return x.get_sqrt();
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  pow(const ::dealii::VectorizedArray<Number, width> &x, const Number p)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::pow(x[i], p);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  pow(const ::dealii::VectorizedArray<Number, width> &x,
      const ::dealii::VectorizedArray<Number, width> &p)
  {
    Number values[::dealii::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      values[i] = std::pow(x[i], p[i]);
    ::dealii::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  abs(const ::dealii::VectorizedArray<Number, width> &x)
  {
    return x.get_abs();
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  max(const ::dealii::VectorizedArray<Number, width> &x,
      const ::dealii::VectorizedArray<Number, width> &y)
  {
    return x.get_max(y);
  }

  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  min(const ::dealii::VectorizedArray<Number, width> &x,
      const ::dealii::VectorizedArray<Number, width> &y)
  {
    return x.get_min(y);
  }
} // namespace std
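// Illustrative sketch (not from the header), combining several overloads:
//
//   VectorizedArray<double> x = 2.0; // default width for the architecture
//   const auto r = std::max(std::sqrt(x), std::abs(-x)); // lane-wise: 2.0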
#ifdef DEAL_II_HAVE_CXX20