#ifndef dealii_vectorization_h
#define dealii_vectorization_h

#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0

#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
#    error \
      "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#  endif
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
#    error \
      "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#  endif

// Include the intrinsics header matching the available instruction-set
// extension (the first branch of this chain is elided in this excerpt):
#  elif defined(__ALTIVEC__)
#    include <altivec.h>
#  elif defined(__ARM_NEON)
#    include <arm_neon.h>
#  elif defined(__x86_64__)
#    include <x86intrin.h>
#  endif
// Forward declaration of the main class template.
template <typename Number, std::size_t width>
class VectorizedArray;

// Excerpt of the lane iterator used by VectorizedArray. Comparing two
// iterators asserts that they refer to the same array:
//
//   Assert(this->data == other.data,
//          ExcMessage(
//            "You are trying to compare iterators into different arrays."));
//
// Dereferencing returns a reference to the current lane; the non-const
// overload is only available when the array itself is non-const:

constexpr const typename T::value_type &
operator*() const;

template <typename U = T>
constexpr std::enable_if_t<!std::is_same_v<U, const U>,
                           typename T::value_type> &
operator*();

// Decrementing an iterator that already points to the first lane triggers
//
//   Assert(lane > 0,
//          ExcMessage(
//            "You can't decrement an iterator that is already at the beginning of the range."));

// The distance between two lane iterators:
constexpr std::ptrdiff_t
operator-(const VectorizedArrayIterator<T> &other) const
{
  return static_cast<std::ptrdiff_t>(lane) -
         static_cast<std::ptrdiff_t>(other.lane);
}
// Common base class of all VectorizedArray variants (excerpt). It provides
// the initializer-list constructor, the size() query, and lane iterators.
template <typename VectorizedArrayType, std::size_t width>
class VectorizedArrayBase
{
public:
  // Construct from an initializer list; lanes that are not initialized are
  // filled with zero.
  template <typename U>
  constexpr VectorizedArrayBase(const std::initializer_list<U> &list)
  {
    const unsigned int n_initializers = list.size();
    Assert(n_initializers <= width,
           ExcMessage("The initializer list must have at most "
                      "as many elements as the vector length."));

    std::copy_n(list.begin(), n_initializers, this->begin());

    if (n_initializers < size())
      std::fill(this->begin() + n_initializers, this->end(), 0.0);
  }

  // The number of lanes.
  static constexpr std::size_t
  size()
  {
    return width;
  }

  // Iterators over the lanes.
  constexpr VectorizedArrayIterator<VectorizedArrayType>
  begin()
  {
    return {static_cast<VectorizedArrayType &>(*this), 0};
  }

  constexpr VectorizedArrayIterator<const VectorizedArrayType>
  begin() const
  {
    return {static_cast<const VectorizedArrayType &>(*this), 0};
  }

  constexpr VectorizedArrayIterator<VectorizedArrayType>
  end()
  {
    return {static_cast<VectorizedArrayType &>(*this), width};
  }

  constexpr VectorizedArrayIterator<const VectorizedArrayType>
  end() const
  {
    return {static_cast<const VectorizedArrayType &>(*this), width};
  }

  // Further members first obtain the derived object by value, e.g.:
  //   VectorizedArrayType p = static_cast<const VectorizedArrayType &>(*this);
};
// The generic class template: used whenever no hardware-specific
// specialization matches, with exactly one lane (width == 1).
template <typename Number, std::size_t width>
class VectorizedArray
  : public VectorizedArrayBase<VectorizedArray<Number, width>, width>
{
public:
  VectorizedArray()
  {
    static_assert(width == 1,
                  "You specified an illegal width that is not supported.");
  }

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<Number, width>, width>(list)
  {
    static_assert(width == 1,
                  "You specified an illegal width that is not supported.");
  }

  // Load/store from a pointer of a possibly different scalar type.
  template <typename OtherNumber>
  void
  load(const OtherNumber *ptr)
  {
    data = *ptr;
  }

  template <typename OtherNumber>
  void
  store(OtherNumber *ptr) const
  {
    *ptr = data;
  }

  // Scalar gather/scatter: only the first offset is used.
  void
  gather(const Number *base_ptr, const unsigned int *offsets)
  {
    data = base_ptr[offsets[0]];
  }

  void
  scatter(const unsigned int *offsets, Number *base_ptr) const
  {
    base_ptr[offsets[0]] = data;
  }

  // The scalar payload of the single lane.
  Number data;

private:
  // Friend declarations for the global math-function overloads; each is
  // prefixed by
  //   template <typename Number2, std::size_t width2>
  // (the function signatures themselves are elided in this excerpt).
};
// Broadcast a scalar value to all lanes of a VectorizedArray.
template <typename Number,
          std::size_t width =
            internal::VectorizedArrayWidthSpecifier<Number>::max_width>
inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
make_vectorized_array(const Number &u)
{
  VectorizedArray<Number, width> result = u;
  return result;
}

// Same as above, but with the vectorized array type given explicitly.
template <typename VectorizedArrayType>
inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
make_vectorized_array(const typename VectorizedArrayType::value_type &u)
{
  static_assert(
    std::is_same_v<VectorizedArrayType,
                   VectorizedArray<typename VectorizedArrayType::value_type,
                                   VectorizedArrayType::size()>>,
    "VectorizedArrayType is not a VectorizedArray.");

  VectorizedArrayType result = u;
  return result;
}
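/**
 * Usage sketch (illustrative, not part of the header): broadcast a scalar
 * into all lanes and do lane-wise arithmetic. The width used here is the
 * default one for double on the current hardware.
 *
 * @code
 * #include <deal.II/base/vectorization.h>
 *
 * void example()
 * {
 *   // All lanes hold 2.0.
 *   const auto two = dealii::make_vectorized_array<double>(2.0);
 *
 *   dealii::VectorizedArray<double> x = 1.5; // broadcast assignment
 *   x += two;                                // lane-wise addition
 *   x *= x;                                  // lane-wise multiplication
 *
 *   // Access individual lanes with operator[].
 *   const double first_lane = x[0];          // 12.25 in every lane
 *   (void)first_lane;
 * }
 * @endcode
 */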
// Gather one entry per lane from an array of pointers, generic variant.
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE void
gather(VectorizedArray<Number, width>    &out,
       const std::array<Number *, width> &ptrs,
       const unsigned int                 offset)
{
  for (unsigned int v = 0; v < width; ++v)
    out.data[v] = ptrs[v][offset];
}

// Generic (non-intrinsic) variant of vectorized_load_and_transpose:
// out[i][v] = in[offsets[v] + i].
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int               n_entries,
                              const Number                    *in,
                              const unsigned int              *offsets,
                              VectorizedArray<Number, width>  *out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
      out[i][v] = in[offsets[v] + i];
}
// Generic variant reading from one pointer per lane: out[i][v] = in[v][i].
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int                 n_entries,
                              const std::array<Number *, width> &in,
                              VectorizedArray<Number, width>    *out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
      out[i][v] = in[v][i];
}

// Generic variant of vectorized_transpose_and_store. If add_into is true,
// the results are added to the output array, otherwise they overwrite it.
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                            add_into,
                               const unsigned int                    n_entries,
                               const VectorizedArray<Number, width> *in,
                               const unsigned int                   *offsets,
                               Number                               *out)
{
  if (add_into)
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[offsets[v] + i] = in[i][v];
}

// Same as above, writing through one pointer per lane.
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                            add_into,
                               const unsigned int                    n_entries,
                               const VectorizedArray<Number, width> *in,
                               std::array<Number *, width>          &out)
{
  if (add_into)
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[v][i] = in[i][v];
}
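/**
 * Usage sketch (illustrative, not part of the header): gather a few entries
 * per lane, work on them lane-wise, and scatter them back. The offsets array
 * must provide one entry per lane, i.e. VectorizedArray<double>::size()
 * values.
 *
 * @code
 * void transpose_round_trip(double *field, const unsigned int *offsets)
 * {
 *   constexpr unsigned int n_entries = 4;
 *   dealii::VectorizedArray<double> tmp[n_entries];
 *
 *   // tmp[i][v] = field[offsets[v] + i]
 *   dealii::vectorized_load_and_transpose(n_entries, field, offsets, tmp);
 *
 *   for (unsigned int i = 0; i < n_entries; ++i)
 *     tmp[i] *= 2.;
 *
 *   // field[offsets[v] + i] = tmp[i][v]   (add_into == false)
 *   dealii::vectorized_transpose_and_store(false, n_entries, tmp, offsets,
 *                                          field);
 * }
 * @endcode
 */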
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ARM_NEON)

// Specialization for double and ARM Neon (2 lanes).
template <>
class VectorizedArray<double, 2>
  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
{
public:
  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
  {}

  // Broadcast a scalar to both lanes.
  VectorizedArray &
  operator=(const double x) &
  {
    data = vdupq_n_f64(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const double scalar) && = delete;

  // Lane access.
  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  // Unaligned loads and stores, including conversion from/to float.
  void
  load(const double *ptr)
  {
    data = vld1q_f64(ptr);
  }

  void
  load(const float *ptr)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = ptr[i];
  }

  void
  store(double *ptr) const
  {
    vst1q_f64(ptr, data);
  }

  void
  store(float *ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      ptr[i] = *(reinterpret_cast<const double *>(&data) + i);
  }

  // Streaming store; requires 16-byte alignment.
  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    vst1q_f64(ptr, data);
  }

  // Element-wise gather/scatter through an offset table.
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  // Horizontal sum of both lanes.
  double
  sum() const
  {
    return vaddvq_f64(data);
  }

  // The Neon register holding the two lanes.
  mutable float64x2_t data;

private:
  // Friend declarations for the global math-function overloads; each is
  // prefixed by
  //   template <typename Number2, std::size_t width2>
  // (the function signatures themselves are elided in this excerpt).
};
// Specialization for float and ARM Neon (4 lanes).
template <>
class VectorizedArray<float, 4>
  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
{
public:
  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
  {}

  // Broadcast a scalar to all four lanes.
  VectorizedArray &
  operator=(const float x) &
  {
    data = vdupq_n_f32(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const float scalar) && = delete;

  // Lane access.
  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  // Unaligned loads and stores.
  void
  load(const float *ptr)
  {
    data = vld1q_f32(ptr);
  }

  void
  store(float *ptr) const
  {
    vst1q_f32(ptr, data);
  }

  // Streaming store; requires 16-byte alignment.
  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    vst1q_f32(ptr, data);
  }

  // Element-wise gather/scatter through an offset table.
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  // Horizontal sum of the four lanes.
  float
  sum() const
  {
    return vaddvq_f32(data);
  }

  // The Neon register holding the four lanes.
  mutable float32x4_t data;

private:
  // Friend declarations for the global math-function overloads; each is
  // prefixed by
  //   template <typename Number2, std::size_t width2>
  // (the function signatures themselves are elided in this excerpt).
};

#  endif // ARM Neon
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)

// Specialization for double and SSE2 (2 lanes).
template <>
class VectorizedArray<double, 2>
  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
{
public:
  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
  {}

  // Broadcast a scalar to both lanes.
  VectorizedArray &
  operator=(const double x) &
  {
    data = _mm_set1_pd(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const double scalar) && = delete;

  // Lane access.
  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  // The compound-assignment operators use plain vector arithmetic where the
  // compiler supports it and the matching intrinsic otherwise, e.g.:
  VectorizedArray &
  operator+=(const VectorizedArray &vec)
  {
#    ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#    else
    data = _mm_add_pd(data, vec.data);
#    endif
    return *this;
  }
  // (operator-=, operator*=, and operator/= follow the same pattern with
  //  _mm_sub_pd, _mm_mul_pd, and _mm_div_pd.)

  // Unaligned loads and stores, including conversion from/to float.
  void
  load(const double *ptr)
  {
    data = _mm_loadu_pd(ptr);
  }

  void
  load(const float *ptr)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = ptr[i];
  }

  void
  store(double *ptr) const
  {
    _mm_storeu_pd(ptr, data);
  }

  void
  store(float *ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      ptr[i] = *(reinterpret_cast<const double *>(&data) + i);
  }

  // Non-temporal store; requires 16-byte alignment.
  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    _mm_stream_pd(ptr, data);
  }

  // Element-wise gather/scatter through an offset table.
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  // Horizontal sum of both lanes.
  double
  sum() const
  {
    __m128d t1 = _mm_unpackhi_pd(data, data);
    __m128d t2 = _mm_add_pd(data, t1);
    return _mm_cvtsd_f64(t2);
  }

  // The SSE register holding the two lanes.
  mutable __m128d data;

private:
  // Absolute value: clear the sign bit of both lanes.
  VectorizedArray
  get_abs() const
  {
    __m128d mask = _mm_set1_pd(-0.);
    VectorizedArray res;
    res.data = _mm_andnot_pd(mask, data);
    return res;
  }

  // Friend declarations for the global math-function overloads; each is
  // prefixed by
  //   template <typename Number2, std::size_t width2>
  // (the function signatures themselves are elided in this excerpt).
};
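/**
 * Usage sketch (illustrative, not part of the header): load two doubles,
 * multiply them lane-wise, and reduce across lanes with sum().
 *
 * @code
 * double reduce_pairwise(const double *a, const double *b)
 * {
 *   dealii::VectorizedArray<double, 2> va, vb;
 *   va.load(a);       // va = {a[0], a[1]}, unaligned load
 *   vb.load(b);
 *   va *= vb;         // lane-wise product
 *   return va.sum();  // a[0]*b[0] + a[1]*b[1]
 * }
 * @endcode
 */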
// vectorized_load_and_transpose for SSE2 doubles (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int          n_entries,
                              const double               *in,
                              const unsigned int         *offsets,
                              VectorizedArray<double, 2> *out)
{
  const unsigned int n_chunks = n_entries / 2;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128d u0          = _mm_loadu_pd(in + 2 * i + offsets[0]);
      __m128d u1          = _mm_loadu_pd(in + 2 * i + offsets[1]);
      out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
      out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
    }

  // Remainder loop.
  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 2; ++v)
      out[i][v] = in[offsets[v] + i];
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int             n_entries,
                              const std::array<double *, 2> &in,
                              VectorizedArray<double, 2>    *out)
{
  const unsigned int n_chunks = n_entries / 2;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128d u0          = _mm_loadu_pd(in[0] + 2 * i);
      __m128d u1          = _mm_loadu_pd(in[1] + 2 * i);
      out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
      out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
    }

  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 2; ++v)
      out[i][v] = in[v][i];
}

// vectorized_transpose_and_store for SSE2 doubles (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 2> *in,
                               const unsigned int               *offsets,
                               double                           *out)
{
  const unsigned int n_chunks = n_entries / 2;
  if (add_into)
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out + 2 * i + offsets[0],
                        _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
                                   res0));
          _mm_storeu_pd(out + 2 * i + offsets[1],
                        _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
                                   res1));
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[offsets[v] + i] += in[i][v];
    }
  else
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out + 2 * i + offsets[0], res0);
          _mm_storeu_pd(out + 2 * i + offsets[1], res1);
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[offsets[v] + i] = in[i][v];
    }
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 2> *in,
                               std::array<double *, 2>          &out)
{
  const unsigned int n_chunks = n_entries / 2;
  if (add_into)
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out[0] + 2 * i,
                        _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
          _mm_storeu_pd(out[1] + 2 * i,
                        _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[v][i] += in[i][v];
    }
  else
    {
      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0   = in[2 * i + 0].data;
          __m128d u1   = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out[0] + 2 * i, res0);
          _mm_storeu_pd(out[1] + 2 * i, res1);
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[v][i] = in[i][v];
    }
}
// Specialization for float and SSE2 (4 lanes).
template <>
class VectorizedArray<float, 4>
  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
{
public:
  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
  {}

  // Broadcast a scalar to all four lanes.
  VectorizedArray &
  operator=(const float x) &
  {
    data = _mm_set1_ps(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const float scalar) && = delete;

  // Lane access.
  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  // Compound assignment uses plain vector arithmetic where the compiler
  // supports it (DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS) and the
  // _mm_add_ps/_mm_sub_ps/_mm_mul_ps/_mm_div_ps intrinsics otherwise.

  // Unaligned loads and stores.
  void
  load(const float *ptr)
  {
    data = _mm_loadu_ps(ptr);
  }

  void
  store(float *ptr) const
  {
    _mm_storeu_ps(ptr, data);
  }

  // Non-temporal store; requires 16-byte alignment.
  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
           ExcMessage("Memory not aligned"));
    _mm_stream_ps(ptr, data);
  }

  // Element-wise gather/scatter through an offset table.
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  // Horizontal sum of the four lanes.
  float
  sum() const
  {
    __m128 t1 = _mm_movehl_ps(data, data);
    __m128 t2 = _mm_add_ps(data, t1);
    __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
    __m128 t4 = _mm_add_ss(t2, t3);
    return _mm_cvtss_f32(t4);
  }

  // The SSE register holding the four lanes.
  mutable __m128 data;

private:
  // Absolute value: clear the sign bit of all lanes.
  VectorizedArray
  get_abs() const
  {
    __m128 mask = _mm_set1_ps(-0.f);
    VectorizedArray res;
    res.data = _mm_andnot_ps(mask, data);
    return res;
  }

  // Friend declarations for the global math-function overloads; each is
  // prefixed by
  //   template <typename Number2, std::size_t width2>
  // (the function signatures themselves are elided in this excerpt).
};
// vectorized_load_and_transpose for SSE2 floats (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int         n_entries,
                              const float               *in,
                              const unsigned int        *offsets,
                              VectorizedArray<float, 4> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0           = _mm_loadu_ps(in + 4 * i + offsets[0]);
      __m128 u1           = _mm_loadu_ps(in + 4 * i + offsets[1]);
      __m128 u2           = _mm_loadu_ps(in + 4 * i + offsets[2]);
      __m128 u3           = _mm_loadu_ps(in + 4 * i + offsets[3]);
      __m128 v0           = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 v1           = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 v2           = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 v3           = _mm_shuffle_ps(u2, u3, 0xee);
      out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
    }

  // Remainder loop.
  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 4; ++v)
      out[i][v] = in[offsets[v] + i];
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int            n_entries,
                              const std::array<float *, 4> &in,
                              VectorizedArray<float, 4>    *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0           = _mm_loadu_ps(in[0] + 4 * i);
      __m128 u1           = _mm_loadu_ps(in[1] + 4 * i);
      __m128 u2           = _mm_loadu_ps(in[2] + 4 * i);
      __m128 u3           = _mm_loadu_ps(in[3] + 4 * i);
      __m128 v0           = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 v1           = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 v2           = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 v3           = _mm_shuffle_ps(u2, u3, 0xee);
      out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    for (unsigned int v = 0; v < 4; ++v)
      out[i][v] = in[v][i];
}

// vectorized_transpose_and_store for SSE2 floats (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                       add_into,
                               const unsigned int               n_entries,
                               const VectorizedArray<float, 4> *in,
                               const unsigned int              *offsets,
                               float                           *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0 = in[4 * i + 0].data;
      __m128 u1 = in[4 * i + 1].data;
      __m128 u2 = in[4 * i + 2].data;
      __m128 u3 = in[4 * i + 3].data;
      __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
      u0        = _mm_shuffle_ps(t0, t2, 0x88);
      u1        = _mm_shuffle_ps(t0, t2, 0xdd);
      u2        = _mm_shuffle_ps(t1, t3, 0x88);
      u3        = _mm_shuffle_ps(t1, t3, 0xdd);

      if (add_into)
        {
          u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
          _mm_storeu_ps(out + 4 * i + offsets[0], u0);
          u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
          _mm_storeu_ps(out + 4 * i + offsets[1], u1);
          u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
          _mm_storeu_ps(out + 4 * i + offsets[2], u2);
          u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
          _mm_storeu_ps(out + 4 * i + offsets[3], u3);
        }
      else
        {
          _mm_storeu_ps(out + 4 * i + offsets[0], u0);
          _mm_storeu_ps(out + 4 * i + offsets[1], u1);
          _mm_storeu_ps(out + 4 * i + offsets[2], u2);
          _mm_storeu_ps(out + 4 * i + offsets[3], u3);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] = in[i][v];
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                       add_into,
                               const unsigned int               n_entries,
                               const VectorizedArray<float, 4> *in,
                               std::array<float *, 4>          &out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m128 u0 = in[4 * i + 0].data;
      __m128 u1 = in[4 * i + 1].data;
      __m128 u2 = in[4 * i + 2].data;
      __m128 u3 = in[4 * i + 3].data;
      __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
      __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
      __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
      __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
      u0        = _mm_shuffle_ps(t0, t2, 0x88);
      u1        = _mm_shuffle_ps(t0, t2, 0xdd);
      u2        = _mm_shuffle_ps(t1, t3, 0x88);
      u3        = _mm_shuffle_ps(t1, t3, 0xdd);

      if (add_into)
        {
          u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
          _mm_storeu_ps(out[0] + 4 * i, u0);
          u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
          _mm_storeu_ps(out[1] + 4 * i, u1);
          u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
          _mm_storeu_ps(out[2] + 4 * i, u2);
          u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
          _mm_storeu_ps(out[3] + 4 * i, u3);
        }
      else
        {
          _mm_storeu_ps(out[0] + 4 * i, u0);
          _mm_storeu_ps(out[1] + 4 * i, u1);
          _mm_storeu_ps(out[2] + 4 * i, u2);
          _mm_storeu_ps(out[3] + 4 * i, u3);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] = in[i][v];
}

#  endif // SSE2
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)

// Specialization for double and AVX (4 lanes).
template <>
class VectorizedArray<double, 4>
  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
{
public:
  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 4>, 4>(list)
  {}

  // Broadcast a scalar to all four lanes.
  VectorizedArray &
  operator=(const double x) &
  {
    data = _mm256_set1_pd(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const double scalar) && = delete;

  // Lane access.
  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  // Compound assignment uses plain vector arithmetic where the compiler
  // supports it (DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS) and the
  // _mm256_add_pd/_mm256_sub_pd/_mm256_mul_pd/_mm256_div_pd intrinsics
  // otherwise.

  // Unaligned loads and stores, including conversion from/to float.
  void
  load(const double *ptr)
  {
    data = _mm256_loadu_pd(ptr);
  }

  void
  load(const float *ptr)
  {
    data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
  }

  void
  store(double *ptr) const
  {
    _mm256_storeu_pd(ptr, data);
  }

  void
  store(float *ptr) const
  {
    _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
  }

  // Non-temporal store; requires 32-byte alignment.
  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
           ExcMessage("Memory not aligned"));
    _mm256_stream_pd(ptr, data);
  }

  // Gather through an offset table; uses the AVX2 gather instruction when
  // available and a scalar loop otherwise.
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
#    if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
    // There is no 128-bit integer load for the four offsets, so load them
    // through a float load and reinterpret the bits as integers.
    const __m128 index_val =
      _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);

    // A mask with all bits set selects all four lanes.
    __m256d zero = _mm256_setzero_pd();
    __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);

    data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
#    else
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
#    endif
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  // Horizontal sum: add the lower and upper 128-bit halves, then reduce.
  double
  sum() const
  {
    VectorizedArray<double, 2> t1;
    t1.data = _mm_add_pd(this->get_lower(), this->get_upper());
    return t1.sum();
  }

  // The AVX register holding the four lanes.
  mutable __m256d data;

private:
  __m128d
  get_lower() const
  {
    return _mm256_castpd256_pd128(data);
  }

  __m128d
  get_upper() const
  {
    return _mm256_extractf128_pd(data, 1);
  }

  // Absolute value: clear the sign bit of all lanes.
  VectorizedArray
  get_abs() const
  {
    __m256d mask = _mm256_set1_pd(-0.);
    VectorizedArray res;
    res.data = _mm256_andnot_pd(mask, data);
    return res;
  }

  // Friend declarations for the global math-function overloads; each is
  // prefixed by
  //   template <typename Number2, std::size_t width2>
  // (the function signatures themselves are elided in this excerpt).
};
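/**
 * Usage sketch (illustrative, not part of the header): gather strided
 * entries into the lanes of a vectorized array and scatter them back. The
 * offsets array must hold one index per lane.
 *
 * @code
 * void strided_access(double *base)
 * {
 *   constexpr unsigned int n_lanes = dealii::VectorizedArray<double>::size();
 *   dealii::VectorizedArray<double> v;
 *
 *   unsigned int offsets[n_lanes];
 *   for (unsigned int lane = 0; lane < n_lanes; ++lane)
 *     offsets[lane] = 16 * lane; // e.g. one entry every 16 doubles
 *
 *   v.gather(base, offsets);   // v[lane] = base[offsets[lane]]
 *   v = v * v;                 // lane-wise work
 *   v.scatter(offsets, base);  // base[offsets[lane]] = v[lane]
 * }
 * @endcode
 */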
// vectorized_load_and_transpose for AVX doubles (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int          n_entries,
                              const double               *in,
                              const unsigned int         *offsets,
                              VectorizedArray<double, 4> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  const double      *in0      = in + offsets[0];
  const double      *in1      = in + offsets[1];
  const double      *in2      = in + offsets[2];
  const double      *in3      = in + offsets[3];

  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0          = _mm256_loadu_pd(in0 + 4 * i);
      __m256d u1          = _mm256_loadu_pd(in1 + 4 * i);
      __m256d u2          = _mm256_loadu_pd(in2 + 4 * i);
      __m256d u3          = _mm256_loadu_pd(in3 + 4 * i);
      __m256d t0          = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1          = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2          = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3          = _mm256_permute2f128_pd(u1, u3, 0x31);
      out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
      out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
      out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
      out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
    }

  // Remainder loop.
  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    out[i].gather(in + i, offsets);
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int             n_entries,
                              const std::array<double *, 4> &in,
                              VectorizedArray<double, 4>    *out)
{
  const unsigned int n_chunks = n_entries / 4;
  const double      *in0      = in[0];
  const double      *in1      = in[1];
  const double      *in2      = in[2];
  const double      *in3      = in[3];

  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0          = _mm256_loadu_pd(in0 + 4 * i);
      __m256d u1          = _mm256_loadu_pd(in1 + 4 * i);
      __m256d u2          = _mm256_loadu_pd(in2 + 4 * i);
      __m256d u3          = _mm256_loadu_pd(in3 + 4 * i);
      __m256d t0          = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1          = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2          = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3          = _mm256_permute2f128_pd(u1, u3, 0x31);
      out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
      out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
      out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
      out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    gather(out[i], in, i);
}

// vectorized_transpose_and_store for AVX doubles (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 4> *in,
                               const unsigned int               *offsets,
                               double                           *out)
{
  const unsigned int n_chunks = n_entries / 4;
  double            *out0     = out + offsets[0];
  double            *out1     = out + offsets[1];
  double            *out2     = out + offsets[2];
  double            *out3     = out + offsets[3];
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0   = in[4 * i + 0].data;
      __m256d u1   = in[4 * i + 1].data;
      __m256d u2   = in[4 * i + 2].data;
      __m256d u3   = in[4 * i + 3].data;
      __m256d t0   = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1   = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2   = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3   = _mm256_permute2f128_pd(u1, u3, 0x31);
      __m256d res0 = _mm256_unpacklo_pd(t0, t1);
      __m256d res1 = _mm256_unpackhi_pd(t0, t1);
      __m256d res2 = _mm256_unpacklo_pd(t2, t3);
      __m256d res3 = _mm256_unpackhi_pd(t2, t3);

      // Load, add, and store when adding into the output; plain stores
      // otherwise.
      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
          _mm256_storeu_pd(out0 + 4 * i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
      else
        {
          _mm256_storeu_pd(out0 + 4 * i, res0);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] = in[i][v];
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 4> *in,
                               std::array<double *, 4>          &out)
{
  const unsigned int n_chunks = n_entries / 4;
  double            *out0     = out[0];
  double            *out1     = out[1];
  double            *out2     = out[2];
  double            *out3     = out[3];
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256d u0   = in[4 * i + 0].data;
      __m256d u1   = in[4 * i + 1].data;
      __m256d u2   = in[4 * i + 2].data;
      __m256d u3   = in[4 * i + 3].data;
      __m256d t0   = _mm256_permute2f128_pd(u0, u2, 0x20);
      __m256d t1   = _mm256_permute2f128_pd(u1, u3, 0x20);
      __m256d t2   = _mm256_permute2f128_pd(u0, u2, 0x31);
      __m256d t3   = _mm256_permute2f128_pd(u1, u3, 0x31);
      __m256d res0 = _mm256_unpacklo_pd(t0, t1);
      __m256d res1 = _mm256_unpackhi_pd(t0, t1);
      __m256d res2 = _mm256_unpacklo_pd(t2, t3);
      __m256d res3 = _mm256_unpackhi_pd(t2, t3);

      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
          _mm256_storeu_pd(out0 + 4 * i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
      else
        {
          _mm256_storeu_pd(out0 + 4 * i, res0);
          _mm256_storeu_pd(out1 + 4 * i, res1);
          _mm256_storeu_pd(out2 + 4 * i, res2);
          _mm256_storeu_pd(out3 + 4 * i, res3);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] = in[i][v];
}
// Specialization for float and AVX (8 lanes).
template <>
class VectorizedArray<float, 8>
  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
{
public:
  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 8>, 8>(list)
  {}

  // Broadcast a scalar to all eight lanes.
  VectorizedArray &
  operator=(const float x) &
  {
    data = _mm256_set1_ps(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const float scalar) && = delete;

  // Lane access.
  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  // Compound assignment uses plain vector arithmetic where the compiler
  // supports it (DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS) and the
  // _mm256_add_ps/_mm256_sub_ps/_mm256_mul_ps/_mm256_div_ps intrinsics
  // otherwise.

  // Unaligned loads and stores.
  void
  load(const float *ptr)
  {
    data = _mm256_loadu_ps(ptr);
  }

  void
  store(float *ptr) const
  {
    _mm256_storeu_ps(ptr, data);
  }

  // Non-temporal store; requires 32-byte alignment.
  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
           ExcMessage("Memory not aligned"));
    _mm256_stream_ps(ptr, data);
  }

  // Gather through an offset table; uses the AVX2 gather instruction when
  // available and a scalar loop otherwise.
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
#    if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
    // Load the eight offsets through a float load and reinterpret the bits
    // as integers.
    const __m256 index_val =
      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);

    // A mask with all bits set selects all eight lanes.
    __m256 zero = _mm256_setzero_ps();
    __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);

    data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
#    else
    for (unsigned int i = 0; i < 8; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
#    endif
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 8; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  // Horizontal sum: add the lower and upper 128-bit halves, then reduce.
  float
  sum() const
  {
    VectorizedArray<float, 4> t1;
    t1.data = _mm_add_ps(this->get_lower(), this->get_upper());
    return t1.sum();
  }

  // The AVX register holding the eight lanes.
  mutable __m256 data;

private:
  __m128
  get_lower() const
  {
    return _mm256_castps256_ps128(data);
  }

  __m128
  get_upper() const
  {
    return _mm256_extractf128_ps(data, 1);
  }

  // Absolute value: clear the sign bit of all lanes.
  VectorizedArray
  get_abs() const
  {
    __m256 mask = _mm256_set1_ps(-0.f);
    VectorizedArray res;
    res.data = _mm256_andnot_ps(mask, data);
    return res;
  }

  // Friend declarations for the global math-function overloads; each is
  // prefixed by
  //   template <typename Number2, std::size_t width2>
  // (the function signatures themselves are elided in this excerpt).
};
// vectorized_load_and_transpose for AVX floats (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int         n_entries,
                              const float               *in,
                              const unsigned int        *offsets,
                              VectorizedArray<float, 8> *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      // Load two 128-bit halves per register and transpose with shuffles.
      __m256 t0, t1, t2, t3 = {};
      t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
      t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
      t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
      t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
      t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
      t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);

      __m256 v0           = _mm256_shuffle_ps(t0, t1, 0x44);
      __m256 v1           = _mm256_shuffle_ps(t0, t1, 0xee);
      __m256 v2           = _mm256_shuffle_ps(t2, t3, 0x44);
      __m256 v3           = _mm256_shuffle_ps(t2, t3, 0xee);
      out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
    }

  // Remainder loop.
  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    out[i].gather(in + i, offsets);
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int            n_entries,
                              const std::array<float *, 8> &in,
                              VectorizedArray<float, 8>    *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256 t0, t1, t2, t3 = {};
      t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
      t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
      t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
      t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
      t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
      t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);

      __m256 v0           = _mm256_shuffle_ps(t0, t1, 0x44);
      __m256 v1           = _mm256_shuffle_ps(t0, t1, 0xee);
      __m256 v2           = _mm256_shuffle_ps(t2, t3, 0x44);
      __m256 v3           = _mm256_shuffle_ps(t2, t3, 0xee);
      out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
      out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
      out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
      out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    gather(out[i], in, i);
}

// vectorized_transpose_and_store for AVX floats (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                       add_into,
                               const unsigned int               n_entries,
                               const VectorizedArray<float, 8> *in,
                               const unsigned int              *offsets,
                               float                           *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256 u0 = in[4 * i + 0].data;
      __m256 u1 = in[4 * i + 1].data;
      __m256 u2 = in[4 * i + 2].data;
      __m256 u3 = in[4 * i + 3].data;
      __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
      __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
      __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
      __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
      u0        = _mm256_shuffle_ps(t0, t2, 0x88);
      u1        = _mm256_shuffle_ps(t0, t2, 0xdd);
      u2        = _mm256_shuffle_ps(t1, t3, 0x88);
      u3        = _mm256_shuffle_ps(t1, t3, 0xdd);
      __m128 res0 = _mm256_extractf128_ps(u0, 0);
      __m128 res4 = _mm256_extractf128_ps(u0, 1);
      __m128 res1 = _mm256_extractf128_ps(u1, 0);
      __m128 res5 = _mm256_extractf128_ps(u1, 1);
      __m128 res2 = _mm256_extractf128_ps(u2, 0);
      __m128 res6 = _mm256_extractf128_ps(u2, 1);
      __m128 res3 = _mm256_extractf128_ps(u3, 0);
      __m128 res7 = _mm256_extractf128_ps(u3, 1);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
          _mm_storeu_ps(out + 4 * i + offsets[0], res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
          _mm_storeu_ps(out + 4 * i + offsets[1], res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
          _mm_storeu_ps(out + 4 * i + offsets[2], res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
          _mm_storeu_ps(out + 4 * i + offsets[3], res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
          _mm_storeu_ps(out + 4 * i + offsets[4], res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
          _mm_storeu_ps(out + 4 * i + offsets[5], res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
          _mm_storeu_ps(out + 4 * i + offsets[6], res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
          _mm_storeu_ps(out + 4 * i + offsets[7], res7);
        }
      else
        {
          _mm_storeu_ps(out + 4 * i + offsets[0], res0);
          _mm_storeu_ps(out + 4 * i + offsets[1], res1);
          _mm_storeu_ps(out + 4 * i + offsets[2], res2);
          _mm_storeu_ps(out + 4 * i + offsets[3], res3);
          _mm_storeu_ps(out + 4 * i + offsets[4], res4);
          _mm_storeu_ps(out + 4 * i + offsets[5], res5);
          _mm_storeu_ps(out + 4 * i + offsets[6], res6);
          _mm_storeu_ps(out + 4 * i + offsets[7], res7);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] = in[i][v];
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                       add_into,
                               const unsigned int               n_entries,
                               const VectorizedArray<float, 8> *in,
                               std::array<float *, 8>          &out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m256 u0 = in[4 * i + 0].data;
      __m256 u1 = in[4 * i + 1].data;
      __m256 u2 = in[4 * i + 2].data;
      __m256 u3 = in[4 * i + 3].data;
      __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
      __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
      __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
      __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
      u0        = _mm256_shuffle_ps(t0, t2, 0x88);
      u1        = _mm256_shuffle_ps(t0, t2, 0xdd);
      u2        = _mm256_shuffle_ps(t1, t3, 0x88);
      u3        = _mm256_shuffle_ps(t1, t3, 0xdd);
      __m128 res0 = _mm256_extractf128_ps(u0, 0);
      __m128 res4 = _mm256_extractf128_ps(u0, 1);
      __m128 res1 = _mm256_extractf128_ps(u1, 0);
      __m128 res5 = _mm256_extractf128_ps(u1, 1);
      __m128 res2 = _mm256_extractf128_ps(u2, 0);
      __m128 res6 = _mm256_extractf128_ps(u2, 1);
      __m128 res3 = _mm256_extractf128_ps(u3, 0);
      __m128 res7 = _mm256_extractf128_ps(u3, 1);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
          _mm_storeu_ps(out[0] + 4 * i, res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
          _mm_storeu_ps(out[7] + 4 * i, res7);
        }
      else
        {
          _mm_storeu_ps(out[0] + 4 * i, res0);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          _mm_storeu_ps(out[7] + 4 * i, res7);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] = in[i][v];
}

#  endif // AVX
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)

// Specialization for double and AVX-512 (8 lanes).
template <>
class VectorizedArray<double, 8>
  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
{
public:
  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
  {}

  // Broadcast a scalar to all eight lanes.
  VectorizedArray &
  operator=(const double x) &
  {
    data = _mm512_set1_pd(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const double scalar) && = delete;

  // Lane access.
  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  // Compound assignment uses plain vector arithmetic where the compiler
  // supports it (DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS) and the
  // _mm512_add_pd/_mm512_sub_pd/_mm512_mul_pd/_mm512_div_pd intrinsics
  // otherwise.

  // Unaligned loads and stores, including conversion from/to float.
  void
  load(const double *ptr)
  {
    data = _mm512_loadu_pd(ptr);
  }

  void
  load(const float *ptr)
  {
    data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
  }

  void
  store(double *ptr) const
  {
    _mm512_storeu_pd(ptr, data);
  }

  void
  store(float *ptr) const
  {
    _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
  }

  // Non-temporal store; requires 64-byte alignment.
  void
  streaming_store(double *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
           ExcMessage("Memory not aligned"));
    _mm512_stream_pd(ptr, data);
  }

  // Gather through an offset table; uses the AVX-512 gather instruction
  // when enabled and a scalar loop otherwise.
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
#    ifdef DEAL_II_USE_VECTORIZATION_GATHER
    // The eight offsets are loaded through a 256-bit float load and the
    // bits reinterpreted as integers.
    const __m256 index_val =
      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);

    // Gather all eight lanes; the zero source value is never used but
    // avoids warnings about uninitialized data.
    __m512d  zero = {};
    __mmask8 mask = 0xFF;

    data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
#    else
    for (unsigned int i = 0; i < 8; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
#    endif
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
#    ifdef DEAL_II_USE_VECTORIZATION_GATHER
    for (unsigned int i = 0; i < 8; ++i)
      for (unsigned int j = i + 1; j < 8; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    const __m256 index_val =
      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
    _mm512_i32scatter_pd(base_ptr, index, data, 8);
#    else
    for (unsigned int i = 0; i < 8; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
#    endif
  }

  // Horizontal sum: add the lower and upper 256-bit halves, then reduce.
  double
  sum() const
  {
    VectorizedArray<double, 4> t1;
    t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());
    return t1.sum();
  }

  // The AVX-512 register holding the eight lanes.
  mutable __m512d data;

private:
  __m256d
  get_lower() const
  {
    return _mm512_castpd512_pd256(data);
  }

  __m256d
  get_upper() const
  {
    return _mm512_extractf64x4_pd(data, 1);
  }

  // Absolute value: clear the sign bit using a 64-bit integer andnot.
  VectorizedArray
  get_abs() const
  {
    __m512d mask = _mm512_set1_pd(-0.);
    VectorizedArray res;
    res.data = reinterpret_cast<__m512d>(
      _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
                          reinterpret_cast<__m512i>(data)));
    return res;
  }

  // Friend declarations for the global math-function overloads; each is
  // prefixed by
  //   template <typename Number2, std::size_t width2>
  // (the function signatures themselves are elided in this excerpt).
};
// vectorized_load_and_transpose for AVX-512 doubles (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int          n_entries,
                              const double               *in,
                              const unsigned int         *offsets,
                              VectorizedArray<double, 8> *out)
{
  // Transpose in blocks of 4x8 entries using 256-bit loads and 512-bit
  // shuffles.
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512d t0, t1, t2, t3 = {};

      t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
      t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
      t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
      t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
      t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
      t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);

      __m512d v0          = _mm512_shuffle_f64x2(t0, t2, 0x88);
      __m512d v1          = _mm512_shuffle_f64x2(t0, t2, 0xdd);
      __m512d v2          = _mm512_shuffle_f64x2(t1, t3, 0x88);
      __m512d v3          = _mm512_shuffle_f64x2(t1, t3, 0xdd);
      out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
      out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
      out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
      out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
    }

  // Remainder loop.
  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    out[i].gather(in + i, offsets);
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int             n_entries,
                              const std::array<double *, 8> &in,
                              VectorizedArray<double, 8>    *out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512d t0, t1, t2, t3 = {};

      t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
      t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
      t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
      t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
      t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
      t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);

      __m512d v0          = _mm512_shuffle_f64x2(t0, t2, 0x88);
      __m512d v1          = _mm512_shuffle_f64x2(t0, t2, 0xdd);
      __m512d v2          = _mm512_shuffle_f64x2(t1, t3, 0x88);
      __m512d v3          = _mm512_shuffle_f64x2(t1, t3, 0xdd);
      out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
      out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
      out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
      out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
    }

  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
    gather(out[i], in, i);
}

// vectorized_transpose_and_store for AVX-512 doubles (offset variant).
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 8> *in,
                               const unsigned int               *offsets,
                               double                           *out)
{
  // As above, but in reverse: transpose the registers and write out 256-bit
  // chunks per lane.
  const unsigned int n_chunks = n_entries / 4;
  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
      __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
      __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
      __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
      __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
      __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
      __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
      __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
      __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
      __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
      __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
      __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
      __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
      __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
      __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
      __m256d res7 = _mm512_extractf64x4_pd(v3, 1);

      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
          _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
          _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
          _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
          _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
          res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
          _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
          res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
          _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
          res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
          _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
          res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
          _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
        }
      else
        {
          _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
          _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
          _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
          _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
          _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
          _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
          _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
          _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] = in[i][v];
}

// Same as above, with one pointer per lane.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<double, 8> *in,
                               std::array<double *, 8>          &out)
{
  const unsigned int n_chunks = n_entries / 4;
  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
      __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
      __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
      __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
      __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
      __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
      __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
      __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
      __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
      __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
      __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
      __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
      __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
      __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
      __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
      __m256d res7 = _mm512_extractf64x4_pd(v3, 1);

      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
          _mm256_storeu_pd(out[0] + 4 * i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
          _mm256_storeu_pd(out[1] + 4 * i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
          _mm256_storeu_pd(out[2] + 4 * i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
          _mm256_storeu_pd(out[3] + 4 * i, res3);
          res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
          _mm256_storeu_pd(out[4] + 4 * i, res4);
          res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
          _mm256_storeu_pd(out[5] + 4 * i, res5);
          res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
          _mm256_storeu_pd(out[6] + 4 * i, res6);
          res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
          _mm256_storeu_pd(out[7] + 4 * i, res7);
        }
      else
        {
          _mm256_storeu_pd(out[0] + 4 * i, res0);
          _mm256_storeu_pd(out[1] + 4 * i, res1);
          _mm256_storeu_pd(out[2] + 4 * i, res2);
          _mm256_storeu_pd(out[3] + 4 * i, res3);
          _mm256_storeu_pd(out[4] + 4 * i, res4);
          _mm256_storeu_pd(out[5] + 4 * i, res5);
          _mm256_storeu_pd(out[6] + 4 * i, res6);
          _mm256_storeu_pd(out[7] + 4 * i, res7);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] = in[i][v];
}
// Specialization for float and AVX-512 (16 lanes).
template <>
class VectorizedArray<float, 16>
  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
{
public:
  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
  {}

  // Broadcast a scalar to all sixteen lanes.
  VectorizedArray &
  operator=(const float x) &
  {
    data = _mm512_set1_ps(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const float scalar) && = delete;

  // Lane access.
  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  // Compound assignment uses plain vector arithmetic where the compiler
  // supports it (DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS) and the
  // _mm512_add_ps/_mm512_sub_ps/_mm512_mul_ps/_mm512_div_ps intrinsics
  // otherwise.

  // Unaligned loads and stores.
  void
  load(const float *ptr)
  {
    data = _mm512_loadu_ps(ptr);
  }

  void
  store(float *ptr) const
  {
    _mm512_storeu_ps(ptr, data);
  }

  // Non-temporal store; requires 64-byte alignment.
  void
  streaming_store(float *ptr) const
  {
    Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
           ExcMessage("Memory not aligned"));
    _mm512_stream_ps(ptr, data);
  }

  // Gather through an offset table; uses the AVX-512 gather instruction
  // when enabled and a scalar loop otherwise.
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
#    ifdef DEAL_II_USE_VECTORIZATION_GATHER
    // The sixteen offsets are loaded through a 512-bit float load and the
    // bits reinterpreted as integers.
    const __m512 index_val =
      _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);

    // Gather all sixteen lanes; the zero source value is never used but
    // avoids warnings about uninitialized data.
    __m512    zero = {};
    __mmask16 mask = 0xFFFF;

    data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
#    else
    for (unsigned int i = 0; i < 16; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
#    endif
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
#    ifdef DEAL_II_USE_VECTORIZATION_GATHER
    for (unsigned int i = 0; i < 16; ++i)
      for (unsigned int j = i + 1; j < 16; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    const __m512 index_val =
      _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
    const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
    _mm512_i32scatter_ps(base_ptr, index, data, 4);
#    else
    for (unsigned int i = 0; i < 16; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
#    endif
  }

  // Horizontal sum: add the lower and upper 256-bit halves, then reduce.
  float
  sum() const
  {
    VectorizedArray<float, 8> t1;
    t1.data = _mm256_add_ps(this->get_lower(), this->get_upper());
    return t1.sum();
  }

  // The AVX-512 register holding the sixteen lanes.
  mutable __m512 data;

private:
  __m256
  get_lower() const
  {
    return _mm512_castps512_ps256(data);
  }

  __m256
  get_upper() const
  {
    return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1));
  }

  // Absolute value: clear the sign bit using a 32-bit integer andnot.
  VectorizedArray
  get_abs() const
  {
    __m512 mask = _mm512_set1_ps(-0.f);
    VectorizedArray res;
    res.data = reinterpret_cast<__m512>(
      _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
                          reinterpret_cast<__m512i>(data)));
    return res;
  }

  // Friend declarations for the global math-function overloads; each is
  // prefixed by
  //   template <typename Number2, std::size_t width2>
  // (the function signatures themselves are elided in this excerpt).
};
4794 const unsigned int *offsets,
4801 const unsigned int n_chunks = n_entries / 4;
4809 __m512 t0, t1, t2, t3;
4812 for (
unsigned int i = 0; i < n_chunks; ++i)
4814 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
4815 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
4816 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
4817 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
4818 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
4819 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
4820 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
4821 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
4822 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
4823 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
4824 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
4825 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
4826 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
4827 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
4828 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
4829 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
4831 __m512
v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4832 __m512
v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4833 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4834 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4836 out[4 * i + 0].
data = _mm512_shuffle_ps(
v0, v2, 0x88);
4837 out[4 * i + 1].
data = _mm512_shuffle_ps(
v0, v2, 0xdd);
4838 out[4 * i + 2].
data = _mm512_shuffle_ps(
v1, v3, 0x88);
4839 out[4 * i + 3].
data = _mm512_shuffle_ps(
v1, v3, 0xdd);
4843 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4844 out[i].
gather(in + i, offsets);
4855 const std::array<float *, 16> &in,
4860 const unsigned int n_chunks = n_entries / 4;
4862 __m512 t0, t1, t2, t3;
4865 for (
unsigned int i = 0; i < n_chunks; ++i)
4867 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
4868 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
4869 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
4870 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
4871 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
4872 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
4873 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
4874 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
4875 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
4876 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
4877 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
4878 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
4879 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
4880 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
4881 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
4882 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
4884 __m512
v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4885 __m512
v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4886 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4887 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4889 out[4 * i + 0].
data = _mm512_shuffle_ps(
v0, v2, 0x88);
4890 out[4 * i + 1].
data = _mm512_shuffle_ps(
v0, v2, 0xdd);
4891 out[4 * i + 2].
data = _mm512_shuffle_ps(
v1, v3, 0x88);
4892 out[4 * i + 3].
data = _mm512_shuffle_ps(
v1, v3, 0xdd);
4895 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4907 const unsigned int n_entries,
4909 const unsigned int *offsets,
4912 const unsigned int n_chunks = n_entries / 4;
4913 for (
unsigned int i = 0; i < n_chunks; ++i)
4915 __m512 t0 = _mm512_shuffle_ps(in[4 * i].
data, in[1 + 4 * i].
data, 0x44);
4916 __m512 t1 = _mm512_shuffle_ps(in[4 * i].
data, in[1 + 4 * i].
data, 0xee);
4918 _mm512_shuffle_ps(in[2 + 4 * i].
data, in[3 + 4 * i].
data, 0x44);
4920 _mm512_shuffle_ps(in[2 + 4 * i].
data, in[3 + 4 * i].
data, 0xee);
4921 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
4922 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
4923 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
4924 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
4926 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
4927 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
4928 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
4929 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
4930 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
4931 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
4932 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
4933 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
4934 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
4935 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
4936 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
4937 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
4938 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
4939 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
4940 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
4941 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
4948 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
4949 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4950 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
4951 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4952 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
4953 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4954 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
4955 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4956 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
4957 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4958 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
4959 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4960 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
4961 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4962 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
4963 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4964 res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
4965 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4966 res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
4967 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4968 res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
4969 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4970 res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
4971 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4972 res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
4973 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4974 res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
4975 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4976 res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
4977 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4978 res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
4979 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
      else
        {
          _mm_storeu_ps(out + 4 * i + offsets[0], res0);
          _mm_storeu_ps(out + 4 * i + offsets[1], res1);
          _mm_storeu_ps(out + 4 * i + offsets[2], res2);
          _mm_storeu_ps(out + 4 * i + offsets[3], res3);
          _mm_storeu_ps(out + 4 * i + offsets[4], res4);
          _mm_storeu_ps(out + 4 * i + offsets[5], res5);
          _mm_storeu_ps(out + 4 * i + offsets[6], res6);
          _mm_storeu_ps(out + 4 * i + offsets[7], res7);
          _mm_storeu_ps(out + 4 * i + offsets[8], res8);
          _mm_storeu_ps(out + 4 * i + offsets[9], res9);
          _mm_storeu_ps(out + 4 * i + offsets[10], res10);
          _mm_storeu_ps(out + 4 * i + offsets[11], res11);
          _mm_storeu_ps(out + 4 * i + offsets[12], res12);
          _mm_storeu_ps(out + 4 * i + offsets[13], res13);
          _mm_storeu_ps(out + 4 * i + offsets[14], res14);
          _mm_storeu_ps(out + 4 * i + offsets[15], res15);
        }
    }
  // Remainder loop over the entries that do not fill a complete chunk of 4
  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[offsets[v] + i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[offsets[v] + i] = in[i][v];
}
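The transpose kernels are normally used as a pair: vectorized_load_and_transpose() gathers lane-wise data into VectorizedArray objects, the caller operates on them, and vectorized_transpose_and_store() writes the result back. The following is a minimal usage sketch, not part of this header; the function name, the container choices, and the include path are illustrative assumptions (deal.II code would typically use AlignedVector instead of std::vector for the vectorized temporary).

#include <deal.II/base/vectorization.h>

#include <vector>

using namespace dealii;

// Square 'n_entries' values per SIMD lane and accumulate them into 'dst'.
// 'offsets' must hold VectorizedArray<double>::size() entries, one start
// index per lane.
void
square_and_accumulate(const std::vector<double> &src,
                      std::vector<double>       &dst,
                      const unsigned int        *offsets,
                      const unsigned int         n_entries)
{
  std::vector<VectorizedArray<double>> local(n_entries);

  // lane v reads src[offsets[v] + i] into local[i][v]
  vectorized_load_and_transpose(n_entries, src.data(), offsets, local.data());

  for (unsigned int i = 0; i < n_entries; ++i)
    local[i] = local[i] * local[i];

  // 'true' selects the add-into variant: dst[offsets[v] + i] += local[i][v]
  vectorized_transpose_and_store(
    true, n_entries, local.data(), offsets, dst.data());
}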
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<float, 16> *in,
                               std::array<float *, 16>          &out)
{
  // As above, but the output of each lane goes through its own pointer
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
      __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
      __m512 t2 = _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
      __m512 t3 = _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
      __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
      __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
      __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
      __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);

      __m128 res0  = _mm512_extractf32x4_ps(u0, 0);
      __m128 res4  = _mm512_extractf32x4_ps(u0, 1);
      __m128 res8  = _mm512_extractf32x4_ps(u0, 2);
      __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
      __m128 res1  = _mm512_extractf32x4_ps(u1, 0);
      __m128 res5  = _mm512_extractf32x4_ps(u1, 1);
      __m128 res9  = _mm512_extractf32x4_ps(u1, 2);
      __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
      __m128 res2  = _mm512_extractf32x4_ps(u2, 0);
      __m128 res6  = _mm512_extractf32x4_ps(u2, 1);
      __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
      __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
      __m128 res3  = _mm512_extractf32x4_ps(u3, 0);
      __m128 res7  = _mm512_extractf32x4_ps(u3, 1);
      __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
      __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
      // If add_into is set, read the current content of the per-lane output
      // arrays, add the transposed values, and store; otherwise overwrite.
      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
          _mm_storeu_ps(out[0] + 4 * i, res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
          _mm_storeu_ps(out[7] + 4 * i, res7);
          res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
          _mm_storeu_ps(out[8] + 4 * i, res8);
          res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
          _mm_storeu_ps(out[9] + 4 * i, res9);
          res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
          _mm_storeu_ps(out[10] + 4 * i, res10);
          res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
          _mm_storeu_ps(out[11] + 4 * i, res11);
          res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
          _mm_storeu_ps(out[12] + 4 * i, res12);
          res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
          _mm_storeu_ps(out[13] + 4 * i, res13);
          res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
          _mm_storeu_ps(out[14] + 4 * i, res14);
          res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
          _mm_storeu_ps(out[15] + 4 * i, res15);
        }
      else
        {
          _mm_storeu_ps(out[0] + 4 * i, res0);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          _mm_storeu_ps(out[7] + 4 * i, res7);
          _mm_storeu_ps(out[8] + 4 * i, res8);
          _mm_storeu_ps(out[9] + 4 * i, res9);
          _mm_storeu_ps(out[10] + 4 * i, res10);
          _mm_storeu_ps(out[11] + 4 * i, res11);
          _mm_storeu_ps(out[12] + 4 * i, res12);
          _mm_storeu_ps(out[13] + 4 * i, res13);
          _mm_storeu_ps(out[14] + 4 * i, res14);
          _mm_storeu_ps(out[15] + 4 * i, res15);
        }
    }
  // Remainder loop over the entries that do not fill a complete chunk of 4
  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[v][i] = in[i][v];
}
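In the variant just above, each SIMD lane writes into its own output array rather than into one array at lane-specific offsets. A minimal, hypothetical call site (an AVX-512 build is assumed so that this specialization is selected) might look as follows:

// Write 'n' vectorized entries into 16 per-lane destination arrays,
// overwriting (add_into == false) whatever the destinations contained.
void
store_per_lane(const VectorizedArray<float, 16> *local,
               const unsigned int                n,
               std::array<float *, 16>          &destinations)
{
  vectorized_transpose_and_store(false, n, local, destinations);
}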
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
   defined(__VSX__)
/**
 * Specialization of VectorizedArray for double and width 2 (AltiVec/VSX).
 */
template <>
class VectorizedArray<double, 2>
  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
{
public:
  using value_type = double;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
  {}

  VectorizedArray &
  operator=(const double x) &
  {
    data = vec_splats(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const double scalar) && = delete;

  double &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  void
  load(const double *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  void
  store(double *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  // The actual VSX data field.
  __vector double data;
  // Make the respective std:: overloads friends so that they can access
  // the 'data' field directly:
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);

  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);

  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);

  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};
/**
 * Specialization of VectorizedArray for float and width 4 (AltiVec/VSX).
 */
template <>
class VectorizedArray<float, 4>
  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
{
public:
  using value_type = float;

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
  {}

  VectorizedArray &
  operator=(const float x) &
  {
    data = vec_splats(x);
    return *this;
  }

  // Assigning a scalar to a temporary is not allowed.
  VectorizedArray &
  operator=(const float scalar) && = delete;

  float &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  void
  load(const float *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  void
  store(float *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  // The actual VSX data field.
  __vector float data;
  // Make the respective std:: overloads friends so that they can access
  // the 'data' field directly:
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);

  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);

  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);

  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
};

# endif
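A small sketch of how the gather()/scatter() members of these VSX specializations are meant to be used. The helper function is hypothetical and assumes a build where VectorizedArray<double, 2> is available (the same interface exists on SSE2 and NEON builds as well):

// Read v[0] and v[5] into the two lanes, double them, and write them back.
void
double_two_strided_entries(double *v)
{
  const unsigned int offsets[2] = {0, 5};

  VectorizedArray<double, 2> a;
  a.gather(v, offsets);  // a[0] = v[0], a[1] = v[5]
  a = a + a;             // lane-wise doubling
  a.scatter(offsets, v); // v[0] = a[0], v[5] = a[1]
}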
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE bool
operator==(const VectorizedArray<Number, width> &lhs,
           const VectorizedArray<Number, width> &rhs)
{
  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
    if (lhs[i] != rhs[i])
      return false;

  return true;
}
// The remaining free operators on VectorizedArray follow. Their definitions
// are short forwarding functions that rely on the compound-assignment
// operators of VectorizedArray:
//  - unary operator+ (returns its argument) and unary operator- (negates
//    each lane),
//  - binary operator+, operator-, operator* and operator/ between two
//    VectorizedArray<Number, width> objects,
//  - the same four binary operators between a VectorizedArray<Number, width>
//    and a scalar Number, in either operand order,
//  - convenience variants combining VectorizedArray<float, width> with a
//    double scalar, declared with template <std::size_t width> only.
// Each definition is preceded by template <typename Number, std::size_t width>
// (or template <std::size_t width> for the float/double mixed variants).
template <typename Number, std::size_t width>
inline std::ostream &
operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
{
  constexpr unsigned int n = VectorizedArray<Number, width>::size();
  for (unsigned int i = 0; i < n - 1; ++i)
    out << p[i] << ' ';
  out << p[n - 1];

  return out;
}
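The output operator simply prints the lanes separated by single spaces; a short, hypothetical snippet:

#include <iostream>

void
print_lanes()
{
  const VectorizedArray<double> v = 1.5; // broadcast 1.5 to all lanes
  std::cout << v << std::endl;           // e.g. "1.5 1.5 1.5 1.5" on a 256-bit build
}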
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
template <SIMDComparison predicate, typename Number>
DEAL_II_ALWAYS_INLINE inline Number
compare_and_apply_mask(const Number &left,
                       const Number &right,
                       const Number &true_value,
                       const Number &false_value)
{
  bool mask;
  switch (predicate)
    {
      case SIMDComparison::equal:                 mask = (left == right);  break;
      case SIMDComparison::not_equal:             mask = (left != right);  break;
      case SIMDComparison::less_than:             mask = (left < right);   break;
      case SIMDComparison::less_than_or_equal:    mask = (left <= right);  break;
      case SIMDComparison::greater_than:          mask = (left > right);   break;
      case SIMDComparison::greater_than_or_equal: mask = (left >= right);  break;
    }

  return mask ? true_value : false_value;
}
template <SIMDComparison predicate, typename Number>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
                       const VectorizedArray<Number, 1> &right,
                       const VectorizedArray<Number, 1> &true_value,
                       const VectorizedArray<Number, 1> &false_value)
{
  VectorizedArray<Number, 1> result;
  result.data = compare_and_apply_mask<predicate, Number>(
    left.data, right.data, true_value.data, false_value.data);
  return result;
}

# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
compare_and_apply_mask(const VectorizedArray<float, 16> &left,
                       const VectorizedArray<float, 16> &right,
                       const VectorizedArray<float, 16> &true_values,
                       const VectorizedArray<float, 16> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 16> result;
  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
  return result;
}
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
compare_and_apply_mask(const VectorizedArray<double, 8> &left,
                       const VectorizedArray<double, 8> &right,
                       const VectorizedArray<double, 8> &true_values,
                       const VectorizedArray<double, 8> &false_values)
{
  const __mmask8 mask =
    _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 8> result;
  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
  return result;
}

# endif
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
compare_and_apply_mask(const VectorizedArray<float, 8> &left,
                       const VectorizedArray<float, 8> &right,
                       const VectorizedArray<float, 8> &true_values,
                       const VectorizedArray<float, 8> &false_values)
{
  const __m256 mask =
    _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 8> result;
  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
  return result;
}
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
compare_and_apply_mask(const VectorizedArray<double, 4> &left,
                       const VectorizedArray<double, 4> &right,
                       const VectorizedArray<double, 4> &true_values,
                       const VectorizedArray<double, 4> &false_values)
{
  const __m256d mask =
    _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 4> result;
  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
  return result;
}

# endif
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
compare_and_apply_mask(const VectorizedArray<float, 4> &left,
                       const VectorizedArray<float, 4> &right,
                       const VectorizedArray<float, 4> &true_values,
                       const VectorizedArray<float, 4> &false_values)
{
  // SSE2 has no comparison intrinsic taking the predicate as an argument,
  // so the matching intrinsic is selected by hand:
  __m128 mask;
  switch (predicate)
    {
      case SIMDComparison::equal:                 mask = _mm_cmpeq_ps(left.data, right.data);  break;
      case SIMDComparison::not_equal:             mask = _mm_cmpneq_ps(left.data, right.data); break;
      case SIMDComparison::less_than:             mask = _mm_cmplt_ps(left.data, right.data);  break;
      case SIMDComparison::less_than_or_equal:    mask = _mm_cmple_ps(left.data, right.data);  break;
      case SIMDComparison::greater_than:          mask = _mm_cmpgt_ps(left.data, right.data);  break;
      case SIMDComparison::greater_than_or_equal: mask = _mm_cmpge_ps(left.data, right.data);  break;
    }

  VectorizedArray<float, 4> result;
  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
                          _mm_andnot_ps(mask, false_values.data));
  return result;
}
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
compare_and_apply_mask(const VectorizedArray<double, 2> &left,
                       const VectorizedArray<double, 2> &right,
                       const VectorizedArray<double, 2> &true_values,
                       const VectorizedArray<double, 2> &false_values)
{
  __m128d mask;
  switch (predicate)
    {
      case SIMDComparison::equal:                 mask = _mm_cmpeq_pd(left.data, right.data);  break;
      case SIMDComparison::not_equal:             mask = _mm_cmpneq_pd(left.data, right.data); break;
      case SIMDComparison::less_than:             mask = _mm_cmplt_pd(left.data, right.data);  break;
      case SIMDComparison::less_than_or_equal:    mask = _mm_cmple_pd(left.data, right.data);  break;
      case SIMDComparison::greater_than:          mask = _mm_cmpgt_pd(left.data, right.data);  break;
      case SIMDComparison::greater_than_or_equal: mask = _mm_cmpge_pd(left.data, right.data);  break;
    }

  VectorizedArray<double, 2> result;
  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
                          _mm_andnot_pd(mask, false_values.data));
  return result;
}

# endif
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ARM_NEON)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
compare_and_apply_mask(const VectorizedArray<float, 4> &left,
                       const VectorizedArray<float, 4> &right,
                       const VectorizedArray<float, 4> &true_values,
                       const VectorizedArray<float, 4> &false_values)
{
  uint32x4_t mask;
  switch (predicate)
    {
      case SIMDComparison::equal:                 mask = vceqq_f32(left.data, right.data); break;
      case SIMDComparison::not_equal:             mask = vmvnq_u32(vceqq_f32(left.data, right.data)); break;
      case SIMDComparison::less_than:             mask = vcltq_f32(left.data, right.data); break;
      case SIMDComparison::less_than_or_equal:    mask = vcleq_f32(left.data, right.data); break;
      case SIMDComparison::greater_than:          mask = vcgtq_f32(left.data, right.data); break;
      case SIMDComparison::greater_than_or_equal: mask = vcgeq_f32(left.data, right.data); break;
    }

  VectorizedArray<float, 4> result;
  result.data = vreinterpretq_f32_u32(vorrq_u32(
    vandq_u32(mask, vreinterpretq_u32_f32(true_values.data)),
    vandq_u32(vmvnq_u32(mask), vreinterpretq_u32_f32(false_values.data))));
  return result;
}
6371 mask = vreinterpretq_u64_u32(
6372 vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(left.
data, right.
data))));
6389 result.
data = vreinterpretq_f64_u64(vorrq_u64(
6390 vandq_u64(mask, vreinterpretq_u64_f64(true_values.
data)),
6391 vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(mask))),
6392 vreinterpretq_u64_f64(false_values.
data))));
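A typical use of compare_and_apply_mask() is a branch-free, lane-wise select. The following clamp is a hypothetical example; it works with any of the implementations above, including the scalar fallback, and assumes the SIMDComparison::greater_than enumerator used there:

// Replace every negative lane of 'v' by zero, without branching per lane.
VectorizedArray<double>
clamp_to_non_negative(const VectorizedArray<double> &v)
{
  const VectorizedArray<double> zero = 0.;
  return compare_and_apply_mask<SIMDComparison::greater_than>(v, zero, v, zero);
}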
namespace internal
{
  // The traits class VectorizedArrayTrait is defined here, in a generic
  // version for plain scalar types T and in a specialization for
  // VectorizedArray<T, width_>. Both expose value_type and
  // vectorized_value_type together with the static helpers width(),
  // stride(), get(), and get_from_vectorized(), so that generic code can
  // treat scalars and vectorized types uniformly.
} // namespace internal

DEAL_II_NAMESPACE_CLOSE

/**
 * Implementation of functions from cmath on VectorizedArray. These functions
 * apply the respective scalar function to each lane of the argument.
 */
namespace std
{
  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  sin(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::sin(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  cos(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::cos(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  tan(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::tan(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  acos(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::acos(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  asin(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::asin(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  atan(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::atan(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  cosh(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::cosh(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  sinh(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::sinh(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  tanh(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::tanh(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  acosh(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::acosh(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  asinh(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::asinh(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  atanh(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::atanh(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  exp(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::exp(x[i]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  log(const ::VectorizedArray<Number, width> &x)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::log(x[i]);
    return out;
  }
  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  sqrt(const ::VectorizedArray<Number, width> &x)
  {
    return x.get_sqrt();
  }
  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  pow(const ::VectorizedArray<Number, width> &x, const Number p)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::pow(x[i], p);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  pow(const ::VectorizedArray<Number, width> &x,
      const ::VectorizedArray<Number, width> &p)
  {
    ::VectorizedArray<Number, width> out;
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      out[i] = std::pow(x[i], p[i]);
    return out;
  }
  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  abs(const ::VectorizedArray<Number, width> &x)
  {
    return x.get_abs();
  }
  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  max(const ::VectorizedArray<Number, width> &x,
      const ::VectorizedArray<Number, width> &y)
  {
    return x.get_max(y);
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  min(const ::VectorizedArray<Number, width> &x,
      const ::VectorizedArray<Number, width> &y)
  {
    return x.get_min(y);
  }

} // namespace std
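With these overloads, the usual cmath functions can be written for vectorized types exactly as for scalars. A short, hypothetical snippet (the include path assumes the usual deal.II installation layout):

#include <deal.II/base/vectorization.h>

#include <cmath>

using namespace dealii;

// Compute max(sin(x), |x - 1|) + x^2, lane by lane.
VectorizedArray<double>
lane_wise_math(const VectorizedArray<double> &x)
{
  const auto s = std::sin(x);        // lane-wise sine
  const auto a = std::abs(x - 1.0);  // lane-wise |x - 1|
  return std::max(s, a) + std::pow(x, 2.0);
}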
#ifdef DEAL_II_HAVE_CXX20