#ifndef dealii_vectorization_h
#define dealii_vectorization_h

#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0

#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
#    error \
      "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#  endif

#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
#    error \
      "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#  endif

#  elif defined(__ALTIVEC__)

#    include <x86intrin.h>
template <typename Number, std::size_t width>

             "You are trying to compare iterators into different arrays."));

             "You are trying to compare iterators into different arrays."));

    const typename T::value_type &

    template <typename U = T>
    std::enable_if_t<!std::is_same<U, const U>::value,
                     typename T::value_type> &

             "You can't decrement an iterator that is already at the beginning of the range."));

      return static_cast<std::ptrdiff_t>(lane) -
             static_cast<std::ptrdiff_t>(other.lane);
template <typename T, std::size_t width>

    template <typename U>

      auto i0 = this->begin();
      auto i1 = list.begin();

      for (; i1 != list.end(); ++i0, ++i1)

            "Initializer list exceeds size of this VectorizedArray object."));

      for (; i0 != this->end(); ++i0)

    static constexpr std::size_t
template <typename Number, std::size_t width>

  static_assert(width == 1,
                "You specified an illegal width that is not supported.");

  template <typename U>

  template <typename OtherNumber>

  template <typename OtherNumber>

  gather(const Number *base_ptr, const unsigned int *offsets)
  {
    data = base_ptr[offsets[0]];
  }

  scatter(const unsigned int *offsets, Number *base_ptr) const
  {
    base_ptr[offsets[0]] = data;
  }
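
  // Usage sketch (illustrative addition, not part of the original header):
  // the fallback gather()/scatter() above only touch the single lane of a
  // width-1 VectorizedArray. A hypothetical caller could look like this:
  //
  //   double                     src[4]     = {1., 2., 3., 4.};
  //   const unsigned int         offsets[1] = {2};
  //   VectorizedArray<double, 1> v;
  //   v.gather(src, offsets);  // v[0] == 3.
  //   v *= 10.;
  //   v.scatter(offsets, src); // src[2] == 30.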
  template <typename Number2, std::size_t width2>

  template <typename Number2, std::size_t width2>

  template <typename Number2, std::size_t width2>

  template <typename Number2, std::size_t width2>

template <typename Number,

template <typename VectorizedArrayType>

  static_assert(
    std::is_same<VectorizedArrayType,
                 VectorizedArray<typename VectorizedArrayType::value_type,
                                 VectorizedArrayType::size()>>::value,
    "VectorizedArrayType is not a VectorizedArray.");

  VectorizedArrayType result = u;
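
// Usage sketch (illustrative addition): make_vectorized_array() broadcasts a
// scalar to every lane of the requested VectorizedArray type.
//
//   const VectorizedArray<double> v = make_vectorized_array(2.5);
//   // all VectorizedArray<double>::size() lanes of v now hold 2.5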
template <typename Number, std::size_t width>

       const std::array<Number *, width> &ptrs,
       const unsigned int                 offset)

  for (unsigned int v = 0; v < width; ++v)
    out.data[v] = ptrs[v][offset];

template <typename Number, std::size_t width>

                              const unsigned int *offsets,

  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
      out[i][v] = in[offsets[v] + i];

template <typename Number, std::size_t width>

                              const std::array<Number *, width> &in,

  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
      out[i][v] = in[v][i];
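
// Usage sketch (illustrative addition): vectorized_load_and_transpose() reads
// n_entries values starting at in[offsets[v]] for each lane v and transposes
// them, so that afterwards out[i][v] == in[offsets[v] + i].
//
//   constexpr unsigned int  n_lanes = VectorizedArray<double>::size();
//   std::vector<double>     field(100 * n_lanes);
//   unsigned int            offsets[n_lanes];
//   for (unsigned int v = 0; v < n_lanes; ++v)
//     offsets[v] = 100 * v; // lane v reads field[100 * v + i]
//
//   VectorizedArray<double> values[4];
//   vectorized_load_and_transpose(4, field.data(), offsets, values);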
template <typename Number, std::size_t width>

                               const unsigned int n_entries,
                               const unsigned int *offsets,

    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[offsets[v] + i] += in[i][v];

    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[offsets[v] + i] = in[i][v];

template <typename Number, std::size_t width>

                               const unsigned int n_entries,
                               std::array<Number *, width> &out)

    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[v][i] += in[i][v];

    for (unsigned int i = 0; i < n_entries; ++i)
      for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
        out[v][i] = in[i][v];
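
// Usage sketch (illustrative addition): vectorized_transpose_and_store() is
// the inverse operation; with add_into == true the lanes are accumulated into
// the output array instead of overwriting it.
//
//   vectorized_transpose_and_store(true, 4, values, offsets, field.data());
//   // field[offsets[v] + i] += values[i][v] for all i and v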
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)

    template <typename U>

      data = _mm_set1_pd(x);

    operator=(const double scalar) && = delete;

      return *(reinterpret_cast<double *>(&data) + comp);

      return *(reinterpret_cast<const double *>(&data) + comp);

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

    load(const double *ptr)
    {
      data = _mm_loadu_pd(ptr);
    }

    load(const float *ptr)

      for (unsigned int i = 0; i < 2; ++i)

    store(double *ptr) const
    {
      _mm_storeu_pd(ptr, data);
    }

    store(float *ptr) const

      for (unsigned int i = 0; i < 2; ++i)

      Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
             ExcMessage("Memory not aligned"));
      _mm_stream_pd(ptr, data);

    gather(const double *base_ptr, const unsigned int *offsets)
    {
      for (unsigned int i = 0; i < 2; ++i)
        *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
    }

    scatter(const unsigned int *offsets, double *base_ptr) const
    {
      for (unsigned int i = 0; i < 2; ++i)
        base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
    }

      __m128d t1 = _mm_unpackhi_pd(data, data);
      __m128d t2 = _mm_add_pd(data, t1);
      return _mm_cvtsd_f64(t2);
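
      // Illustrative note (added, not from the original file): the three
      // intrinsics above form a horizontal add of the two lanes, i.e. the
      // scalar equivalent would be
      //
      //   const double *lanes = reinterpret_cast<const double *>(&data);
      //   return lanes[0] + lanes[1];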
      __m128d mask = _mm_set1_pd(-0.);
      res.data = _mm_andnot_pd(mask, data);

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

                                const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 2;
    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
        __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
        out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
        out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
      }

    for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 2; ++v)
        out[i][v] = in[offsets[v] + i];

                                const std::array<double *, 2> &in,

    const unsigned int n_chunks = n_entries / 2;
    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
        __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
        out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
        out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
      }

    for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 2; ++v)
        out[i][v] = in[v][i];

                                 const unsigned int n_entries,
                                 const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 2;

      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0 = in[2 * i + 0].data;
          __m128d u1 = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out + 2 * i + offsets[0],
                        _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
                                   res0));
          _mm_storeu_pd(out + 2 * i + offsets[1],
                        _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
                                   res1));
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[offsets[v] + i] += in[i][v];

      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0 = in[2 * i + 0].data;
          __m128d u1 = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out + 2 * i + offsets[0], res0);
          _mm_storeu_pd(out + 2 * i + offsets[1], res1);
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[offsets[v] + i] = in[i][v];

                                 const unsigned int n_entries,
                                 std::array<double *, 2> &out)

    const unsigned int n_chunks = n_entries / 2;

      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0 = in[2 * i + 0].data;
          __m128d u1 = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out[0] + 2 * i,
                        _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
          _mm_storeu_pd(out[1] + 2 * i,
                        _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[v][i] += in[i][v];

      for (unsigned int i = 0; i < n_chunks; ++i)
        {
          __m128d u0 = in[2 * i + 0].data;
          __m128d u1 = in[2 * i + 1].data;
          __m128d res0 = _mm_unpacklo_pd(u0, u1);
          __m128d res1 = _mm_unpackhi_pd(u0, u1);
          _mm_storeu_pd(out[0] + 2 * i, res0);
          _mm_storeu_pd(out[1] + 2 * i, res1);
        }

      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
        for (unsigned int v = 0; v < 2; ++v)
          out[v][i] = in[i][v];
    template <typename U>

      data = _mm_set1_ps(x);

    operator=(const float scalar) && = delete;

      return *(reinterpret_cast<float *>(&data) + comp);

      return *(reinterpret_cast<const float *>(&data) + comp);

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

    load(const float *ptr)
    {
      data = _mm_loadu_ps(ptr);
    }

    store(float *ptr) const
    {
      _mm_storeu_ps(ptr, data);
    }

      Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
             ExcMessage("Memory not aligned"));
      _mm_stream_ps(ptr, data);

    gather(const float *base_ptr, const unsigned int *offsets)
    {
      for (unsigned int i = 0; i < 4; ++i)
        *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
    }

    scatter(const unsigned int *offsets, float *base_ptr) const
    {
      for (unsigned int i = 0; i < 4; ++i)
        base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
    }

      __m128 t1 = _mm_movehl_ps(data, data);
      __m128 t2 = _mm_add_ps(data, t1);
      __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
      __m128 t4 = _mm_add_ss(t2, t3);
      return _mm_cvtss_f32(t4);

      __m128 mask = _mm_set1_ps(-0.f);
      res.data = _mm_andnot_ps(mask, data);

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

                                const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
        __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
        __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
        __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
        __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
        __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
        __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
        __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
        out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
        out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
        out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
        out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
      }

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[i][v] = in[offsets[v] + i];

                                const std::array<float *, 4> &in,

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
        __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
        __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
        __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
        __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
        __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
        __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
        __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
        out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
        out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
        out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
        out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
      }

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[i][v] = in[v][i];

                                 const unsigned int n_entries,
                                 const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)

        __m128 u0 = in[4 * i + 0].data;
        __m128 u1 = in[4 * i + 1].data;
        __m128 u2 = in[4 * i + 2].data;
        __m128 u3 = in[4 * i + 3].data;
        __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
        __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
        __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
        __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
        u0 = _mm_shuffle_ps(t0, t2, 0x88);
        u1 = _mm_shuffle_ps(t0, t2, 0xdd);
        u2 = _mm_shuffle_ps(t1, t3, 0x88);
        u3 = _mm_shuffle_ps(t1, t3, 0xdd);

        u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
        _mm_storeu_ps(out + 4 * i + offsets[0], u0);
        u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
        _mm_storeu_ps(out + 4 * i + offsets[1], u1);
        u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
        _mm_storeu_ps(out + 4 * i + offsets[2], u2);
        u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
        _mm_storeu_ps(out + 4 * i + offsets[3], u3);

        _mm_storeu_ps(out + 4 * i + offsets[0], u0);
        _mm_storeu_ps(out + 4 * i + offsets[1], u1);
        _mm_storeu_ps(out + 4 * i + offsets[2], u2);
        _mm_storeu_ps(out + 4 * i + offsets[3], u3);

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] += in[i][v];

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] = in[i][v];

                                 const unsigned int n_entries,
                                 std::array<float *, 4> &out)

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)

        __m128 u0 = in[4 * i + 0].data;
        __m128 u1 = in[4 * i + 1].data;
        __m128 u2 = in[4 * i + 2].data;
        __m128 u3 = in[4 * i + 3].data;
        __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
        __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
        __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
        __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
        u0 = _mm_shuffle_ps(t0, t2, 0x88);
        u1 = _mm_shuffle_ps(t0, t2, 0xdd);
        u2 = _mm_shuffle_ps(t1, t3, 0x88);
        u3 = _mm_shuffle_ps(t1, t3, 0xdd);

        u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
        _mm_storeu_ps(out[0] + 4 * i, u0);
        u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
        _mm_storeu_ps(out[1] + 4 * i, u1);
        u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
        _mm_storeu_ps(out[2] + 4 * i, u2);
        u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
        _mm_storeu_ps(out[3] + 4 * i, u3);

        _mm_storeu_ps(out[0] + 4 * i, u0);
        _mm_storeu_ps(out[1] + 4 * i, u1);
        _mm_storeu_ps(out[2] + 4 * i, u2);
        _mm_storeu_ps(out[3] + 4 * i, u3);

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] += in[i][v];

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] = in[i][v];
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)

    template <typename U>

      data = _mm256_set1_pd(x);

    operator=(const double scalar) && = delete;

      return *(reinterpret_cast<double *>(&data) + comp);

      return *(reinterpret_cast<const double *>(&data) + comp);

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

    load(const double *ptr)
    {
      data = _mm256_loadu_pd(ptr);
    }

    load(const float *ptr)
    {
      data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
    }

    store(double *ptr) const
    {
      _mm256_storeu_pd(ptr, data);
    }

    store(float *ptr) const
    {
      _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
    }

      Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
             ExcMessage("Memory not aligned"));
      _mm256_stream_pd(ptr, data);

    gather(const double *base_ptr, const unsigned int *offsets)

      const __m128 index_val =
        _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
      const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);

      __m256d zero = _mm256_setzero_pd();
      __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);

      data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);

      for (unsigned int i = 0; i < 4; ++i)
        *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];

    scatter(const unsigned int *offsets, double *base_ptr) const

      for (unsigned int i = 0; i < 4; ++i)
        base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);

      t1.data = _mm_add_pd(this->get_lower(), this->get_upper());

      return _mm256_castpd256_pd128(data);

      return _mm256_extractf128_pd(data, 1);

      __m256d mask = _mm256_set1_pd(-0.);
      res.data = _mm256_andnot_pd(mask, data);

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

                                const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 4;
    const double *in0 = in + offsets[0];
    const double *in1 = in + offsets[1];
    const double *in2 = in + offsets[2];
    const double *in3 = in + offsets[3];

    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
        __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
        __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
        __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
        __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
        __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
        __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
        __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
        out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
        out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
        out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
        out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
      }

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      out[i].gather(in + i, offsets);

                                const std::array<double *, 4> &in,

    const unsigned int n_chunks = n_entries / 4;
    const double *in0 = in[0];
    const double *in1 = in[1];
    const double *in2 = in[2];
    const double *in3 = in[3];

    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
        __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
        __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
        __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
        __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
        __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
        __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
        __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
        out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
        out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
        out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
        out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
      }

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)

                                 const unsigned int n_entries,
                                 const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 4;
    double *out0 = out + offsets[0];
    double *out1 = out + offsets[1];
    double *out2 = out + offsets[2];
    double *out3 = out + offsets[3];
    for (unsigned int i = 0; i < n_chunks; ++i)

        __m256d u0 = in[4 * i + 0].data;
        __m256d u1 = in[4 * i + 1].data;
        __m256d u2 = in[4 * i + 2].data;
        __m256d u3 = in[4 * i + 3].data;
        __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
        __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
        __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
        __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
        __m256d res0 = _mm256_unpacklo_pd(t0, t1);
        __m256d res1 = _mm256_unpackhi_pd(t0, t1);
        __m256d res2 = _mm256_unpacklo_pd(t2, t3);
        __m256d res3 = _mm256_unpackhi_pd(t2, t3);

        res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
        _mm256_storeu_pd(out0 + 4 * i, res0);
        res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
        _mm256_storeu_pd(out1 + 4 * i, res1);
        res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
        _mm256_storeu_pd(out2 + 4 * i, res2);
        res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
        _mm256_storeu_pd(out3 + 4 * i, res3);

        _mm256_storeu_pd(out0 + 4 * i, res0);
        _mm256_storeu_pd(out1 + 4 * i, res1);
        _mm256_storeu_pd(out2 + 4 * i, res2);
        _mm256_storeu_pd(out3 + 4 * i, res3);

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] += in[i][v];

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[offsets[v] + i] = in[i][v];

                                 const unsigned int n_entries,
                                 std::array<double *, 4> &out)

    const unsigned int n_chunks = n_entries / 4;
    double *out0 = out[0];
    double *out1 = out[1];
    double *out2 = out[2];
    double *out3 = out[3];
    for (unsigned int i = 0; i < n_chunks; ++i)

        __m256d u0 = in[4 * i + 0].data;
        __m256d u1 = in[4 * i + 1].data;
        __m256d u2 = in[4 * i + 2].data;
        __m256d u3 = in[4 * i + 3].data;
        __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
        __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
        __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
        __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
        __m256d res0 = _mm256_unpacklo_pd(t0, t1);
        __m256d res1 = _mm256_unpackhi_pd(t0, t1);
        __m256d res2 = _mm256_unpacklo_pd(t2, t3);
        __m256d res3 = _mm256_unpackhi_pd(t2, t3);

        res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
        _mm256_storeu_pd(out0 + 4 * i, res0);
        res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
        _mm256_storeu_pd(out1 + 4 * i, res1);
        res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
        _mm256_storeu_pd(out2 + 4 * i, res2);
        res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
        _mm256_storeu_pd(out3 + 4 * i, res3);

        _mm256_storeu_pd(out0 + 4 * i, res0);
        _mm256_storeu_pd(out1 + 4 * i, res1);
        _mm256_storeu_pd(out2 + 4 * i, res2);
        _mm256_storeu_pd(out3 + 4 * i, res3);

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] += in[i][v];

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 4; ++v)
        out[v][i] = in[i][v];
    template <typename U>

      data = _mm256_set1_ps(x);

    operator=(const float scalar) && = delete;

      return *(reinterpret_cast<float *>(&data) + comp);

      return *(reinterpret_cast<const float *>(&data) + comp);

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

    load(const float *ptr)
    {
      data = _mm256_loadu_ps(ptr);
    }

    store(float *ptr) const
    {
      _mm256_storeu_ps(ptr, data);
    }

      Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
             ExcMessage("Memory not aligned"));
      _mm256_stream_ps(ptr, data);

    gather(const float *base_ptr, const unsigned int *offsets)

      const __m256 index_val =
        _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
      const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);

      __m256 zero = _mm256_setzero_ps();
      __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);

      data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);

      for (unsigned int i = 0; i < 8; ++i)
        *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];

    scatter(const unsigned int *offsets, float *base_ptr) const

      for (unsigned int i = 0; i < 8; ++i)
        base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);

      t1.data = _mm_add_ps(this->get_lower(), this->get_upper());

      return _mm256_castps256_ps128(data);

      return _mm256_extractf128_ps(data, 1);

      __m256 mask = _mm256_set1_ps(-0.f);
      res.data = _mm256_andnot_ps(mask, data);

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

                                const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m256 t0, t1, t2, t3 = {};
        t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
        t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
        t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
        t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
        t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
        t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
        t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
        t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);

        __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
        __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
        __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
        __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
        out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
        out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
        out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
        out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
      }

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      out[i].gather(in + i, offsets);

                                const std::array<float *, 8> &in,

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m256 t0, t1, t2, t3 = {};
        t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
        t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
        t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
        t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
        t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
        t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
        t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
        t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);

        __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
        __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
        __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
        __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
        out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
        out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
        out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
        out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
      }

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)

                                 const unsigned int n_entries,
                                 const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)

        __m256 u0 = in[4 * i + 0].data;
        __m256 u1 = in[4 * i + 1].data;
        __m256 u2 = in[4 * i + 2].data;
        __m256 u3 = in[4 * i + 3].data;
        __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
        __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
        __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
        __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
        u0 = _mm256_shuffle_ps(t0, t2, 0x88);
        u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
        u2 = _mm256_shuffle_ps(t1, t3, 0x88);
        u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
        __m128 res0 = _mm256_extractf128_ps(u0, 0);
        __m128 res4 = _mm256_extractf128_ps(u0, 1);
        __m128 res1 = _mm256_extractf128_ps(u1, 0);
        __m128 res5 = _mm256_extractf128_ps(u1, 1);
        __m128 res2 = _mm256_extractf128_ps(u2, 0);
        __m128 res6 = _mm256_extractf128_ps(u2, 1);
        __m128 res3 = _mm256_extractf128_ps(u3, 0);
        __m128 res7 = _mm256_extractf128_ps(u3, 1);

        res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
        _mm_storeu_ps(out + 4 * i + offsets[0], res0);
        res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
        _mm_storeu_ps(out + 4 * i + offsets[1], res1);
        res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
        _mm_storeu_ps(out + 4 * i + offsets[2], res2);
        res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
        _mm_storeu_ps(out + 4 * i + offsets[3], res3);
        res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
        _mm_storeu_ps(out + 4 * i + offsets[4], res4);
        res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
        _mm_storeu_ps(out + 4 * i + offsets[5], res5);
        res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
        _mm_storeu_ps(out + 4 * i + offsets[6], res6);
        res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
        _mm_storeu_ps(out + 4 * i + offsets[7], res7);

        _mm_storeu_ps(out + 4 * i + offsets[0], res0);
        _mm_storeu_ps(out + 4 * i + offsets[1], res1);
        _mm_storeu_ps(out + 4 * i + offsets[2], res2);
        _mm_storeu_ps(out + 4 * i + offsets[3], res3);
        _mm_storeu_ps(out + 4 * i + offsets[4], res4);
        _mm_storeu_ps(out + 4 * i + offsets[5], res5);
        _mm_storeu_ps(out + 4 * i + offsets[6], res6);
        _mm_storeu_ps(out + 4 * i + offsets[7], res7);

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] += in[i][v];

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] = in[i][v];

                                 const unsigned int n_entries,
                                 std::array<float *, 8> &out)

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)

        __m256 u0 = in[4 * i + 0].data;
        __m256 u1 = in[4 * i + 1].data;
        __m256 u2 = in[4 * i + 2].data;
        __m256 u3 = in[4 * i + 3].data;
        __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
        __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
        __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
        __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
        u0 = _mm256_shuffle_ps(t0, t2, 0x88);
        u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
        u2 = _mm256_shuffle_ps(t1, t3, 0x88);
        u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
        __m128 res0 = _mm256_extractf128_ps(u0, 0);
        __m128 res4 = _mm256_extractf128_ps(u0, 1);
        __m128 res1 = _mm256_extractf128_ps(u1, 0);
        __m128 res5 = _mm256_extractf128_ps(u1, 1);
        __m128 res2 = _mm256_extractf128_ps(u2, 0);
        __m128 res6 = _mm256_extractf128_ps(u2, 1);
        __m128 res3 = _mm256_extractf128_ps(u3, 0);
        __m128 res7 = _mm256_extractf128_ps(u3, 1);

        res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
        _mm_storeu_ps(out[0] + 4 * i, res0);
        res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
        _mm_storeu_ps(out[1] + 4 * i, res1);
        res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
        _mm_storeu_ps(out[2] + 4 * i, res2);
        res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
        _mm_storeu_ps(out[3] + 4 * i, res3);
        res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
        _mm_storeu_ps(out[4] + 4 * i, res4);
        res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
        _mm_storeu_ps(out[5] + 4 * i, res5);
        res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
        _mm_storeu_ps(out[6] + 4 * i, res6);
        res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
        _mm_storeu_ps(out[7] + 4 * i, res7);

        _mm_storeu_ps(out[0] + 4 * i, res0);
        _mm_storeu_ps(out[1] + 4 * i, res1);
        _mm_storeu_ps(out[2] + 4 * i, res2);
        _mm_storeu_ps(out[3] + 4 * i, res3);
        _mm_storeu_ps(out[4] + 4 * i, res4);
        _mm_storeu_ps(out[5] + 4 * i, res5);
        _mm_storeu_ps(out[6] + 4 * i, res6);
        _mm_storeu_ps(out[7] + 4 * i, res7);

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] += in[i][v];

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] = in[i][v];
#  if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)

    template <typename U>

      data = _mm512_set1_pd(x);

    operator=(const double scalar) && = delete;

      return *(reinterpret_cast<double *>(&data) + comp);

      return *(reinterpret_cast<const double *>(&data) + comp);

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

#  ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS

    load(const double *ptr)
    {
      data = _mm512_loadu_pd(ptr);
    }

    load(const float *ptr)
    {
      data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
    }

    store(double *ptr) const
    {
      _mm512_storeu_pd(ptr, data);
    }

    store(float *ptr) const
    {
      _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
    }

      Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
             ExcMessage("Memory not aligned"));
      _mm512_stream_pd(ptr, data);

    gather(const double *base_ptr, const unsigned int *offsets)

      const __m256 index_val =
        _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
      const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);

      __mmask8 mask = 0xFF;

      data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);

    scatter(const unsigned int *offsets, double *base_ptr) const

      for (unsigned int i = 0; i < 8; ++i)
        for (unsigned int j = i + 1; j < 8; ++j)
          Assert(offsets[i] != offsets[j],
                 ExcMessage("Result of scatter undefined if two offset elements"
                            " point to the same position"));

      const __m256 index_val =
        _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
      const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
      _mm512_i32scatter_pd(base_ptr, index, data, 8);

      t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());

      return _mm512_castpd512_pd256(data);

      return _mm512_extractf64x4_pd(data, 1);

      __m512d mask = _mm512_set1_pd(-0.);
      res.data = reinterpret_cast<__m512d>(
        _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
                            reinterpret_cast<__m512i>(data)));

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

    template <typename Number2, std::size_t width2>

                                const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m512d t0, t1, t2, t3 = {};

        t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
        t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
        t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
        t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
        t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
        t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
        t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
        t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);

        __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
        __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
        __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
        __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
        out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
        out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
        out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
        out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
      }

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      out[i].gather(in + i, offsets);

                                const std::array<double *, 8> &in,

    const unsigned int n_chunks = n_entries / 4;
    for (unsigned int i = 0; i < n_chunks; ++i)
      {
        __m512d t0, t1, t2, t3 = {};

        t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
        t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
        t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
        t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
        t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
        t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
        t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
        t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);

        __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
        __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
        __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
        __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
        out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
        out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
        out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
        out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
      }

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)

                                 const unsigned int n_entries,
                                 const unsigned int *offsets,

    const unsigned int n_chunks = n_entries / 4;
    __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
    __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
    for (unsigned int i = 0; i < n_chunks; ++i)

        __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
        __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
        __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
        __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
        __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
        __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
        __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
        __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
        __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
        __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
        __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
        __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
        __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
        __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
        __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
        __m256d res7 = _mm512_extractf64x4_pd(v3, 1);

        res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
        _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
        res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
        _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
        res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
        _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
        res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
        _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
        res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
        _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
        res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
        _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
        res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
        _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
        res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
        _mm256_storeu_pd(out + 4 * i + offsets[7], res7);

        _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
        _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
        _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
        _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
        _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
        _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
        _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
        _mm256_storeu_pd(out + 4 * i + offsets[7], res7);

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] += in[i][v];

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[offsets[v] + i] = in[i][v];

                                 const unsigned int n_entries,
                                 std::array<double *, 8> &out)

    const unsigned int n_chunks = n_entries / 4;
    __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
    __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
    for (unsigned int i = 0; i < n_chunks; ++i)

        __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
        __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
        __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
        __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
        __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
        __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
        __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
        __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
        __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
        __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
        __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
        __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
        __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
        __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
        __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
        __m256d res7 = _mm512_extractf64x4_pd(v3, 1);

        res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
        _mm256_storeu_pd(out[0] + 4 * i, res0);
        res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
        _mm256_storeu_pd(out[1] + 4 * i, res1);
        res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
        _mm256_storeu_pd(out[2] + 4 * i, res2);
        res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
        _mm256_storeu_pd(out[3] + 4 * i, res3);
        res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
        _mm256_storeu_pd(out[4] + 4 * i, res4);
        res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
        _mm256_storeu_pd(out[5] + 4 * i, res5);
        res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
        _mm256_storeu_pd(out[6] + 4 * i, res6);
        res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
        _mm256_storeu_pd(out[7] + 4 * i, res7);

        _mm256_storeu_pd(out[0] + 4 * i, res0);
        _mm256_storeu_pd(out[1] + 4 * i, res1);
        _mm256_storeu_pd(out[2] + 4 * i, res2);
        _mm256_storeu_pd(out[3] + 4 * i, res3);
        _mm256_storeu_pd(out[4] + 4 * i, res4);
        _mm256_storeu_pd(out[5] + 4 * i, res5);
        _mm256_storeu_pd(out[6] + 4 * i, res6);
        _mm256_storeu_pd(out[7] + 4 * i, res7);

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] += in[i][v];

    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 8; ++v)
        out[v][i] = in[i][v];
3794 template <
typename U>
3806 data = _mm512_set1_ps(x);
3816 operator=(
const float scalar) && =
delete;
3826 return *(
reinterpret_cast<float *
>(&
data) + comp);
3837 return *(
reinterpret_cast<const float *
>(&
data) + comp);
3852# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3867# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3881# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3896# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3911 load(
const float *ptr)
3913 data = _mm512_loadu_ps(ptr);
3924 store(
float *ptr)
const
3926 _mm512_storeu_ps(ptr,
data);
3937 Assert(
reinterpret_cast<std::size_t
>(ptr) % 64 == 0,
3939 _mm512_stream_ps(ptr,
data);
3956 gather(
const float *base_ptr,
const unsigned int *offsets)
3961 const __m512 index_val =
3962 _mm512_loadu_ps(
reinterpret_cast<const float *
>(offsets));
3963 const __m512i
index = *
reinterpret_cast<const __m512i *
>(&index_val);
3969 __mmask16
mask = 0xFFFF;
3971 data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
3988 scatter(
const unsigned int *offsets,
float *base_ptr)
const
3990 for (
unsigned int i = 0; i < 16; ++i)
3991 for (
unsigned int j = i + 1; j < 16; ++j)
3992 Assert(offsets[i] != offsets[j],
3993 ExcMessage(
"Result of scatter undefined if two offset elements"
3994 " point to the same position"));
3999 const __m512 index_val =
4000 _mm512_loadu_ps(
reinterpret_cast<const float *
>(offsets));
4001 const __m512i
index = *
reinterpret_cast<const __m512i *
>(&index_val);
4002 _mm512_i32scatter_ps(base_ptr, index,
data, 4);
4013 t1.
data = _mm256_add_ps(this->get_lower(), this->get_upper());
4032 return _mm512_castps512_ps256(
data);
4042 return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(
data), 1));
4071 __m512
mask = _mm512_set1_ps(-0.f);
4073 res.
data =
reinterpret_cast<__m512
>(
4074 _mm512_andnot_epi32(
reinterpret_cast<__m512i
>(mask),
4075 reinterpret_cast<__m512i
>(
data)));
4106 template <
typename Number2, std::
size_t w
idth2>
4109 template <
typename Number2, std::
size_t w
idth2>
4112 template <
typename Number2, std::
size_t w
idth2>
4116 template <
typename Number2, std::
size_t w
idth2>
4131 const unsigned int * offsets,
4138 const unsigned int n_chunks = n_entries / 4;
4146 __m512 t0, t1, t2, t3;
4149 for (
unsigned int i = 0; i < n_chunks; ++i)
4151 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
4152 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
4153 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
4154 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
4155 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
4156 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
4157 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
4158 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
4159 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
4160 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
4161 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
4162 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
4163 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
4164 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
4165 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
4166 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
4168 __m512
v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4169 __m512
v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4170 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4171 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4173 out[4 * i + 0].
data = _mm512_shuffle_ps(
v0, v2, 0x88);
4174 out[4 * i + 1].
data = _mm512_shuffle_ps(
v0, v2, 0xdd);
4175 out[4 * i + 2].
data = _mm512_shuffle_ps(
v1, v3, 0x88);
4176 out[4 * i + 3].
data = _mm512_shuffle_ps(
v1, v3, 0xdd);
4180 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4181 out[i].
gather(in + i, offsets);
4192 const std::array<float *, 16> &in,
4197 const unsigned int n_chunks = n_entries / 4;
4199 __m512 t0, t1, t2, t3;
4202 for (
unsigned int i = 0; i < n_chunks; ++i)
4204 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
4205 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
4206 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
4207 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
4208 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
4209 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
4210 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
4211 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
4212 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
4213 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
4214 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
4215 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
4216 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
4217 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
4218 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
4219 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
4221 __m512
v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4222 __m512
v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4223 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4224 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4226 out[4 * i + 0].
data = _mm512_shuffle_ps(
v0, v2, 0x88);
4227 out[4 * i + 1].
data = _mm512_shuffle_ps(
v0, v2, 0xdd);
4228 out[4 * i + 2].
data = _mm512_shuffle_ps(
v1, v3, 0x88);
4229 out[4 * i + 3].
data = _mm512_shuffle_ps(
v1, v3, 0xdd);
4232 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4244 const unsigned int n_entries,
4246 const unsigned int * offsets,
4249 const unsigned int n_chunks = n_entries / 4;
4250 for (
unsigned int i = 0; i < n_chunks; ++i)
4252 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
4253 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
4255 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
4257 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
4258 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
4259 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
4260 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
4261 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
4263 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
4264 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
4265 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
4266 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
4267 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
4268 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
4269 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
4270 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
4271 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
4272 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
4273 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
4274 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
4275 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
4276 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
4277 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
4278 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
4285 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
4286 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4287 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
4288 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4289 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
4290 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4291 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
4292 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4293 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
4294 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4295 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
4296 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4297 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
4298 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4299 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
4300 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4301 res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
4302 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4303 res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
4304 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4305 res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
4306 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4307 res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
4308 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4309 res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
4310 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4311 res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
4312 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4313 res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
4314 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4315 res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
4316 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4320 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4321 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4322 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4323 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4324 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4325 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4326 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4327 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4328 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4329 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4330 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4331 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4332 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4333 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4334 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4335 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4341 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4342 for (
unsigned int v = 0; v < 16; ++v)
4343 out[offsets[v] + i] += in[i][v];
4345 for (
unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4346 for (
unsigned int v = 0; v < 16; ++v)
4347 out[offsets[v] + i] = in[i][v];
// As above, but with a separate destination pointer for each lane instead of
// one base pointer plus offsets.
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<float, 16> *in,
                               std::array<float *, 16> &         out)
{
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
      __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
      __m512 t2 =
        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
      __m512 t3 =
        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
      __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
      __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
      __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
      __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);

      __m128 res0  = _mm512_extractf32x4_ps(u0, 0);
      __m128 res4  = _mm512_extractf32x4_ps(u0, 1);
      __m128 res8  = _mm512_extractf32x4_ps(u0, 2);
      __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
      __m128 res1  = _mm512_extractf32x4_ps(u1, 0);
      __m128 res5  = _mm512_extractf32x4_ps(u1, 1);
      __m128 res9  = _mm512_extractf32x4_ps(u1, 2);
      __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
      __m128 res2  = _mm512_extractf32x4_ps(u2, 0);
      __m128 res6  = _mm512_extractf32x4_ps(u2, 1);
      __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
      __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
      __m128 res3  = _mm512_extractf32x4_ps(u3, 0);
      __m128 res7  = _mm512_extractf32x4_ps(u3, 1);
      __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
      __m128 res15 = _mm512_extractf32x4_ps(u3, 3);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
          _mm_storeu_ps(out[0] + 4 * i, res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
          _mm_storeu_ps(out[7] + 4 * i, res7);
          res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
          _mm_storeu_ps(out[8] + 4 * i, res8);
          res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
          _mm_storeu_ps(out[9] + 4 * i, res9);
          res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
          _mm_storeu_ps(out[10] + 4 * i, res10);
          res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
          _mm_storeu_ps(out[11] + 4 * i, res11);
          res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
          _mm_storeu_ps(out[12] + 4 * i, res12);
          res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
          _mm_storeu_ps(out[13] + 4 * i, res13);
          res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
          _mm_storeu_ps(out[14] + 4 * i, res14);
          res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
          _mm_storeu_ps(out[15] + 4 * i, res15);
        }
      else
        {
          _mm_storeu_ps(out[0] + 4 * i, res0);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          _mm_storeu_ps(out[7] + 4 * i, res7);
          _mm_storeu_ps(out[8] + 4 * i, res8);
          _mm_storeu_ps(out[9] + 4 * i, res9);
          _mm_storeu_ps(out[10] + 4 * i, res10);
          _mm_storeu_ps(out[11] + 4 * i, res11);
          _mm_storeu_ps(out[12] + 4 * i, res12);
          _mm_storeu_ps(out[13] + 4 * i, res13);
          _mm_storeu_ps(out[14] + 4 * i, res14);
          _mm_storeu_ps(out[15] + 4 * i, res15);
        }
    }

  // remainder loop over the entries that do not fill a complete chunk of 4
  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[v][i] = in[i][v];
}
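// Usage sketch for the pointer-array variant (illustrative only; the sizes
// are made up). Instead of one base pointer plus offsets, each lane gets its
// own destination pointer:
//
//   VectorizedArray<float, 16> src[n_entries];
//   std::array<float *, 16>    dst;   // dst[v] points to >= n_entries floats
//   vectorized_transpose_and_store(false, n_entries, src, dst);
//   // afterwards dst[v][i] == src[i][v] for every lane v and entry i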
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
  defined(__VSX__)
  // VectorizedArray<double, 2> specialization for ALTIVEC/VSX

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
  {}

  VectorizedArray &
  operator=(const double x) &
  {
    data = vec_splats(x);
    return *this;
  }

  VectorizedArray &
  operator=(const double scalar) && = delete;

  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  void
  load(const double *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  void
  store(double *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  __vector double data;
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  // VectorizedArray<float, 4> specialization for ALTIVEC/VSX

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
  {}

  VectorizedArray &
  operator=(const float x) &
  {
    data = vec_splats(x);
    return *this;
  }

  VectorizedArray &
  operator=(const float scalar) && = delete;

  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  void
  load(const float *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  void
  store(float *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  __vector float data;
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
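// Illustrative use of the ALTIVEC/VSX specializations above (a sketch; the
// data below is made up). gather() and scatter() move the lanes to and from
// arbitrary offsets relative to a base pointer:
//
//   double       src[6]     = {0., 1., 2., 3., 4., 5.};
//   unsigned int offsets[2] = {1, 4};
//   VectorizedArray<double, 2> v;
//   v.gather(src, offsets);    // lanes now hold src[1] and src[4]
//   v = v * 2.;
//   v.scatter(offsets, src);   // writes 2. and 8. back to src[1], src[4]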
template <typename Number, std::size_t width>
inline bool
operator==(const VectorizedArray<Number, width> &lhs,
           const VectorizedArray<Number, width> &rhs)
{
  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
    if (lhs[i] != rhs[i])
      return false;

  return true;
}
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
inline std::ostream &
operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
{
  constexpr unsigned int n = VectorizedArray<Number, width>::size();
  for (unsigned int i = 0; i < n - 1; ++i)
    out << p[i] << ' ';
  out << p[n - 1];

  return out;
}
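// Illustrative usage of the global operators above (a sketch with made-up
// numbers, not library documentation):
//
//   VectorizedArray<double> a = 1.5;          // all lanes set to 1.5
//   VectorizedArray<double> b = 2.0;
//   VectorizedArray<double> c = a * b + 3.0;  // lane-wise: 1.5 * 2.0 + 3.0
//   std::cout << c << std::endl;              // prints the lanes, space-separated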
5356#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
template <SIMDComparison predicate, typename Number>
DEAL_II_ALWAYS_INLINE inline Number
compare_and_apply_mask(const Number &left,
                       const Number &right,
                       const Number &true_value,
                       const Number &false_value)
{
  bool mask = false;
  switch (predicate)
    {
      case SIMDComparison::equal:                 mask = (left == right); break;
      case SIMDComparison::not_equal:             mask = (left != right); break;
      case SIMDComparison::less_than:             mask = (left < right);  break;
      case SIMDComparison::less_than_or_equal:    mask = (left <= right); break;
      case SIMDComparison::greater_than:          mask = (left > right);  break;
      case SIMDComparison::greater_than_or_equal: mask = (left >= right); break;
    }

  return mask ? true_value : false_value;
}
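// A short sketch of how compare_and_apply_mask() is meant to be used (the
// numbers are made up; the enumerator name follows the scalar overload
// above). It is the branch-free equivalent of
// `(left OP right) ? true_value : false_value`:
//
//   const double a = 2., b = 3.;
//   const double r =
//     compare_and_apply_mask<SIMDComparison::less_than>(a, b, 1., -1.);
//   // r == 1. because a < b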
template <SIMDComparison predicate, typename Number>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
                       const VectorizedArray<Number, 1> &right,
                       const VectorizedArray<Number, 1> &true_value,
                       const VectorizedArray<Number, 1> &false_value)
{
  VectorizedArray<Number, 1> result;
  result.data = compare_and_apply_mask<predicate, Number>(
    left.data, right.data, true_value.data, false_value.data);
  return result;
}
5493# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
compare_and_apply_mask(const VectorizedArray<float, 16> &left,
                       const VectorizedArray<float, 16> &right,
                       const VectorizedArray<float, 16> &true_values,
                       const VectorizedArray<float, 16> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 16> result;
  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
  return result;
}
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
compare_and_apply_mask(const VectorizedArray<double, 8> &left,
                       const VectorizedArray<double, 8> &right,
                       const VectorizedArray<double, 8> &true_values,
                       const VectorizedArray<double, 8> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 8> result;
  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
  return result;
}
5527# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
compare_and_apply_mask(const VectorizedArray<float, 8> &left,
                       const VectorizedArray<float, 8> &right,
                       const VectorizedArray<float, 8> &true_values,
                       const VectorizedArray<float, 8> &false_values)
{
  const __m256 mask =
    _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));

  VectorizedArray<float, 8> result;
  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
  return result;
}
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
compare_and_apply_mask(const VectorizedArray<double, 4> &left,
                       const VectorizedArray<double, 4> &right,
                       const VectorizedArray<double, 4> &true_values,
                       const VectorizedArray<double, 4> &false_values)
{
  const __m256d mask =
    _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));

  VectorizedArray<double, 4> result;
  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
  return result;
}
5562# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5564template <SIMDComparison predicate>
  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
                          _mm_andnot_ps(mask, false_values.data));
5602template <SIMDComparison predicate>
  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
                          _mm_andnot_pd(mask, false_values.data));
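// Illustrative usage of the vectorized overloads above (a sketch; the values
// are made up, and SIMDComparison::greater_than refers to the enumerator used
// in the scalar overload earlier in this section). All lanes are processed at
// once, without branching:
//
//   VectorizedArray<double> x, bound;
//   x     = 2.;
//   bound = 1.;
//   // lane-wise: x[i] > bound[i] ? bound[i] : x[i]  (clamp from above)
//   x = compare_and_apply_mask<SIMDComparison::greater_than>(x, bound, bound, x);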
  template <typename T>
5656 static constexpr std::size_t
5673 static constexpr std::size_t
  template <typename T, std::size_t width_>
5738 static constexpr std::size_t
5756 static constexpr std::size_t
  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  sin(const ::VectorizedArray<Number, width> &x)
  {
    Number values[::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      values[i] = std::sin(x[i]);

    ::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  cos(const ::VectorizedArray<Number, width> &x)
  {
    Number values[::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      values[i] = std::cos(x[i]);

    ::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  tan(const ::VectorizedArray<Number, width> &x)
  {
    Number values[::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      values[i] = std::tan(x[i]);

    ::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  exp(const ::VectorizedArray<Number, width> &x)
  {
    Number values[::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      values[i] = std::exp(x[i]);

    ::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  log(const ::VectorizedArray<Number, width> &x)
  {
    Number values[::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      values[i] = std::log(x[i]);

    ::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  sqrt(const ::VectorizedArray<Number, width> &x)
  {
    return x.get_sqrt();
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  pow(const ::VectorizedArray<Number, width> &x, const Number p)
  {
    Number values[::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      values[i] = std::pow(x[i], p);

    ::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  pow(const ::VectorizedArray<Number, width> &x,
      const ::VectorizedArray<Number, width> &p)
  {
    Number values[::VectorizedArray<Number, width>::size()];
    for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
      values[i] = std::pow(x[i], p[i]);

    ::VectorizedArray<Number, width> out;
    out.load(&values[0]);
    return out;
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  abs(const ::VectorizedArray<Number, width> &x)
  {
    return x.get_abs();
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  max(const ::VectorizedArray<Number, width> &x,
      const ::VectorizedArray<Number, width> &y)
  {
    return x.get_max(y);
  }

  template <typename Number, std::size_t width>
  inline ::VectorizedArray<Number, width>
  min(const ::VectorizedArray<Number, width> &x,
      const ::VectorizedArray<Number, width> &y)
  {
    return x.get_min(y);
  }
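// Illustrative usage of the std:: overloads above (a sketch; the values are
// made up). Each function acts lane-by-lane on a VectorizedArray:
//
//   VectorizedArray<double> x = 4.0;
//   VectorizedArray<double> r = std::sqrt(x);              // every lane is 2.0
//   VectorizedArray<double> s = std::max(r, std::sin(x));  // lane-wise maximum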
6055#ifdef DEAL_II_HAVE_CXX20