Reference documentation for deal.II version 9.6.0
vectorization.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#ifndef dealii_vectorization_h
17#define dealii_vectorization_h
18
19#include <deal.II/base/config.h>
20
23
24#include <algorithm>
25#include <array>
26#include <cmath>
27
28// Note:
29// The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30// according to the following scheme (on x86-based architectures)
31// #ifdef __AVX512F__
32// #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33// #elif defined (__AVX__)
34// #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35// #elif defined (__SSE2__)
36// #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37// #else
38// #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39// #endif
40// In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
41// test, 'check_01_cpu_features.cmake', ensures that these features are not
42// only present in the compilation unit but also work properly.
43
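// For illustration (a sketch, not part of the header itself): the selected
// width translates directly into the number of lanes of the default
// VectorizedArray types. Assuming deal.II was configured with AVX, i.e.
// DEAL_II_VECTORIZATION_WIDTH_IN_BITS == 256, one may rely on
//
//   static_assert(dealii::VectorizedArray<double>::size() == 4); // 256 / 64
//   static_assert(dealii::VectorizedArray<float>::size() == 8);  // 256 / 32
//
// With 128 bits (SSE2/Neon) the sizes would be 2 and 4, with AVX-512 8 and 16,
// and with width 0 only the scalar fallback of size 1 is available.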
44#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45
46// These error messages try to detect the case that deal.II was compiled with
47// a wider instruction set extension than the current compilation unit, for
48// example because deal.II was compiled with AVX, but a user project does not
49// add -march=native or similar flags, making it fall back to SSE2. If this is
50// not detected, it leads to very strange errors because the size of data
51// structures differs between the compiled deal.II code in libdeal_II.so and
52// the user code.
53# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54# error \
55 "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56# endif
57# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58# error \
59 "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60# endif
61
62# ifdef _MSC_VER
63# include <intrin.h>
64# elif defined(__ALTIVEC__)
65# include <altivec.h>
66
67// altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68// them before they make trouble
69# undef vector
70# undef pixel
71# undef bool
72# elif defined(__ARM_NEON)
73# include <arm_neon.h>
74# elif defined(__x86_64__)
75# include <x86intrin.h>
76# endif
77
78#endif
79
80
81DEAL_II_NAMESPACE_OPEN
82
83
84// Enable the EnableIfScalar type trait for VectorizedArray<Number> such
85// that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
86
87template <typename Number, std::size_t width>
88struct EnableIfScalar<VectorizedArray<Number, width>>
89{
90 using type = VectorizedArray<typename EnableIfScalar<Number>::type, width>;
91};
92
93
94
98template <typename T>
99class VectorizedArrayIterator
100{
101public:
108 constexpr VectorizedArrayIterator(T &data, const std::size_t lane)
109 : data(&data)
110 , lane(lane)
111 {}
112
116 constexpr bool
117 operator==(const VectorizedArrayIterator<T> &other) const
118 {
119 Assert(this->data == other.data,
121 "You are trying to compare iterators into different arrays."));
122 return this->lane == other.lane;
123 }
124
128 constexpr bool
129 operator!=(const VectorizedArrayIterator<T> &other) const
130 {
131 Assert(this->data == other.data,
133 "You are trying to compare iterators into different arrays."));
134 return this->lane != other.lane;
135 }
136
141 constexpr const typename T::value_type &
142 operator*() const
143 {
144 AssertIndexRange(lane, T::size());
145 return (*data)[lane];
146 }
147
148
153 template <typename U = T>
154 constexpr std::enable_if_t<!std::is_same_v<U, const U>,
155 typename T::value_type> &
156 operator*()
157 {
158 AssertIndexRange(lane, T::size());
159 return (*data)[lane];
160 }
161
167 constexpr VectorizedArrayIterator<T> &
168 operator++()
169 {
170 AssertIndexRange(lane + 1, T::size() + 1);
171 ++lane;
172 return *this;
173 }
174
179 constexpr VectorizedArrayIterator<T> &
180 operator+=(const std::size_t offset)
181 {
182 AssertIndexRange(lane + offset, T::size() + 1);
183 lane += offset;
184 return *this;
185 }
186
192 constexpr VectorizedArrayIterator<T> &
193 operator--()
194 {
195 Assert(
196 lane > 0,
198 "You can't decrement an iterator that is already at the beginning of the range."));
199 --lane;
200 return *this;
201 }
202
206 constexpr VectorizedArrayIterator<T>
207 operator+(const std::size_t &offset) const
208 {
209 AssertIndexRange(lane + offset, T::size() + 1);
210 return VectorizedArrayIterator<T>(*data, lane + offset);
211 }
212
216 constexpr std::ptrdiff_t
217 operator-(const VectorizedArrayIterator<T> &other) const
218 {
219 return static_cast<std::ptrdiff_t>(lane) -
220 static_cast<std::ptrdiff_t>(other.lane);
221 }
222
223private:
227 T *data;
228
232 std::size_t lane;
233};
234
235
236
249template <typename VectorizedArrayType, std::size_t width>
250class VectorizedArrayBase
251{
252public:
256 constexpr VectorizedArrayBase() = default;
257
265 template <typename U>
266 constexpr VectorizedArrayBase(const std::initializer_list<U> &list)
267 {
268 const unsigned int n_initializers = list.size();
269 Assert(n_initializers <= size(),
270 ExcMessage("The initializer list must have at most "
271 "as many elements as the vector length."));
272
273 // Copy what's in the list.
274 std::copy_n(list.begin(), n_initializers, this->begin());
275
276 // Then add zero padding where necessary.
277 if (n_initializers < size())
278 std::fill(this->begin() + n_initializers, this->end(), 0.0);
279 }
280
284 static constexpr std::size_t
285 size()
286 {
287 return width;
288 }
289
293 constexpr VectorizedArrayIterator<VectorizedArrayType>
294 begin()
295 {
296 return VectorizedArrayIterator<VectorizedArrayType>(
297 static_cast<VectorizedArrayType &>(*this), 0);
298 }
299
304 constexpr VectorizedArrayIterator<const VectorizedArrayType>
305 begin() const
306 {
307 return VectorizedArrayIterator<const VectorizedArrayType>(
308 static_cast<const VectorizedArrayType &>(*this), 0);
309 }
310
314 constexpr VectorizedArrayIterator<VectorizedArrayType>
315 end()
316 {
317 return VectorizedArrayIterator<VectorizedArrayType>(
318 static_cast<VectorizedArrayType &>(*this), width);
319 }
320
325 constexpr VectorizedArrayIterator<const VectorizedArrayType>
326 end() const
327 {
328 return VectorizedArrayIterator<const VectorizedArrayType>(
329 static_cast<const VectorizedArrayType &>(*this), width);
330 }
331
343 auto
344 dot_product(const VectorizedArrayType &v) const
345 {
346 VectorizedArrayType p = static_cast<const VectorizedArrayType &>(*this);
347 p *= v;
348 return p.sum();
349 }
350};
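// A minimal usage sketch, assuming a lane count for which VectorizedArray is
// implemented on the target machine: begin()/end() above make the array usable
// in range-based for loops, and dot_product() combines the lane-wise product
// with sum(). For example,
//
//   VectorizedArray<double> a = 2.0, b = 3.0; // every lane set to the scalar
//   for (double &lane : a)                    // iterates over all lanes
//     lane += 1.0;
//   const double d = a.dot_product(b);        // sum over lanes of a[l] * b[l]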
351
352
353
442template <typename Number, std::size_t width>
443class VectorizedArray
444 : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
445{
446public:
450 using value_type = Number;
451
460 static constexpr bool is_implemented = (width == 1);
461
466 VectorizedArray() = default;
467
471 VectorizedArray(const Number scalar)
472 {
473 static_assert(width == 1,
474 "You specified an illegal width that is not supported.");
475
476 this->operator=(scalar);
477 }
478
482 template <typename U>
483 VectorizedArray(const std::initializer_list<U> &list)
484 : VectorizedArrayBase<VectorizedArray<Number, width>, 1>(list)
485 {
486 static_assert(width == 1,
487 "You specified an illegal width that is not supported.");
488 }
489
494 VectorizedArray &
495 operator=(const Number scalar) &
496 {
497 data = scalar;
498 return *this;
499 }
500
506 VectorizedArray &
507 operator=(const Number scalar) && = delete;
508
514 Number &
515 operator[](const unsigned int comp)
516 {
517 (void)comp;
518 AssertIndexRange(comp, 1);
519 return data;
520 }
521
527 const Number &
528 operator[](const unsigned int comp) const
529 {
530 (void)comp;
531 AssertIndexRange(comp, 1);
532 return data;
533 }
534
539 VectorizedArray &
540 operator+=(const VectorizedArray &vec)
541 {
542 data += vec.data;
543 return *this;
544 }
545
550 VectorizedArray &
551 operator-=(const VectorizedArray &vec)
552 {
553 data -= vec.data;
554 return *this;
555 }
556
561 VectorizedArray &
562 operator*=(const VectorizedArray &vec)
563 {
564 data *= vec.data;
565 return *this;
566 }
567
572 VectorizedArray &
573 operator/=(const VectorizedArray &vec)
574 {
575 data /= vec.data;
576 return *this;
577 }
578
585 template <typename OtherNumber>
587 load(const OtherNumber *ptr)
588 {
589 data = *ptr;
590 }
591
598 template <typename OtherNumber>
600 store(OtherNumber *ptr) const
601 {
602 *ptr = data;
603 }
604
652 void
653 streaming_store(Number *ptr) const
654 {
655 *ptr = data;
656 }
657
671 void
672 gather(const Number *base_ptr, const unsigned int *offsets)
673 {
674 data = base_ptr[offsets[0]];
675 }
676
690 void
691 scatter(const unsigned int *offsets, Number *base_ptr) const
692 {
693 base_ptr[offsets[0]] = data;
694 }
695
701 Number
702 sum() const
703 {
704 return data;
705 }
706
712 Number data;
713
714private:
721 get_sqrt() const
722 {
723 VectorizedArray res;
724 res.data = std::sqrt(data);
725 return res;
726 }
727
734 get_abs() const
735 {
736 VectorizedArray res;
737 res.data = std::fabs(data);
738 return res;
739 }
740
747 get_max(const VectorizedArray &other) const
748 {
749 VectorizedArray res;
750 res.data = std::max(data, other.data);
751 return res;
752 }
753
760 get_min(const VectorizedArray &other) const
761 {
762 VectorizedArray res;
763 res.data = std::min(data, other.data);
764 return res;
765 }
766
767 // Make a few functions friends.
768 template <typename Number2, std::size_t width2>
769 friend VectorizedArray<Number2, width2>
770 std::sqrt(const VectorizedArray<Number2, width2> &);
771 template <typename Number2, std::size_t width2>
772 friend VectorizedArray<Number2, width2>
773 std::abs(const VectorizedArray<Number2, width2> &);
774 template <typename Number2, std::size_t width2>
775 friend VectorizedArray<Number2, width2>
776 std::max(const VectorizedArray<Number2, width2> &,
777 const VectorizedArray<Number2, width2> &);
778 template <typename Number2, std::size_t width2>
779 friend VectorizedArray<Number2, width2>
780 std::min(const VectorizedArray<Number2, width2> &,
781 const VectorizedArray<Number2, width2> &);
782};
783
784
785
797template <typename Number,
798 std::size_t width =
799 internal::VectorizedArrayWidthSpecifier<Number>::max_width>
800inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
801 make_vectorized_array(const Number &u)
802{
803 VectorizedArray<Number, width> result = u;
804 return result;
805}
806
807
808
815template <typename VectorizedArrayType>
816inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
817make_vectorized_array(const typename VectorizedArrayType::value_type &u)
818{
819 static_assert(
820 std::is_same_v<VectorizedArrayType,
821 VectorizedArray<typename VectorizedArrayType::value_type,
822 VectorizedArrayType::size()>>,
823 "VectorizedArrayType is not a VectorizedArray.");
824
825 VectorizedArrayType result = u;
826 return result;
827}
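// A minimal usage sketch (assuming the requested lane count is implemented on
// the target machine when the second form is used): both overloads broadcast a
// scalar into every lane, e.g.
//
//   const auto a = make_vectorized_array(1.5);                      // default width
//   const auto b = make_vectorized_array<VectorizedArray<float, 4>>(2.0f);
//
// The second form additionally checks via the static_assert above that the
// requested type really is a VectorizedArray.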
828
829
830
842template <typename Number, std::size_t width>
843inline DEAL_II_ALWAYS_INLINE void
844gather(VectorizedArray<Number, width> &out,
845 const std::array<Number *, width> &ptrs,
846 const unsigned int offset)
847{
848 for (unsigned int v = 0; v < width; ++v)
849 out.data[v] = ptrs[v][offset];
850}
851
852
853
879template <typename Number, std::size_t width>
880inline DEAL_II_ALWAYS_INLINE void
881vectorized_load_and_transpose(const unsigned int n_entries,
882 const Number *in,
883 const unsigned int *offsets,
884 VectorizedArray<Number, width> *out)
885{
886 for (unsigned int i = 0; i < n_entries; ++i)
887 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
888 out[i][v] = in[offsets[v] + i];
889}
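// Illustration of the layout transformation (a sketch, not part of the header
// itself): with width == 2, offsets == {0, 8} and n_entries == 3, the loop
// above reads in[0..2] and in[8..10] and produces
//
//   out[0] = {in[0], in[8]},  out[1] = {in[1], in[9]},  out[2] = {in[2], in[10]},
//
// i.e. lane v of entry i receives in[offsets[v] + i]. The specializations
// below implement the same transposition with SIMD shuffles.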
890
891
903template <typename Number, std::size_t width>
904inline DEAL_II_ALWAYS_INLINE void
905vectorized_load_and_transpose(const unsigned int n_entries,
906 const std::array<Number *, width> &in,
907 VectorizedArray<Number, width> *out)
908{
909 for (unsigned int i = 0; i < n_entries; ++i)
910 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
911 out[i][v] = in[v][i];
912}
913
914
915
954template <typename Number, std::size_t width>
955inline DEAL_II_ALWAYS_INLINE void
956vectorized_transpose_and_store(const bool add_into,
957 const unsigned int n_entries,
958 const VectorizedArray<Number, width> *in,
959 const unsigned int *offsets,
960 Number *out)
961{
962 if (add_into)
963 for (unsigned int i = 0; i < n_entries; ++i)
964 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
965 out[offsets[v] + i] += in[i][v];
966 else
967 for (unsigned int i = 0; i < n_entries; ++i)
968 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
969 out[offsets[v] + i] = in[i][v];
970}
971
972
984template <typename Number, std::size_t width>
985inline DEAL_II_ALWAYS_INLINE void
986vectorized_transpose_and_store(const bool add_into,
987 const unsigned int n_entries,
988 const VectorizedArray<Number, width> *in,
989 std::array<Number *, width> &out)
990{
991 if (add_into)
992 for (unsigned int i = 0; i < n_entries; ++i)
993 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
994 out[v][i] += in[i][v];
995 else
996 for (unsigned int i = 0; i < n_entries; ++i)
997 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
998 out[v][i] = in[i][v];
999}
1000
1001
1004#ifndef DOXYGEN
1005
1006# if defined(DEAL_II_HAVE_ARM_NEON) && defined(__ARM_NEON)
1007
1011template <>
1012class VectorizedArray<double, 2>
1013 : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
1014{
1015public:
1019 using value_type = double;
1020
1025 static constexpr bool is_implemented = true;
1026
1031 VectorizedArray() = default;
1032
1036 VectorizedArray(const double scalar)
1037 {
1038 this->operator=(scalar);
1039 }
1040
1044 template <typename U>
1045 VectorizedArray(const std::initializer_list<U> &list)
1046 : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
1047 {}
1048
1053 operator=(const double x) &
1054 {
1055 data = vdupq_n_f64(x);
1056 return *this;
1057 }
1058
1065 operator=(const double scalar) && = delete;
1066
1070 double &
1071 operator[](const unsigned int comp)
1072 {
1073 return *(reinterpret_cast<double *>(&data) + comp);
1074 }
1075
1079 const double &
1080 operator[](const unsigned int comp) const
1081 {
1082 return *(reinterpret_cast<const double *>(&data) + comp);
1083 }
1084
1089 operator+=(const VectorizedArray &vec)
1090 {
1091 data = vaddq_f64(data, vec.data);
1092 return *this;
1093 }
1094
1099 operator-=(const VectorizedArray &vec)
1100 {
1101 data = vsubq_f64(data, vec.data);
1102 return *this;
1103 }
1104
1109 operator*=(const VectorizedArray &vec)
1110 {
1111 data = vmulq_f64(data, vec.data);
1112 return *this;
1113 }
1114
1119 operator/=(const VectorizedArray &vec)
1120 {
1121 data = vdivq_f64(data, vec.data);
1122 return *this;
1123 }
1124
1130 void
1131 load(const double *ptr)
1132 {
1133 data = vld1q_f64(ptr);
1134 }
1135
1137 void
1138 load(const float *ptr)
1139 {
1141 for (unsigned int i = 0; i < 2; ++i)
1142 data[i] = ptr[i];
1143 }
1144
1151 void
1152 store(double *ptr) const
1153 {
1154 vst1q_f64(ptr, data);
1155 }
1156
1158 void
1159 store(float *ptr) const
1160 {
1162 for (unsigned int i = 0; i < 2; ++i)
1163 ptr[i] = data[i];
1164 }
1165
1171 void
1172 streaming_store(double *ptr) const
1173 {
1174 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1175 ExcMessage("Memory not aligned"));
1176 vst1q_f64(ptr, data);
1177 }
1178
1191 void
1192 gather(const double *base_ptr, const unsigned int *offsets)
1193 {
1194 for (unsigned int i = 0; i < 2; ++i)
1195 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1196 }
1197
1210 void
1211 scatter(const unsigned int *offsets, double *base_ptr) const
1212 {
1213 for (unsigned int i = 0; i < 2; ++i)
1214 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1215 }
1216
1221 double
1222 sum() const
1223 {
1224 return vaddvq_f64(data);
1225 }
1226
1232 mutable float64x2_t data;
1233
1234private:
1240 get_sqrt() const
1241 {
1242 VectorizedArray res;
1243 res.data = vsqrtq_f64(data);
1244 return res;
1245 }
1246
1252 get_abs() const
1253 {
1254 VectorizedArray res;
1255 res.data = vabsq_f64(data);
1256 return res;
1257 }
1258
1264 get_max(const VectorizedArray &other) const
1265 {
1266 VectorizedArray res;
1267 res.data = vmaxq_f64(data, other.data);
1268 return res;
1269 }
1270
1276 get_min(const VectorizedArray &other) const
1277 {
1278 VectorizedArray res;
1279 res.data = vminq_f64(data, other.data);
1280 return res;
1281 }
1282
1283 // Make a few functions friends.
1284 template <typename Number2, std::size_t width2>
1287 template <typename Number2, std::size_t width2>
1290 template <typename Number2, std::size_t width2>
1294 template <typename Number2, std::size_t width2>
1298};
1299
1303template <>
1304class VectorizedArray<float, 4>
1305 : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
1306{
1307public:
1311 using value_type = float;
1312
1317 static constexpr bool is_implemented = true;
1318
1323 VectorizedArray() = default;
1324
1328 VectorizedArray(const float scalar)
1329 {
1330 this->operator=(scalar);
1331 }
1332
1336 template <typename U>
1337 VectorizedArray(const std::initializer_list<U> &list)
1338 : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
1339 {}
1340
1345 operator=(const float x) &
1346 {
1347 data = vdupq_n_f32(x);
1348 return *this;
1349 }
1350
1357 operator=(const float scalar) && = delete;
1358
1362 value_type &
1363 operator[](const unsigned int comp)
1364 {
1365 return *(reinterpret_cast<float *>(&data) + comp);
1366 }
1367
1371 const value_type &
1372 operator[](const unsigned int comp) const
1373 {
1374 return *(reinterpret_cast<const float *>(&data) + comp);
1375 }
1376
1381 operator+=(const VectorizedArray &vec)
1382 {
1383 data = vaddq_f32(data, vec.data);
1384 return *this;
1385 }
1386
1391 operator-=(const VectorizedArray &vec)
1392 {
1393 data = vsubq_f32(data, vec.data);
1394 return *this;
1395 }
1396
1401 operator*=(const VectorizedArray &vec)
1402 {
1403 data = vmulq_f32(data, vec.data);
1404 return *this;
1405 }
1406
1411 operator/=(const VectorizedArray &vec)
1412 {
1413 data = vdivq_f32(data, vec.data);
1414 return *this;
1415 }
1416
1422 void
1423 load(const float *ptr)
1424 {
1425 data = vld1q_f32(ptr);
1426 }
1427
1434 void
1435 store(float *ptr) const
1436 {
1437 vst1q_f32(ptr, data);
1438 }
1439
1445 void
1446 streaming_store(float *ptr) const
1447 {
1448 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1449 ExcMessage("Memory not aligned"));
1450 vst1q_f32(ptr, data);
1451 }
1452
1465 void
1466 gather(const float *base_ptr, const unsigned int *offsets)
1467 {
1468 for (unsigned int i = 0; i < 4; ++i)
1469 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
1470 }
1471
1484 void
1485 scatter(const unsigned int *offsets, float *base_ptr) const
1486 {
1487 for (unsigned int i = 0; i < 4; ++i)
1488 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
1489 }
1490
1495 float
1496 sum() const
1497 {
1498 return vaddvq_f32(data);
1499 }
1500
1506 mutable float32x4_t data;
1507
1508private:
1514 get_sqrt() const
1515 {
1516 VectorizedArray res;
1517 res.data = vsqrtq_f32(data);
1518 return res;
1519 }
1520
1526 get_abs() const
1527 {
1528 VectorizedArray res;
1529 res.data = vabsq_f32(data);
1530 return res;
1531 }
1532
1538 get_max(const VectorizedArray &other) const
1539 {
1540 VectorizedArray res;
1541 res.data = vmaxq_f32(data, other.data);
1542 return res;
1543 }
1544
1550 get_min(const VectorizedArray &other) const
1551 {
1552 VectorizedArray res;
1553 res.data = vminq_f32(data, other.data);
1554 return res;
1555 }
1556
1557 // Make a few functions friends.
1558 template <typename Number2, std::size_t width2>
1561 template <typename Number2, std::size_t width2>
1564 template <typename Number2, std::size_t width2>
1568 template <typename Number2, std::size_t width2>
1572};
1573
1574
1575# endif
1576
1577# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
1578
1582template <>
1583class VectorizedArray<double, 2>
1584 : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
1585{
1586public:
1590 using value_type = double;
1591
1596 static constexpr bool is_implemented = true;
1597
1602 VectorizedArray() = default;
1603
1607 VectorizedArray(const double scalar)
1608 {
1609 this->operator=(scalar);
1610 }
1611
1615 template <typename U>
1616 VectorizedArray(const std::initializer_list<U> &list)
1617 : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
1618 {}
1619
1625 operator=(const double x) &
1626 {
1627 data = _mm_set1_pd(x);
1628 return *this;
1629 }
1630
1637 operator=(const double scalar) && = delete;
1638
1643 double &
1644 operator[](const unsigned int comp)
1645 {
1646 AssertIndexRange(comp, 2);
1647 return *(reinterpret_cast<double *>(&data) + comp);
1648 }
1649
1654 const double &
1655 operator[](const unsigned int comp) const
1656 {
1657 AssertIndexRange(comp, 2);
1658 return *(reinterpret_cast<const double *>(&data) + comp);
1659 }
1660
1666 operator+=(const VectorizedArray &vec)
1667 {
1668# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1669 data += vec.data;
1670# else
1671 data = _mm_add_pd(data, vec.data);
1672# endif
1673 return *this;
1674 }
1675
1681 operator-=(const VectorizedArray &vec)
1682 {
1683# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1684 data -= vec.data;
1685# else
1686 data = _mm_sub_pd(data, vec.data);
1687# endif
1688 return *this;
1689 }
1690
1696 operator*=(const VectorizedArray &vec)
1697 {
1698# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1699 data *= vec.data;
1700# else
1701 data = _mm_mul_pd(data, vec.data);
1702# endif
1703 return *this;
1704 }
1705
1711 operator/=(const VectorizedArray &vec)
1712 {
1713# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1714 data /= vec.data;
1715# else
1716 data = _mm_div_pd(data, vec.data);
1717# endif
1718 return *this;
1719 }
1720
1727 void
1728 load(const double *ptr)
1729 {
1730 data = _mm_loadu_pd(ptr);
1731 }
1732
1734 void
1735 load(const float *ptr)
1736 {
1738 for (unsigned int i = 0; i < 2; ++i)
1739 data[i] = ptr[i];
1740 }
1741
1749 void
1750 store(double *ptr) const
1751 {
1752 _mm_storeu_pd(ptr, data);
1753 }
1754
1756 void
1757 store(float *ptr) const
1758 {
1760 for (unsigned int i = 0; i < 2; ++i)
1761 ptr[i] = data[i];
1762 }
1763
1769 void
1770 streaming_store(double *ptr) const
1771 {
1772 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1773 ExcMessage("Memory not aligned"));
1774 _mm_stream_pd(ptr, data);
1775 }
1776
1790 void
1791 gather(const double *base_ptr, const unsigned int *offsets)
1792 {
1793 for (unsigned int i = 0; i < 2; ++i)
1794 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1795 }
1796
1810 void
1811 scatter(const unsigned int *offsets, double *base_ptr) const
1812 {
1813 for (unsigned int i = 0; i < 2; ++i)
1814 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1815 }
1816
1821 double
1822 sum() const
1823 {
1824 __m128d t1 = _mm_unpackhi_pd(data, data);
1825 __m128d t2 = _mm_add_pd(data, t1);
1826 return _mm_cvtsd_f64(t2);
1827 }
1828
1834 __m128d data;
1835
1836private:
1843 get_sqrt() const
1844 {
1845 VectorizedArray res;
1846 res.data = _mm_sqrt_pd(data);
1847 return res;
1848 }
1849
1856 get_abs() const
1857 {
1858 // to compute the absolute value, perform
1859 // bitwise andnot with -0. This will leave all
1860 // value and exponent bits unchanged but force
1861 // the sign value to +.
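 // (For illustration: -0.0 is the bit pattern with only the sign bit set,
 // 0x8000000000000000 for a double, so the andnot below clears exactly that
 // bit in every lane.)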
1862 __m128d mask = _mm_set1_pd(-0.);
1863 VectorizedArray res;
1864 res.data = _mm_andnot_pd(mask, data);
1865 return res;
1866 }
1867
1874 get_max(const VectorizedArray &other) const
1875 {
1876 VectorizedArray res;
1877 res.data = _mm_max_pd(data, other.data);
1878 return res;
1879 }
1880
1887 get_min(const VectorizedArray &other) const
1888 {
1889 VectorizedArray res;
1890 res.data = _mm_min_pd(data, other.data);
1891 return res;
1892 }
1893
1894 // Make a few functions friends.
1895 template <typename Number2, std::size_t width2>
1898 template <typename Number2, std::size_t width2>
1901 template <typename Number2, std::size_t width2>
1905 template <typename Number2, std::size_t width2>
1909};
1910
1911
1912
1916template <>
1917inline DEAL_II_ALWAYS_INLINE void
1918vectorized_load_and_transpose(const unsigned int n_entries,
1919 const double *in,
1920 const unsigned int *offsets,
1921 VectorizedArray<double, 2> *out)
1922{
1923 const unsigned int n_chunks = n_entries / 2;
1924 for (unsigned int i = 0; i < n_chunks; ++i)
1925 {
1926 __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
1927 __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
1928 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1929 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1930 }
1931
1932 // remainder loop of work that does not divide by 2
1933 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1934 for (unsigned int v = 0; v < 2; ++v)
1935 out[i][v] = in[offsets[v] + i];
1936}
1937
1938
1939
1943template <>
1944inline DEAL_II_ALWAYS_INLINE void
1945vectorized_load_and_transpose(const unsigned int n_entries,
1946 const std::array<double *, 2> &in,
1947 VectorizedArray<double, 2> *out)
1948{
1949 // see the comments in the vectorized_load_and_transpose above
1950
1951 const unsigned int n_chunks = n_entries / 2;
1952 for (unsigned int i = 0; i < n_chunks; ++i)
1953 {
1954 __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
1955 __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
1956 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1957 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1958 }
1959
1960 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1961 for (unsigned int v = 0; v < 2; ++v)
1962 out[i][v] = in[v][i];
1963}
1964
1965
1966
1970template <>
1971inline DEAL_II_ALWAYS_INLINE void
1972vectorized_transpose_and_store(const bool add_into,
1973 const unsigned int n_entries,
1974 const VectorizedArray<double, 2> *in,
1975 const unsigned int *offsets,
1976 double *out)
1977{
1978 const unsigned int n_chunks = n_entries / 2;
1979 if (add_into)
1980 {
1981 for (unsigned int i = 0; i < n_chunks; ++i)
1982 {
1983 __m128d u0 = in[2 * i + 0].data;
1984 __m128d u1 = in[2 * i + 1].data;
1985 __m128d res0 = _mm_unpacklo_pd(u0, u1);
1986 __m128d res1 = _mm_unpackhi_pd(u0, u1);
1987 _mm_storeu_pd(out + 2 * i + offsets[0],
1988 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
1989 res0));
1990 _mm_storeu_pd(out + 2 * i + offsets[1],
1991 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
1992 res1));
1993 }
1994 // remainder loop of work that does not divide by 2
1995 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1996 for (unsigned int v = 0; v < 2; ++v)
1997 out[offsets[v] + i] += in[i][v];
1998 }
1999 else
2000 {
2001 for (unsigned int i = 0; i < n_chunks; ++i)
2002 {
2003 __m128d u0 = in[2 * i + 0].data;
2004 __m128d u1 = in[2 * i + 1].data;
2005 __m128d res0 = _mm_unpacklo_pd(u0, u1);
2006 __m128d res1 = _mm_unpackhi_pd(u0, u1);
2007 _mm_storeu_pd(out + 2 * i + offsets[0], res0);
2008 _mm_storeu_pd(out + 2 * i + offsets[1], res1);
2009 }
2010 // remainder loop of work that does not divide by 2
2011 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2012 for (unsigned int v = 0; v < 2; ++v)
2013 out[offsets[v] + i] = in[i][v];
2014 }
2015}
2016
2017
2018
2022template <>
2023inline DEAL_II_ALWAYS_INLINE void
2024vectorized_transpose_and_store(const bool add_into,
2025 const unsigned int n_entries,
2026 const VectorizedArray<double, 2> *in,
2027 std::array<double *, 2> &out)
2028{
2029 // see the comments in the vectorized_transpose_and_store above
2030
2031 const unsigned int n_chunks = n_entries / 2;
2032 if (add_into)
2033 {
2034 for (unsigned int i = 0; i < n_chunks; ++i)
2035 {
2036 __m128d u0 = in[2 * i + 0].data;
2037 __m128d u1 = in[2 * i + 1].data;
2038 __m128d res0 = _mm_unpacklo_pd(u0, u1);
2039 __m128d res1 = _mm_unpackhi_pd(u0, u1);
2040 _mm_storeu_pd(out[0] + 2 * i,
2041 _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
2042 _mm_storeu_pd(out[1] + 2 * i,
2043 _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
2044 }
2045
2046 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2047 for (unsigned int v = 0; v < 2; ++v)
2048 out[v][i] += in[i][v];
2049 }
2050 else
2051 {
2052 for (unsigned int i = 0; i < n_chunks; ++i)
2053 {
2054 __m128d u0 = in[2 * i + 0].data;
2055 __m128d u1 = in[2 * i + 1].data;
2056 __m128d res0 = _mm_unpacklo_pd(u0, u1);
2057 __m128d res1 = _mm_unpackhi_pd(u0, u1);
2058 _mm_storeu_pd(out[0] + 2 * i, res0);
2059 _mm_storeu_pd(out[1] + 2 * i, res1);
2060 }
2061
2062 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2063 for (unsigned int v = 0; v < 2; ++v)
2064 out[v][i] = in[i][v];
2065 }
2066}
2067
2068
2069
2073template <>
2074class VectorizedArray<float, 4>
2075 : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
2076{
2077public:
2081 using value_type = float;
2082
2087 static constexpr bool is_implemented = true;
2088
2093 VectorizedArray() = default;
2094
2098 VectorizedArray(const float scalar)
2099 {
2100 this->operator=(scalar);
2101 }
2102
2106 template <typename U>
2107 VectorizedArray(const std::initializer_list<U> &list)
2108 : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
2109 {}
2110
2116 operator=(const float x) &
2117 {
2118 data = _mm_set1_ps(x);
2119 return *this;
2120 }
2121
2128 operator=(const float scalar) && = delete;
2129
2134 float &
2135 operator[](const unsigned int comp)
2136 {
2137 AssertIndexRange(comp, 4);
2138 return *(reinterpret_cast<float *>(&data) + comp);
2139 }
2140
2145 const float &
2146 operator[](const unsigned int comp) const
2147 {
2148 AssertIndexRange(comp, 4);
2149 return *(reinterpret_cast<const float *>(&data) + comp);
2150 }
2151
2157 operator+=(const VectorizedArray &vec)
2158 {
2159# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2160 data += vec.data;
2161# else
2162 data = _mm_add_ps(data, vec.data);
2163# endif
2164 return *this;
2165 }
2166
2172 operator-=(const VectorizedArray &vec)
2173 {
2174# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2175 data -= vec.data;
2176# else
2177 data = _mm_sub_ps(data, vec.data);
2178# endif
2179 return *this;
2180 }
2181
2187 operator*=(const VectorizedArray &vec)
2188 {
2189# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2190 data *= vec.data;
2191# else
2192 data = _mm_mul_ps(data, vec.data);
2193# endif
2194 return *this;
2195 }
2196
2202 operator/=(const VectorizedArray &vec)
2203 {
2204# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2205 data /= vec.data;
2206# else
2207 data = _mm_div_ps(data, vec.data);
2208# endif
2209 return *this;
2210 }
2211
2218 void
2219 load(const float *ptr)
2220 {
2221 data = _mm_loadu_ps(ptr);
2222 }
2223
2231 void
2232 store(float *ptr) const
2233 {
2234 _mm_storeu_ps(ptr, data);
2235 }
2236
2242 void
2243 streaming_store(float *ptr) const
2244 {
2245 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2246 ExcMessage("Memory not aligned"));
2247 _mm_stream_ps(ptr, data);
2248 }
2249
2263 void
2264 gather(const float *base_ptr, const unsigned int *offsets)
2265 {
2266 for (unsigned int i = 0; i < 4; ++i)
2267 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2268 }
2269
2283 void
2284 scatter(const unsigned int *offsets, float *base_ptr) const
2285 {
2286 for (unsigned int i = 0; i < 4; ++i)
2287 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2288 }
2289
2294 float
2295 sum() const
2296 {
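 // Horizontal add (comments added for clarity): t1 moves the upper two lanes
 // onto the lower two, t2 then holds two partial sums, and the final
 // shuffle/add combines them in lane 0 before extracting the scalar.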
2297 __m128 t1 = _mm_movehl_ps(data, data);
2298 __m128 t2 = _mm_add_ps(data, t1);
2299 __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
2300 __m128 t4 = _mm_add_ss(t2, t3);
2301 return _mm_cvtss_f32(t4);
2302 }
2303
2309 __m128 data;
2310
2311private:
2318 get_sqrt() const
2319 {
2320 VectorizedArray res;
2321 res.data = _mm_sqrt_ps(data);
2322 return res;
2323 }
2324
2331 get_abs() const
2332 {
2333 // to compute the absolute value, perform bitwise andnot with -0. This
2334 // will leave all value and exponent bits unchanged but force the sign
2335 // value to +.
2336 __m128 mask = _mm_set1_ps(-0.f);
2337 VectorizedArray res;
2338 res.data = _mm_andnot_ps(mask, data);
2339 return res;
2340 }
2341
2348 get_max(const VectorizedArray &other) const
2349 {
2350 VectorizedArray res;
2351 res.data = _mm_max_ps(data, other.data);
2352 return res;
2353 }
2354
2361 get_min(const VectorizedArray &other) const
2362 {
2363 VectorizedArray res;
2364 res.data = _mm_min_ps(data, other.data);
2365 return res;
2366 }
2367
2368 // Make a few functions friends.
2369 template <typename Number2, std::size_t width2>
2372 template <typename Number2, std::size_t width2>
2375 template <typename Number2, std::size_t width2>
2379 template <typename Number2, std::size_t width2>
2383};
2384
2385
2386
2390template <>
2391inline DEAL_II_ALWAYS_INLINE void
2392vectorized_load_and_transpose(const unsigned int n_entries,
2393 const float *in,
2394 const unsigned int *offsets,
2395 VectorizedArray<float, 4> *out)
2396{
2397 const unsigned int n_chunks = n_entries / 4;
2398 for (unsigned int i = 0; i < n_chunks; ++i)
2399 {
2400 __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
2401 __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
2402 __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
2403 __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
2404 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2405 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2406 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2407 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2408 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2409 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2410 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2411 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2412 }
2413
2414 // remainder loop of work that does not divide by 4
2415 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2416 for (unsigned int v = 0; v < 4; ++v)
2417 out[i][v] = in[offsets[v] + i];
2418}
2419
2420
2421
2425template <>
2426inline DEAL_II_ALWAYS_INLINE void
2427vectorized_load_and_transpose(const unsigned int n_entries,
2428 const std::array<float *, 4> &in,
2429 VectorizedArray<float, 4> *out)
2430{
2431 // see the comments in the vectorized_load_and_transpose above
2432
2433 const unsigned int n_chunks = n_entries / 4;
2434 for (unsigned int i = 0; i < n_chunks; ++i)
2435 {
2436 __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
2437 __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
2438 __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
2439 __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
2440 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2441 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2442 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2443 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2444 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2445 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2446 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2447 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2448 }
2449
2450 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2451 for (unsigned int v = 0; v < 4; ++v)
2452 out[i][v] = in[v][i];
2453}
2454
2455
2456
2460template <>
2461inline DEAL_II_ALWAYS_INLINE void
2462vectorized_transpose_and_store(const bool add_into,
2463 const unsigned int n_entries,
2464 const VectorizedArray<float, 4> *in,
2465 const unsigned int *offsets,
2466 float *out)
2467{
2468 const unsigned int n_chunks = n_entries / 4;
2469 for (unsigned int i = 0; i < n_chunks; ++i)
2470 {
2471 __m128 u0 = in[4 * i + 0].data;
2472 __m128 u1 = in[4 * i + 1].data;
2473 __m128 u2 = in[4 * i + 2].data;
2474 __m128 u3 = in[4 * i + 3].data;
2475 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2476 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2477 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2478 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2479 u0 = _mm_shuffle_ps(t0, t2, 0x88);
2480 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2481 u2 = _mm_shuffle_ps(t1, t3, 0x88);
2482 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2483
2484 // Cannot use the same store instructions in both paths of the 'if'
2485 // because the compiler cannot know that there is no aliasing between
2486 // pointers
2487 if (add_into)
2488 {
2489 u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
2490 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2491 u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
2492 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2493 u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
2494 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2495 u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
2496 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2497 }
2498 else
2499 {
2500 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2501 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2502 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2503 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2504 }
2505 }
2506
2507 // remainder loop of work that does not divide by 4
2508 if (add_into)
2509 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2510 for (unsigned int v = 0; v < 4; ++v)
2511 out[offsets[v] + i] += in[i][v];
2512 else
2513 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2514 for (unsigned int v = 0; v < 4; ++v)
2515 out[offsets[v] + i] = in[i][v];
2516}
2517
2518
2519
2523template <>
2524inline DEAL_II_ALWAYS_INLINE void
2525vectorized_transpose_and_store(const bool add_into,
2526 const unsigned int n_entries,
2527 const VectorizedArray<float, 4> *in,
2528 std::array<float *, 4> &out)
2529{
2530 // see the comments in the vectorized_transpose_and_store above
2531
2532 const unsigned int n_chunks = n_entries / 4;
2533 for (unsigned int i = 0; i < n_chunks; ++i)
2534 {
2535 __m128 u0 = in[4 * i + 0].data;
2536 __m128 u1 = in[4 * i + 1].data;
2537 __m128 u2 = in[4 * i + 2].data;
2538 __m128 u3 = in[4 * i + 3].data;
2539 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2540 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2541 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2542 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2543 u0 = _mm_shuffle_ps(t0, t2, 0x88);
2544 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2545 u2 = _mm_shuffle_ps(t1, t3, 0x88);
2546 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2547
2548 if (add_into)
2549 {
2550 u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
2551 _mm_storeu_ps(out[0] + 4 * i, u0);
2552 u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
2553 _mm_storeu_ps(out[1] + 4 * i, u1);
2554 u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
2555 _mm_storeu_ps(out[2] + 4 * i, u2);
2556 u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
2557 _mm_storeu_ps(out[3] + 4 * i, u3);
2558 }
2559 else
2560 {
2561 _mm_storeu_ps(out[0] + 4 * i, u0);
2562 _mm_storeu_ps(out[1] + 4 * i, u1);
2563 _mm_storeu_ps(out[2] + 4 * i, u2);
2564 _mm_storeu_ps(out[3] + 4 * i, u3);
2565 }
2566 }
2567
2568 if (add_into)
2569 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2570 for (unsigned int v = 0; v < 4; ++v)
2571 out[v][i] += in[i][v];
2572 else
2573 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2574 for (unsigned int v = 0; v < 4; ++v)
2575 out[v][i] = in[i][v];
2576}
2577
2578
2579
2580# endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
2581
2582# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2583
2587template <>
2588class VectorizedArray<double, 4>
2589 : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2590{
2591public:
2595 using value_type = double;
2596
2601 static constexpr bool is_implemented = true;
2602
2607 VectorizedArray() = default;
2608
2612 VectorizedArray(const double scalar)
2613 {
2614 this->operator=(scalar);
2615 }
2616
2620 template <typename U>
2621 VectorizedArray(const std::initializer_list<U> &list)
2622 : VectorizedArrayBase<VectorizedArray<double, 4>, 4>(list)
2623 {}
2624
2630 operator=(const double x) &
2631 {
2632 data = _mm256_set1_pd(x);
2633 return *this;
2634 }
2635
2642 operator=(const double scalar) && = delete;
2643
2648 double &
2649 operator[](const unsigned int comp)
2650 {
2651 AssertIndexRange(comp, 4);
2652 return *(reinterpret_cast<double *>(&data) + comp);
2653 }
2654
2659 const double &
2660 operator[](const unsigned int comp) const
2661 {
2662 AssertIndexRange(comp, 4);
2663 return *(reinterpret_cast<const double *>(&data) + comp);
2664 }
2665
2671 operator+=(const VectorizedArray &vec)
2672 {
2673 // if the compiler supports vector arithmetic, we can simply use +=
2674 // operator on the given data type. this allows the compiler to combine
2675 // additions with multiplication (fused multiply-add) if those
2676 // instructions are available. Otherwise, we need to use the built-in
2677 // intrinsic command for __m256d
2678# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2679 data += vec.data;
2680# else
2681 data = _mm256_add_pd(data, vec.data);
2682# endif
2683 return *this;
2684 }
2685
2691 operator-=(const VectorizedArray &vec)
2692 {
2693# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2694 data -= vec.data;
2695# else
2696 data = _mm256_sub_pd(data, vec.data);
2697# endif
2698 return *this;
2699 }
2705 operator*=(const VectorizedArray &vec)
2706 {
2707# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2708 data *= vec.data;
2709# else
2710 data = _mm256_mul_pd(data, vec.data);
2711# endif
2712 return *this;
2713 }
2714
2720 operator/=(const VectorizedArray &vec)
2721 {
2722# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2723 data /= vec.data;
2724# else
2725 data = _mm256_div_pd(data, vec.data);
2726# endif
2727 return *this;
2728 }
2729
2736 void
2737 load(const double *ptr)
2738 {
2739 data = _mm256_loadu_pd(ptr);
2740 }
2741
2743 void
2744 load(const float *ptr)
2745 {
2746 data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
2747 }
2748
2756 void
2757 store(double *ptr) const
2758 {
2759 _mm256_storeu_pd(ptr, data);
2760 }
2761
2763 void
2764 store(float *ptr) const
2765 {
2766 _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
2767 }
2768
2774 void
2775 streaming_store(double *ptr) const
2776 {
2777 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2778 ExcMessage("Memory not aligned"));
2779 _mm256_stream_pd(ptr, data);
2780 }
2781
2795 void
2796 gather(const double *base_ptr, const unsigned int *offsets)
2797 {
2798# if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
2799 // unfortunately, there does not appear to be a 128 bit integer load, so
2800 // do it by some reinterpret casts here. this is allowed because the Intel
2801 // API allows aliasing between different vector types.
2802 const __m128 index_val =
2803 _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2804 const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2805
2806 // work around a warning with gcc-12 about an uninitialized initial state
2807 // for gather by starting with a zero guess, even though all lanes will be
2808 // overwritten
2809 __m256d zero = _mm256_setzero_pd();
2810 __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);
2811
2812 data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2813# else
2814 for (unsigned int i = 0; i < 4; ++i)
2815 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2816# endif
2817 }
2818
2832 void
2833 scatter(const unsigned int *offsets, double *base_ptr) const
2834 {
2835 // no scatter operation in AVX/AVX2
2836 for (unsigned int i = 0; i < 4; ++i)
2837 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2838 }
2839
2844 double
2845 sum() const
2846 {
2848 t1.data = _mm_add_pd(this->get_lower(), this->get_upper());
2849 return t1.sum();
2850 }
2851
2857 __m256d data;
2858
2859private:
2864 __m128d
2865 get_lower() const
2866 {
2867 return _mm256_castpd256_pd128(data);
2868 }
2869
2874 __m128d
2875 get_upper() const
2876 {
2877 return _mm256_extractf128_pd(data, 1);
2878 }
2879
2886 get_sqrt() const
2887 {
2888 VectorizedArray res;
2889 res.data = _mm256_sqrt_pd(data);
2890 return res;
2891 }
2892
2899 get_abs() const
2900 {
2901 // to compute the absolute value, perform bitwise andnot with -0. This
2902 // will leave all value and exponent bits unchanged but force the sign
2903 // value to +.
2904 __m256d mask = _mm256_set1_pd(-0.);
2905 VectorizedArray res;
2906 res.data = _mm256_andnot_pd(mask, data);
2907 return res;
2908 }
2909
2916 get_max(const VectorizedArray &other) const
2917 {
2918 VectorizedArray res;
2919 res.data = _mm256_max_pd(data, other.data);
2920 return res;
2921 }
2922
2929 get_min(const VectorizedArray &other) const
2930 {
2931 VectorizedArray res;
2932 res.data = _mm256_min_pd(data, other.data);
2933 return res;
2934 }
2935
2936 // Make a few functions friends.
2937 template <typename Number2, std::size_t width2>
2940 template <typename Number2, std::size_t width2>
2943 template <typename Number2, std::size_t width2>
2947 template <typename Number2, std::size_t width2>
2951};
2952
2953
2954
2958template <>
2959inline DEAL_II_ALWAYS_INLINE void
2960vectorized_load_and_transpose(const unsigned int n_entries,
2961 const double *in,
2962 const unsigned int *offsets,
2963 VectorizedArray<double, 4> *out)
2964{
2965 const unsigned int n_chunks = n_entries / 4;
2966 const double *in0 = in + offsets[0];
2967 const double *in1 = in + offsets[1];
2968 const double *in2 = in + offsets[2];
2969 const double *in3 = in + offsets[3];
2970
2971 for (unsigned int i = 0; i < n_chunks; ++i)
2972 {
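 // Comment added for clarity: u0..u3 hold four rows of a 4x4 block of
 // doubles. The permute2f128 calls combine the lower (t0, t1) and upper
 // (t2, t3) 128-bit halves of rows 0/2 and 1/3, and the unpacklo/unpackhi
 // calls then interleave them so that out[4*i+k] collects element k of all
 // four input rows, i.e. the transposed data.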
2973 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2974 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2975 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2976 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2977 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2978 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2979 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2980 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2981 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2982 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2983 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2984 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2985 }
2986
2987 // remainder loop of work that does not divide by 4
2988 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2989 out[i].gather(in + i, offsets);
2990}
2991
2992
2993
2997template <>
2998inline DEAL_II_ALWAYS_INLINE void
2999vectorized_load_and_transpose(const unsigned int n_entries,
3000 const std::array<double *, 4> &in,
3001 VectorizedArray<double, 4> *out)
3002{
3003 // see the comments in the vectorized_load_and_transpose above
3004
3005 const unsigned int n_chunks = n_entries / 4;
3006 const double *in0 = in[0];
3007 const double *in1 = in[1];
3008 const double *in2 = in[2];
3009 const double *in3 = in[3];
3010
3011 for (unsigned int i = 0; i < n_chunks; ++i)
3012 {
3013 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
3014 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
3015 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
3016 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
3017 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
3018 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
3019 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
3020 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
3021 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
3022 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
3023 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
3024 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
3025 }
3026
3027 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3028 gather(out[i], in, i);
3029}
3030
3031
3032
3036template <>
3037inline DEAL_II_ALWAYS_INLINE void
3038vectorized_transpose_and_store(const bool add_into,
3039 const unsigned int n_entries,
3040 const VectorizedArray<double, 4> *in,
3041 const unsigned int *offsets,
3042 double *out)
3043{
3044 const unsigned int n_chunks = n_entries / 4;
3045 double *out0 = out + offsets[0];
3046 double *out1 = out + offsets[1];
3047 double *out2 = out + offsets[2];
3048 double *out3 = out + offsets[3];
3049 for (unsigned int i = 0; i < n_chunks; ++i)
3050 {
3051 __m256d u0 = in[4 * i + 0].data;
3052 __m256d u1 = in[4 * i + 1].data;
3053 __m256d u2 = in[4 * i + 2].data;
3054 __m256d u3 = in[4 * i + 3].data;
3055 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
3056 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
3057 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
3058 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
3059 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
3060 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
3061 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
3062 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
3063
3064 // Cannot use the same store instructions in both paths of the 'if'
3065 // because the compiler cannot know that there is no aliasing between
3066 // pointers
3067 if (add_into)
3068 {
3069 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
3070 _mm256_storeu_pd(out0 + 4 * i, res0);
3071 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
3072 _mm256_storeu_pd(out1 + 4 * i, res1);
3073 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
3074 _mm256_storeu_pd(out2 + 4 * i, res2);
3075 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
3076 _mm256_storeu_pd(out3 + 4 * i, res3);
3077 }
3078 else
3079 {
3080 _mm256_storeu_pd(out0 + 4 * i, res0);
3081 _mm256_storeu_pd(out1 + 4 * i, res1);
3082 _mm256_storeu_pd(out2 + 4 * i, res2);
3083 _mm256_storeu_pd(out3 + 4 * i, res3);
3084 }
3085 }
3086
3087 // remainder loop of work that does not divide by 4
3088 if (add_into)
3089 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3090 for (unsigned int v = 0; v < 4; ++v)
3091 out[offsets[v] + i] += in[i][v];
3092 else
3093 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3094 for (unsigned int v = 0; v < 4; ++v)
3095 out[offsets[v] + i] = in[i][v];
3096}
3097
3098
3099
3103template <>
3104inline DEAL_II_ALWAYS_INLINE void
3105vectorized_transpose_and_store(const bool add_into,
3106 const unsigned int n_entries,
3107 const VectorizedArray<double, 4> *in,
3108 std::array<double *, 4> &out)
3109{
3110 // see the comments in the vectorized_transpose_and_store above
3111
3112 const unsigned int n_chunks = n_entries / 4;
3113 double *out0 = out[0];
3114 double *out1 = out[1];
3115 double *out2 = out[2];
3116 double *out3 = out[3];
3117 for (unsigned int i = 0; i < n_chunks; ++i)
3118 {
3119 __m256d u0 = in[4 * i + 0].data;
3120 __m256d u1 = in[4 * i + 1].data;
3121 __m256d u2 = in[4 * i + 2].data;
3122 __m256d u3 = in[4 * i + 3].data;
3123 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
3124 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
3125 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
3126 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
3127 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
3128 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
3129 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
3130 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
3131
3132 // Cannot use the same store instructions in both paths of the 'if'
3133 // because the compiler cannot know that there is no aliasing between
3134 // pointers
3135 if (add_into)
3136 {
3137 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
3138 _mm256_storeu_pd(out0 + 4 * i, res0);
3139 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
3140 _mm256_storeu_pd(out1 + 4 * i, res1);
3141 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
3142 _mm256_storeu_pd(out2 + 4 * i, res2);
3143 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
3144 _mm256_storeu_pd(out3 + 4 * i, res3);
3145 }
3146 else
3147 {
3148 _mm256_storeu_pd(out0 + 4 * i, res0);
3149 _mm256_storeu_pd(out1 + 4 * i, res1);
3150 _mm256_storeu_pd(out2 + 4 * i, res2);
3151 _mm256_storeu_pd(out3 + 4 * i, res3);
3152 }
3153 }
3154
3155 // remainder loop of work that does not divide by 4
3156 if (add_into)
3157 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3158 for (unsigned int v = 0; v < 4; ++v)
3159 out[v][i] += in[i][v];
3160 else
3161 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3162 for (unsigned int v = 0; v < 4; ++v)
3163 out[v][i] = in[i][v];
3164}
3165
3166
3167
3171template <>
3172class VectorizedArray<float, 8>
3173 : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
3174{
3175public:
3179 using value_type = float;
3180
3185 static constexpr bool is_implemented = true;
3186
3191 VectorizedArray() = default;
3192
3196 VectorizedArray(const float scalar)
3197 {
3198 this->operator=(scalar);
3199 }
3200
3204 template <typename U>
3205 VectorizedArray(const std::initializer_list<U> &list)
3206 : VectorizedArrayBase<VectorizedArray<float, 8>, 8>(list)
3207 {}
3208
3214 operator=(const float x) &
3215 {
3216 data = _mm256_set1_ps(x);
3217 return *this;
3218 }
3219
3226 operator=(const float scalar) && = delete;
3227
3232 float &
3233 operator[](const unsigned int comp)
3234 {
3235 AssertIndexRange(comp, 8);
3236 return *(reinterpret_cast<float *>(&data) + comp);
3237 }
3238
3243 const float &
3244 operator[](const unsigned int comp) const
3245 {
3246 AssertIndexRange(comp, 8);
3247 return *(reinterpret_cast<const float *>(&data) + comp);
3248 }
3249
3255 operator+=(const VectorizedArray &vec)
3256 {
3257 // if the compiler supports vector arithmetic, we can simply use +=
3258 // operator on the given data type. this allows the compiler to combine
3259 // additions with multiplication (fused multiply-add) if those
3260 // instructions are available. Otherwise, we need to use the built-in
3261 // intrinsic command for __m256
3262# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3263 data += vec.data;
3264# else
3265 data = _mm256_add_ps(data, vec.data);
3266# endif
3267 return *this;
3268 }
3269
3275 operator-=(const VectorizedArray &vec)
3276 {
3277# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3278 data -= vec.data;
3279# else
3280 data = _mm256_sub_ps(data, vec.data);
3281# endif
3282 return *this;
3283 }
3289 operator*=(const VectorizedArray &vec)
3290 {
3291# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3292 data *= vec.data;
3293# else
3294 data = _mm256_mul_ps(data, vec.data);
3295# endif
3296 return *this;
3297 }
3298
3304 operator/=(const VectorizedArray &vec)
3305 {
3306# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3307 data /= vec.data;
3308# else
3309 data = _mm256_div_ps(data, vec.data);
3310# endif
3311 return *this;
3312 }
3313
3320 void
3321 load(const float *ptr)
3322 {
3323 data = _mm256_loadu_ps(ptr);
3324 }
3325
3333 void
3334 store(float *ptr) const
3335 {
3336 _mm256_storeu_ps(ptr, data);
3337 }
3338
3344 void
3345 streaming_store(float *ptr) const
3346 {
3347 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
3348 ExcMessage("Memory not aligned"));
3349 _mm256_stream_ps(ptr, data);
3350 }
3351
3365 void
3366 gather(const float *base_ptr, const unsigned int *offsets)
3367 {
3368# if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
3369 // unfortunately, there does not appear to be a 256 bit integer load, so
3370 // do it by some reinterpret casts here. this is allowed because the Intel
3371 // API allows aliasing between different vector types.
3372 const __m256 index_val =
3373 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3374 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3375
3376 // work around a warning with gcc-12 about an uninitialized initial state
3377 // for gather by starting with a zero guess, even though all lanes will be
3378 // overwritten
3379 __m256 zero = _mm256_setzero_ps();
3380 __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);
3381
3382 data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
3383# else
3384 for (unsigned int i = 0; i < 8; ++i)
3385 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3386# endif
3387 }
3388
3402 void
3403 scatter(const unsigned int *offsets, float *base_ptr) const
3404 {
3405 // no scatter operation in AVX/AVX2
3406 for (unsigned int i = 0; i < 8; ++i)
3407 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3408 }
3409
3414 float
3415 sum() const
3416 {
3417 VectorizedArray<float, 4> t1;
3418 t1.data = _mm_add_ps(this->get_lower(), this->get_upper());
3419 return t1.sum();
3420 }
3421
3427 __m256 data;
3428
3429private:
3434 __m128
3435 get_lower() const
3436 {
3437 return _mm256_castps256_ps128(data);
3438 }
3439
3444 __m128
3445 get_upper() const
3446 {
3447 return _mm256_extractf128_ps(data, 1);
3448 }
3449
3456 get_sqrt() const
3457 {
3458 VectorizedArray res;
3459 res.data = _mm256_sqrt_ps(data);
3460 return res;
3461 }
3462
3469 get_abs() const
3470 {
3471 // to compute the absolute value, perform bitwise andnot with -0. This
3472 // will leave all value and exponent bits unchanged but force the sign
3473 // value to +.
3474 __m256 mask = _mm256_set1_ps(-0.f);
3475 VectorizedArray res;
3476 res.data = _mm256_andnot_ps(mask, data);
3477 return res;
3478 }
3479
3486 get_max(const VectorizedArray &other) const
3487 {
3488 VectorizedArray res;
3489 res.data = _mm256_max_ps(data, other.data);
3490 return res;
3491 }
3492
3499 get_min(const VectorizedArray &other) const
3500 {
3501 VectorizedArray res;
3502 res.data = _mm256_min_ps(data, other.data);
3503 return res;
3504 }
3505
3506 // Make a few functions friends.
3507 template <typename Number2, std::size_t width2>
3510 template <typename Number2, std::size_t width2>
3513 template <typename Number2, std::size_t width2>
3517 template <typename Number2, std::size_t width2>
3521};
3522
3523
3524
3528template <>
3529inline DEAL_II_ALWAYS_INLINE void
3530vectorized_load_and_transpose(const unsigned int n_entries,
3531 const float *in,
3532 const unsigned int *offsets,
3533 VectorizedArray<float, 8> *out)
3534{
3535 const unsigned int n_chunks = n_entries / 4;
3536 for (unsigned int i = 0; i < n_chunks; ++i)
3537 {
3538 // To avoid warnings about uninitialized variables, we initialize one
3539 // variable with zero before using it.
3540 __m256 t0, t1, t2, t3 = {};
3541 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3542 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3543 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3544 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3545 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3546 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3547 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3548 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3549
3550 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3551 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3552 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3553 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3554 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3555 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3556 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3557 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3558 }
3559
3560 // remainder loop of work that does not divide by 4
3561 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3562 out[i].gather(in + i, offsets);
3563}
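// Usage sketch for the specialization above (illustrative only; the variable
// names mirror the function parameters): lane v of out[i] receives
// in[offsets[v] + i], i.e. the call transposes 8 strided streams of length
// n_entries into an array of VectorizedArray<float, 8>.
//
//   constexpr unsigned int n_entries = 6;
//   float in[8 * n_entries] = {};
//   unsigned int offsets[8];
//   for (unsigned int v = 0; v < 8; ++v)
//     offsets[v] = v * n_entries;
//
//   VectorizedArray<float, 8> out[n_entries];
//   vectorized_load_and_transpose(n_entries, in, offsets, out);
//   // now out[i][v] == in[offsets[v] + i]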
3564
3565
3566
3570template <>
3571inline DEAL_II_ALWAYS_INLINE void
3572vectorized_load_and_transpose(const unsigned int n_entries,
3573 const std::array<float *, 8> &in,
3574 VectorizedArray<float, 8> *out)
3575{
3576 // see the comments in the vectorized_load_and_transpose above
3577
3578 const unsigned int n_chunks = n_entries / 4;
3579 for (unsigned int i = 0; i < n_chunks; ++i)
3580 {
3581 __m256 t0, t1, t2, t3 = {};
3582 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3583 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3584 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3585 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3586 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3587 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3588 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3589 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3590
3591 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3592 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3593 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3594 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3595 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3596 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3597 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3598 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3599 }
3600
3601 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3602 gather(out[i], in, i);
3603}
3604
3605
3606
3610template <>
3611inline DEAL_II_ALWAYS_INLINE void
3612vectorized_transpose_and_store(const bool add_into,
3613 const unsigned int n_entries,
3614 const VectorizedArray<float, 8> *in,
3615 const unsigned int *offsets,
3616 float *out)
3617{
3618 const unsigned int n_chunks = n_entries / 4;
3619 for (unsigned int i = 0; i < n_chunks; ++i)
3620 {
3621 __m256 u0 = in[4 * i + 0].data;
3622 __m256 u1 = in[4 * i + 1].data;
3623 __m256 u2 = in[4 * i + 2].data;
3624 __m256 u3 = in[4 * i + 3].data;
3625 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3626 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3627 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3628 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3629 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3630 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3631 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3632 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3633 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3634 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3635 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3636 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3637 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3638 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3639 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3640 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3641
3642 // Cannot use the same store instructions in both paths of the 'if'
3643 // because the compiler cannot know that there is no aliasing between
3644 // pointers
3645 if (add_into)
3646 {
3647 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3648 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3649 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3650 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3651 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3652 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3653 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3654 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3655 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3656 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3657 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3658 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3659 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3660 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3661 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3662 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3663 }
3664 else
3665 {
3666 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3667 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3668 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3669 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3670 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3671 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3672 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3673 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3674 }
3675 }
3676
3677 // remainder loop of work that does not divide by 4
3678 if (add_into)
3679 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3680 for (unsigned int v = 0; v < 8; ++v)
3681 out[offsets[v] + i] += in[i][v];
3682 else
3683 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3684 for (unsigned int v = 0; v < 8; ++v)
3685 out[offsets[v] + i] = in[i][v];
3686}
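// The specialization above is equivalent to the following scalar loops and
// performs the inverse of vectorized_load_and_transpose(); with
// add_into == true the result is accumulated into the output rather than
// overwriting it (illustrative sketch only):
//
//   for (unsigned int i = 0; i < n_entries; ++i)
//     for (unsigned int v = 0; v < 8; ++v)
//       if (add_into)
//         out[offsets[v] + i] += in[i][v];
//       else
//         out[offsets[v] + i] = in[i][v];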
3687
3688
3689
3693template <>
3694inline DEAL_II_ALWAYS_INLINE void
3695vectorized_transpose_and_store(const bool add_into,
3696 const unsigned int n_entries,
3697 const VectorizedArray<float, 8> *in,
3698 std::array<float *, 8> &out)
3699{
3700 // see the comments in the vectorized_transpose_and_store above
3701
3702 const unsigned int n_chunks = n_entries / 4;
3703 for (unsigned int i = 0; i < n_chunks; ++i)
3704 {
3705 __m256 u0 = in[4 * i + 0].data;
3706 __m256 u1 = in[4 * i + 1].data;
3707 __m256 u2 = in[4 * i + 2].data;
3708 __m256 u3 = in[4 * i + 3].data;
3709 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3710 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3711 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3712 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3713 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3714 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3715 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3716 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3717 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3718 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3719 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3720 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3721 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3722 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3723 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3724 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3725
3726 if (add_into)
3727 {
3728 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3729 _mm_storeu_ps(out[0] + 4 * i, res0);
3730 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3731 _mm_storeu_ps(out[1] + 4 * i, res1);
3732 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3733 _mm_storeu_ps(out[2] + 4 * i, res2);
3734 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3735 _mm_storeu_ps(out[3] + 4 * i, res3);
3736 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3737 _mm_storeu_ps(out[4] + 4 * i, res4);
3738 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3739 _mm_storeu_ps(out[5] + 4 * i, res5);
3740 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3741 _mm_storeu_ps(out[6] + 4 * i, res6);
3742 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3743 _mm_storeu_ps(out[7] + 4 * i, res7);
3744 }
3745 else
3746 {
3747 _mm_storeu_ps(out[0] + 4 * i, res0);
3748 _mm_storeu_ps(out[1] + 4 * i, res1);
3749 _mm_storeu_ps(out[2] + 4 * i, res2);
3750 _mm_storeu_ps(out[3] + 4 * i, res3);
3751 _mm_storeu_ps(out[4] + 4 * i, res4);
3752 _mm_storeu_ps(out[5] + 4 * i, res5);
3753 _mm_storeu_ps(out[6] + 4 * i, res6);
3754 _mm_storeu_ps(out[7] + 4 * i, res7);
3755 }
3756 }
3757
3758 if (add_into)
3759 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3760 for (unsigned int v = 0; v < 8; ++v)
3761 out[v][i] += in[i][v];
3762 else
3763 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3764 for (unsigned int v = 0; v < 8; ++v)
3765 out[v][i] = in[i][v];
3766}
3767
3768# endif
3769
3770// for safety, also check that __AVX512F__ is defined in case the user manually
3771// set some conflicting compile flags which prevent compilation
3772
3773# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
3774
3778template <>
3779class VectorizedArray<double, 8>
3780 : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
3781{
3782public:
3786 using value_type = double;
3787
3792 static constexpr bool is_implemented = true;
3793
3798 VectorizedArray() = default;
3799
3803 VectorizedArray(const double scalar)
3804 {
3805 this->operator=(scalar);
3806 }
3807
3811 template <typename U>
3812 VectorizedArray(const std::initializer_list<U> &list)
3813 : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
3814 {}
3815
3821 operator=(const double x) &
3822 {
3823 data = _mm512_set1_pd(x);
3824 return *this;
3825 }
3826
3827
3834 operator=(const double scalar) && = delete;
3835
3840 double &
3841 operator[](const unsigned int comp)
3842 {
3843 AssertIndexRange(comp, 8);
3844 return *(reinterpret_cast<double *>(&data) + comp);
3845 }
3846
3851 const double &
3852 operator[](const unsigned int comp) const
3853 {
3854 AssertIndexRange(comp, 8);
3855 return *(reinterpret_cast<const double *>(&data) + comp);
3856 }
3857
3863 operator+=(const VectorizedArray &vec)
3864 {
3865 // if the compiler supports vector arithmetic, we can simply use +=
3866 // operator on the given data type. this allows the compiler to combine
3867 // additions with multiplication (fused multiply-add) if those
3868 // instructions are available. Otherwise, we need to use the built-in
3869 // intrinsic command for __m512d
3870# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3871 data += vec.data;
3872# else
3873 data = _mm512_add_pd(data, vec.data);
3874# endif
3875 return *this;
3876 }
3877
3883 operator-=(const VectorizedArray &vec)
3884 {
3885# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3886 data -= vec.data;
3887# else
3888 data = _mm512_sub_pd(data, vec.data);
3889# endif
3890 return *this;
3891 }
3897 operator*=(const VectorizedArray &vec)
3898 {
3899# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3900 data *= vec.data;
3901# else
3902 data = _mm512_mul_pd(data, vec.data);
3903# endif
3904 return *this;
3905 }
3906
3912 operator/=(const VectorizedArray &vec)
3913 {
3914# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3915 data /= vec.data;
3916# else
3917 data = _mm512_div_pd(data, vec.data);
3918# endif
3919 return *this;
3920 }
3921
3928 void
3929 load(const double *ptr)
3930 {
3931 data = _mm512_loadu_pd(ptr);
3932 }
3933
3935 void
3936 load(const float *ptr)
3937 {
3938 data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
3939 }
3940
3948 void
3949 store(double *ptr) const
3950 {
3951 _mm512_storeu_pd(ptr, data);
3952 }
3953
3955 void
3956 store(float *ptr) const
3957 {
3958 _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
3959 }
3960
3966 void
3967 streaming_store(double *ptr) const
3968 {
3969 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
3970 ExcMessage("Memory not aligned"));
3971 _mm512_stream_pd(ptr, data);
3972 }
3973
3987 void
3988 gather(const double *base_ptr, const unsigned int *offsets)
3989 {
3990# ifdef DEAL_II_USE_VECTORIZATION_GATHER
3991 // unfortunately, there does not appear to be a 256 bit integer load, so
3992 // do it by some reinterpret casts here. this is allowed because the Intel
3993 // API allows aliasing between different vector types.
3994 const __m256 index_val =
3995 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3996 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3997
3998 // work around a warning with gcc-12 about an uninitialized initial state
3999 // for gather by starting with a zero guess, even though all lanes will be
4000 // overwritten
4001 __m512d zero = {};
4002 __mmask8 mask = 0xFF;
4003
4004 data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
4005# else
4006 for (unsigned int i = 0; i < 8; ++i)
4007 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4008# endif
4009 }
4010
4024 void
4025 scatter(const unsigned int *offsets, double *base_ptr) const
4026 {
4027# ifdef DEAL_II_USE_VECTORIZATION_GATHER
4028 for (unsigned int i = 0; i < 8; ++i)
4029 for (unsigned int j = i + 1; j < 8; ++j)
4030 Assert(offsets[i] != offsets[j],
4031 ExcMessage("Result of scatter undefined if two offset elements"
4032 " point to the same position"));
4033
4034 // unfortunately, there does not appear to be a 256 bit integer load, so
4035 // do it by some reinterpret casts here. this is allowed because the Intel
4036 // API allows aliasing between different vector types.
4037 const __m256 index_val =
4038 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
4039 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
4040 _mm512_i32scatter_pd(base_ptr, index, data, 8);
4041# else
4042 for (unsigned int i = 0; i < 8; ++i)
4043 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4044# endif
4045 }
4046
4051 double
4052 sum() const
4053 {
4054 VectorizedArray<double, 4> t1;
4055 t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());
4056 return t1.sum();
4057 }
4058
4064 __m512d data;
4065
4066private:
4071 __m256d
4072 get_lower() const
4073 {
4074 return _mm512_castpd512_pd256(data);
4075 }
4076
4081 __m256d
4082 get_upper() const
4083 {
4084 return _mm512_extractf64x4_pd(data, 1);
4085 }
4086
4093 get_sqrt() const
4094 {
4095 VectorizedArray res;
4096 res.data = _mm512_sqrt_pd(data);
4097 return res;
4098 }
4099
4106 get_abs() const
4107 {
4108 // to compute the absolute value, perform bitwise andnot with -0. This
4109 // will leave all value and exponent bits unchanged but force the sign
4110 // value to +. Since there is no andnot for AVX512, we interpret the data
4111 // as 64 bit integers and do the andnot on those types (note that andnot
4112 // is a bitwise operation so the data type does not matter)
4113 __m512d mask = _mm512_set1_pd(-0.);
4114 VectorizedArray res;
4115 res.data = reinterpret_cast<__m512d>(
4116 _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
4117 reinterpret_cast<__m512i>(data)));
4118 return res;
4119 }
4120
4127 get_max(const VectorizedArray &other) const
4128 {
4129 VectorizedArray res;
4130 res.data = _mm512_max_pd(data, other.data);
4131 return res;
4132 }
4133
4140 get_min(const VectorizedArray &other) const
4141 {
4142 VectorizedArray res;
4143 res.data = _mm512_min_pd(data, other.data);
4144 return res;
4145 }
4146
4147 // Make a few functions friends.
4148 template <typename Number2, std::size_t width2>
4151 template <typename Number2, std::size_t width2>
4154 template <typename Number2, std::size_t width2>
4158 template <typename Number2, std::size_t width2>
4162};
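// The get_abs() implementations above rely on the IEEE-754 layout: clearing
// the sign bit of each lane yields the absolute value without a branch. A
// scalar sketch of the same idea (illustrative only, not used by the
// library):
//
//   #include <cstdint>
//   #include <cstring>
//
//   double abs_via_sign_bit(const double x)
//   {
//     std::uint64_t bits;
//     std::memcpy(&bits, &x, sizeof(bits)); // reinterpret the bit pattern
//     bits &= ~(std::uint64_t(1) << 63);    // clear the sign bit
//     double result;
//     std::memcpy(&result, &bits, sizeof(result));
//     return result;
//   }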
4163
4164
4165
4169template <>
4170inline DEAL_II_ALWAYS_INLINE void
4171vectorized_load_and_transpose(const unsigned int n_entries,
4172 const double *in,
4173 const unsigned int *offsets,
4174 VectorizedArray<double, 8> *out)
4175{
4176 // do not do full transpose because the code is long and will most
4177 // likely not pay off because many processors have two load units
4178 // (for the top 8 instructions) but only 1 permute unit (for the 8
4179 // shuffle/unpack instructions). rather start the transposition on the
4180 // vectorized array of half the size with 256 bits
4181 const unsigned int n_chunks = n_entries / 4;
4182 for (unsigned int i = 0; i < n_chunks; ++i)
4183 {
4184 __m512d t0, t1, t2, t3 = {};
4185
4186 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
4187 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
4188 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
4189 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
4190 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
4191 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
4192 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
4193 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
4194
4195 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
4196 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
4197 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
4198 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
4199 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
4200 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
4201 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
4202 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
4203 }
4204 // remainder loop of work that does not divide by 4
4205 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4206 out[i].gather(in + i, offsets);
4207}
4208
4209
4210
4214template <>
4215inline DEAL_II_ALWAYS_INLINE void
4216vectorized_load_and_transpose(const unsigned int n_entries,
4217 const std::array<double *, 8> &in,
4218 VectorizedArray<double, 8> *out)
4219{
4220 const unsigned int n_chunks = n_entries / 4;
4221 for (unsigned int i = 0; i < n_chunks; ++i)
4222 {
4223 __m512d t0, t1, t2, t3 = {};
4224
4225 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
4226 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
4227 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
4228 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
4229 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
4230 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
4231 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
4232 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
4233
4234 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
4235 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
4236 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
4237 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
4238 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
4239 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
4240 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
4241 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
4242 }
4243
4244 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4245 gather(out[i], in, i);
4246}
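// Usage sketch for the variant above that takes one pointer per lane instead
// of a common base pointer plus offsets (illustrative only; 'lanes' is a
// hypothetical name):
//
//   constexpr unsigned int n_entries = 5;
//   double lanes[8][n_entries] = {};
//   std::array<double *, 8> in;
//   for (unsigned int v = 0; v < 8; ++v)
//     in[v] = &lanes[v][0];
//
//   VectorizedArray<double, 8> out[n_entries];
//   vectorized_load_and_transpose(n_entries, in, out);
//   // now out[i][v] == lanes[v][i]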
4247
4248
4249
4253template <>
4254inline DEAL_II_ALWAYS_INLINE void
4255vectorized_transpose_and_store(const bool add_into,
4256 const unsigned int n_entries,
4257 const VectorizedArray<double, 8> *in,
4258 const unsigned int *offsets,
4259 double *out)
4260{
4261 // as for the load, we split the store operations into 256 bit units to
4262 // better balance between code size, shuffle instructions, and stores
4263 const unsigned int n_chunks = n_entries / 4;
4264 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
4265 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
4266 for (unsigned int i = 0; i < n_chunks; ++i)
4267 {
4268 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
4269 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
4270 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4271 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4272 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
4273 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
4274 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
4275 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
4276 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
4277 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
4278 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
4279 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
4280 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
4281 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
4282 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
4283 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
4284
4285 // Cannot use the same store instructions in both paths of the 'if'
4286 // because the compiler cannot know that there is no aliasing
4287 // between pointers
4288 if (add_into)
4289 {
4290 res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
4291 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
4292 res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
4293 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
4294 res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
4295 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
4296 res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
4297 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
4298 res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
4299 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
4300 res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
4301 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
4302 res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
4303 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
4304 res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
4305 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
4306 }
4307 else
4308 {
4309 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
4310 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
4311 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
4312 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
4313 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
4314 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
4315 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
4316 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
4317 }
4318 }
4319
4320 // remainder loop of work that does not divide by 4
4321 if (add_into)
4322 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4323 for (unsigned int v = 0; v < 8; ++v)
4324 out[offsets[v] + i] += in[i][v];
4325 else
4326 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4327 for (unsigned int v = 0; v < 8; ++v)
4328 out[offsets[v] + i] = in[i][v];
4329}
4330
4331
4332
4336template <>
4337inline DEAL_II_ALWAYS_INLINE void
4338vectorized_transpose_and_store(const bool add_into,
4339 const unsigned int n_entries,
4340 const VectorizedArray<double, 8> *in,
4341 std::array<double *, 8> &out)
4342{
4343 // see the comments in the vectorized_transpose_and_store above
4344
4345 const unsigned int n_chunks = n_entries / 4;
4346 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
4347 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
4348 for (unsigned int i = 0; i < n_chunks; ++i)
4349 {
4350 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
4351 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
4352 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4353 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4354 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
4355 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
4356 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
4357 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
4358 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
4359 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
4360 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
4361 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
4362 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
4363 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
4364 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
4365 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
4366
4367 if (add_into)
4368 {
4369 res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
4370 _mm256_storeu_pd(out[0] + 4 * i, res0);
4371 res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
4372 _mm256_storeu_pd(out[1] + 4 * i, res1);
4373 res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
4374 _mm256_storeu_pd(out[2] + 4 * i, res2);
4375 res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
4376 _mm256_storeu_pd(out[3] + 4 * i, res3);
4377 res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
4378 _mm256_storeu_pd(out[4] + 4 * i, res4);
4379 res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
4380 _mm256_storeu_pd(out[5] + 4 * i, res5);
4381 res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
4382 _mm256_storeu_pd(out[6] + 4 * i, res6);
4383 res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
4384 _mm256_storeu_pd(out[7] + 4 * i, res7);
4385 }
4386 else
4387 {
4388 _mm256_storeu_pd(out[0] + 4 * i, res0);
4389 _mm256_storeu_pd(out[1] + 4 * i, res1);
4390 _mm256_storeu_pd(out[2] + 4 * i, res2);
4391 _mm256_storeu_pd(out[3] + 4 * i, res3);
4392 _mm256_storeu_pd(out[4] + 4 * i, res4);
4393 _mm256_storeu_pd(out[5] + 4 * i, res5);
4394 _mm256_storeu_pd(out[6] + 4 * i, res6);
4395 _mm256_storeu_pd(out[7] + 4 * i, res7);
4396 }
4397 }
4398
4399 if (add_into)
4400 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4401 for (unsigned int v = 0; v < 8; ++v)
4402 out[v][i] += in[i][v];
4403 else
4404 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4405 for (unsigned int v = 0; v < 8; ++v)
4406 out[v][i] = in[i][v];
4407}
4408
4409
4410
4414template <>
4415class VectorizedArray<float, 16>
4416 : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
4417{
4418public:
4422 using value_type = float;
4423
4428 static constexpr bool is_implemented = true;
4429
4434 VectorizedArray() = default;
4435
4439 VectorizedArray(const float scalar)
4440 {
4441 this->operator=(scalar);
4442 }
4443
4447 template <typename U>
4448 VectorizedArray(const std::initializer_list<U> &list)
4449 : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
4450 {}
4451
4457 operator=(const float x) &
4458 {
4459 data = _mm512_set1_ps(x);
4460 return *this;
4461 }
4462
4469 operator=(const float scalar) && = delete;
4470
4475 float &
4476 operator[](const unsigned int comp)
4477 {
4478 AssertIndexRange(comp, 16);
4479 return *(reinterpret_cast<float *>(&data) + comp);
4480 }
4481
4486 const float &
4487 operator[](const unsigned int comp) const
4488 {
4489 AssertIndexRange(comp, 16);
4490 return *(reinterpret_cast<const float *>(&data) + comp);
4491 }
4492
4498 operator+=(const VectorizedArray &vec)
4499 {
4500 // if the compiler supports vector arithmetic, we can simply use +=
4501 // operator on the given data type. this allows the compiler to combine
4502 // additions with multiplication (fused multiply-add) if those
4503 // instructions are available. Otherwise, we need to use the built-in
4504 // intrinsic command for __m512
4505# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4506 data += vec.data;
4507# else
4508 data = _mm512_add_ps(data, vec.data);
4509# endif
4510 return *this;
4511 }
4512
4518 operator-=(const VectorizedArray &vec)
4519 {
4520# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4521 data -= vec.data;
4522# else
4523 data = _mm512_sub_ps(data, vec.data);
4524# endif
4525 return *this;
4526 }
4532 operator*=(const VectorizedArray &vec)
4533 {
4534# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4535 data *= vec.data;
4536# else
4537 data = _mm512_mul_ps(data, vec.data);
4538# endif
4539 return *this;
4540 }
4541
4547 operator/=(const VectorizedArray &vec)
4548 {
4549# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4550 data /= vec.data;
4551# else
4552 data = _mm512_div_ps(data, vec.data);
4553# endif
4554 return *this;
4555 }
4556
4563 void
4564 load(const float *ptr)
4565 {
4566 data = _mm512_loadu_ps(ptr);
4567 }
4568
4576 void
4577 store(float *ptr) const
4578 {
4579 _mm512_storeu_ps(ptr, data);
4580 }
4581
4587 void
4588 streaming_store(float *ptr) const
4589 {
4590 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
4591 ExcMessage("Memory not aligned"));
4592 _mm512_stream_ps(ptr, data);
4593 }
4594
4608 void
4609 gather(const float *base_ptr, const unsigned int *offsets)
4610 {
4611# ifdef DEAL_II_USE_VECTORIZATION_GATHER
4612 // unfortunately, there does not appear to be a 512 bit integer load, so
4613 // do it by some reinterpret casts here. this is allowed because the Intel
4614 // API allows aliasing between different vector types.
4615 const __m512 index_val =
4616 _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
4617 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
4618
4619 // work around a warning with gcc-12 about an uninitialized initial state
4620 // for gather by starting with a zero guess, even though all lanes will be
4621 // overwritten
4622 __m512 zero = {};
4623 __mmask16 mask = 0xFFFF;
4624
4625 data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
4626# else
4627 for (unsigned int i = 0; i < 16; ++i)
4628 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4629# endif
4630 }
4631
4645 void
4646 scatter(const unsigned int *offsets, float *base_ptr) const
4647 {
4648# ifdef DEAL_II_USE_VECTORIZATION_GATHER
4649 for (unsigned int i = 0; i < 16; ++i)
4650 for (unsigned int j = i + 1; j < 16; ++j)
4651 Assert(offsets[i] != offsets[j],
4652 ExcMessage("Result of scatter undefined if two offset elements"
4653 " point to the same position"));
4654
4655 // unfortunately, there does not appear to be a 512 bit integer load, so
4656 // do it by some reinterpret casts here. this is allowed because the Intel
4657 // API allows aliasing between different vector types.
4658 const __m512 index_val =
4659 _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
4660 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
4661 _mm512_i32scatter_ps(base_ptr, index, data, 4);
4662# else
4663 for (unsigned int i = 0; i < 16; ++i)
4664 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4665# endif
4666 }
4667
4672 float
4673 sum() const
4674 {
4675 VectorizedArray<float, 8> t1;
4676 t1.data = _mm256_add_ps(this->get_lower(), this->get_upper());
4677 return t1.sum();
4678 }
4679
4685 __m512 data;
4686
4687private:
4692 __m256
4693 get_lower() const
4694 {
4695 return _mm512_castps512_ps256(data);
4696 }
4697
4702 __m256
4703 get_upper() const
4704 {
4705 return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1));
4706 }
4707
4714 get_sqrt() const
4715 {
4716 VectorizedArray res;
4717 res.data = _mm512_sqrt_ps(data);
4718 return res;
4719 }
4720
4727 get_abs() const
4728 {
4729 // to compute the absolute value, perform bitwise andnot with -0. This
4730 // will leave all value and exponent bits unchanged but force the sign
4731 // value to +. Since there is no andnot for AVX512, we interpret the data
4732 // as 32 bit integers and do the andnot on those types (note that andnot
4733 // is a bitwise operation so the data type does not matter)
4734 __m512 mask = _mm512_set1_ps(-0.f);
4735 VectorizedArray res;
4736 res.data = reinterpret_cast<__m512>(
4737 _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
4738 reinterpret_cast<__m512i>(data)));
4739 return res;
4740 }
4741
4748 get_max(const VectorizedArray &other) const
4749 {
4750 VectorizedArray res;
4751 res.data = _mm512_max_ps(data, other.data);
4752 return res;
4753 }
4754
4761 get_min(const VectorizedArray &other) const
4762 {
4763 VectorizedArray res;
4764 res.data = _mm512_min_ps(data, other.data);
4765 return res;
4766 }
4767
4768 // Make a few functions friends.
4769 template <typename Number2, std::size_t width2>
4772 template <typename Number2, std::size_t width2>
4775 template <typename Number2, std::size_t width2>
4779 template <typename Number2, std::size_t width2>
4783};
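// The sum() members of the wide types above reduce across lanes by adding
// the upper half of the register to the lower half and then recursing into
// the next narrower VectorizedArray type. A scalar sketch of the same
// reduction pattern for 16 lanes (illustrative only):
//
//   float lanes[16] = {};
//   // ... fill lanes ...
//   for (unsigned int half = 8; half > 0; half /= 2)
//     for (unsigned int i = 0; i < half; ++i)
//       lanes[i] += lanes[i + half];
//   const float total = lanes[0];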
4784
4785
4786
4790template <>
4791inline DEAL_II_ALWAYS_INLINE void
4792vectorized_load_and_transpose(const unsigned int n_entries,
4793 const float *in,
4794 const unsigned int *offsets,
4795 VectorizedArray<float, 16> *out)
4796{
4797 // Similar to the double case, we perform the work on smaller entities. In
4798 // this case, we start from 128 bit arrays and insert them into a full 512
4799 // bit index. This reduces the code size and register pressure because we do
4800 // shuffles on 4 numbers rather than 16.
4801 const unsigned int n_chunks = n_entries / 4;
4802
4803 // To avoid warnings about uninitialized variables, need to initialize one
4804 // variable to a pre-existing value in out, which will never get used in
4805 // the end. Keep the initialization outside the loop because of a bug in
4806 // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
4807 // case t3 is initialized to zero (inside/outside of loop), see
4808 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
4809 __m512 t0, t1, t2, t3;
4810 if (n_chunks > 0)
4811 t3 = out[0].data;
4812 for (unsigned int i = 0; i < n_chunks; ++i)
4813 {
4814 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
4815 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
4816 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
4817 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
4818 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
4819 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
4820 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
4821 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
4822 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
4823 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
4824 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
4825 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
4826 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
4827 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
4828 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
4829 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
4830
4831 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4832 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4833 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4834 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4835
4836 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4837 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4838 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4839 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4840 }
4841
4842 // remainder loop of work that does not divide by 4
4843 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4844 out[i].gather(in + i, offsets);
4845}
4846
4847
4848
4852template <>
4853inline DEAL_II_ALWAYS_INLINE void
4854vectorized_load_and_transpose(const unsigned int n_entries,
4855 const std::array<float *, 16> &in,
4856 VectorizedArray<float, 16> *out)
4857{
4858 // see the comments in the vectorized_load_and_transpose above
4859
4860 const unsigned int n_chunks = n_entries / 4;
4861
4862 __m512 t0, t1, t2, t3;
4863 if (n_chunks > 0)
4864 t3 = out[0].data;
4865 for (unsigned int i = 0; i < n_chunks; ++i)
4866 {
4867 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
4868 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
4869 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
4870 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
4871 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
4872 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
4873 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
4874 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
4875 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
4876 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
4877 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
4878 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
4879 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
4880 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
4881 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
4882 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
4883
4884 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4885 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4886 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4887 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4888
4889 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4890 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4891 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4892 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4893 }
4894
4895 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4896 gather(out[i], in, i);
4897}
4898
4899
4900
4904template <>
4905inline DEAL_II_ALWAYS_INLINE void
4906vectorized_transpose_and_store(const bool add_into,
4907 const unsigned int n_entries,
4908 const VectorizedArray<float, 16> *in,
4909 const unsigned int *offsets,
4910 float *out)
4911{
4912 const unsigned int n_chunks = n_entries / 4;
4913 for (unsigned int i = 0; i < n_chunks; ++i)
4914 {
4915 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
4916 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
4917 __m512 t2 =
4918 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
4919 __m512 t3 =
4920 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
4921 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
4922 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
4923 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
4924 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
4925
4926 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
4927 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
4928 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
4929 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
4930 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
4931 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
4932 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
4933 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
4934 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
4935 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
4936 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
4937 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
4938 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
4939 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
4940 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
4941 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
4942
4943 // Cannot use the same store instructions in both paths of the 'if'
4944 // because the compiler cannot know that there is no aliasing between
4945 // pointers
4946 if (add_into)
4947 {
4948 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
4949 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4950 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
4951 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4952 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
4953 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4954 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
4955 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4956 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
4957 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4958 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
4959 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4960 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
4961 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4962 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
4963 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4964 res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
4965 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4966 res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
4967 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4968 res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
4969 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4970 res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
4971 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4972 res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
4973 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4974 res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
4975 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4976 res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
4977 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4978 res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
4979 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4980 }
4981 else
4982 {
4983 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4984 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4985 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4986 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4987 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4988 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4989 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4990 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4991 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4992 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4993 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4994 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4995 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4996 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4997 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4998 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4999 }
5000 }
5001
5002 // remainder loop of work that does not divide by 4
5003 if (add_into)
5004 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5005 for (unsigned int v = 0; v < 16; ++v)
5006 out[offsets[v] + i] += in[i][v];
5007 else
5008 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5009 for (unsigned int v = 0; v < 16; ++v)
5010 out[offsets[v] + i] = in[i][v];
5011}
5012
5013
5014
5018template <>
5019inline DEAL_II_ALWAYS_INLINE void
5020vectorized_transpose_and_store(const bool add_into,
5021 const unsigned int n_entries,
5022 const VectorizedArray<float, 16> *in,
5023 std::array<float *, 16> &out)
5024{
5025 // see the comments in the vectorized_transpose_and_store above
5026
5027 const unsigned int n_chunks = n_entries / 4;
5028 for (unsigned int i = 0; i < n_chunks; ++i)
5029 {
5030 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
5031 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
5032 __m512 t2 =
5033 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
5034 __m512 t3 =
5035 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
5036 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
5037 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
5038 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
5039 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
5040
5041 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
5042 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
5043 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
5044 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
5045 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
5046 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
5047 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
5048 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
5049 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
5050 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
5051 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
5052 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
5053 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
5054 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
5055 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
5056 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
5057
5058 if (add_into)
5059 {
5060 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
5061 _mm_storeu_ps(out[0] + 4 * i, res0);
5062 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
5063 _mm_storeu_ps(out[1] + 4 * i, res1);
5064 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
5065 _mm_storeu_ps(out[2] + 4 * i, res2);
5066 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
5067 _mm_storeu_ps(out[3] + 4 * i, res3);
5068 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
5069 _mm_storeu_ps(out[4] + 4 * i, res4);
5070 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
5071 _mm_storeu_ps(out[5] + 4 * i, res5);
5072 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
5073 _mm_storeu_ps(out[6] + 4 * i, res6);
5074 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
5075 _mm_storeu_ps(out[7] + 4 * i, res7);
5076 res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
5077 _mm_storeu_ps(out[8] + 4 * i, res8);
5078 res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
5079 _mm_storeu_ps(out[9] + 4 * i, res9);
5080 res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
5081 _mm_storeu_ps(out[10] + 4 * i, res10);
5082 res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
5083 _mm_storeu_ps(out[11] + 4 * i, res11);
5084 res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
5085 _mm_storeu_ps(out[12] + 4 * i, res12);
5086 res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
5087 _mm_storeu_ps(out[13] + 4 * i, res13);
5088 res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
5089 _mm_storeu_ps(out[14] + 4 * i, res14);
5090 res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
5091 _mm_storeu_ps(out[15] + 4 * i, res15);
5092 }
5093 else
5094 {
5095 _mm_storeu_ps(out[0] + 4 * i, res0);
5096 _mm_storeu_ps(out[1] + 4 * i, res1);
5097 _mm_storeu_ps(out[2] + 4 * i, res2);
5098 _mm_storeu_ps(out[3] + 4 * i, res3);
5099 _mm_storeu_ps(out[4] + 4 * i, res4);
5100 _mm_storeu_ps(out[5] + 4 * i, res5);
5101 _mm_storeu_ps(out[6] + 4 * i, res6);
5102 _mm_storeu_ps(out[7] + 4 * i, res7);
5103 _mm_storeu_ps(out[8] + 4 * i, res8);
5104 _mm_storeu_ps(out[9] + 4 * i, res9);
5105 _mm_storeu_ps(out[10] + 4 * i, res10);
5106 _mm_storeu_ps(out[11] + 4 * i, res11);
5107 _mm_storeu_ps(out[12] + 4 * i, res12);
5108 _mm_storeu_ps(out[13] + 4 * i, res13);
5109 _mm_storeu_ps(out[14] + 4 * i, res14);
5110 _mm_storeu_ps(out[15] + 4 * i, res15);
5111 }
5112 }
5113
5114 if (add_into)
5115 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5116 for (unsigned int v = 0; v < 16; ++v)
5117 out[v][i] += in[i][v];
5118 else
5119 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5120 for (unsigned int v = 0; v < 16; ++v)
5121 out[v][i] = in[i][v];
5122}
5123
5124# endif
5125
5126# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
5127 defined(__VSX__)
5128
5129template <>
5130class VectorizedArray<double, 2>
5131 : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
5132{
5133public:
5137 using value_type = double;
5138
5143 static constexpr bool is_implemented = true;
5144
5149 VectorizedArray() = default;
5150
5154 VectorizedArray(const double scalar)
5155 {
5156 this->operator=(scalar);
5157 }
5158
5162 template <typename U>
5163 VectorizedArray(const std::initializer_list<U> &list)
5164 : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
5165 {}
5166
5172 operator=(const double x) &
5173 {
5174 data = vec_splats(x);
5175
5176 // Some compilers believe that vec_splats sets 'x', but that's not true.
5177 // They then warn about setting a variable and not using it. Suppress the
5178 // warning by "using" the variable:
5179 (void)x;
5180 return *this;
5181 }
5182
5189 operator=(const double scalar) && = delete;
5190
5195 double &
5196 operator[](const unsigned int comp)
5197 {
5198 AssertIndexRange(comp, 2);
5199 return *(reinterpret_cast<double *>(&data) + comp);
5200 }
5201
5206 const double &
5207 operator[](const unsigned int comp) const
5208 {
5209 AssertIndexRange(comp, 2);
5210 return *(reinterpret_cast<const double *>(&data) + comp);
5211 }
5212
5218 operator+=(const VectorizedArray &vec)
5219 {
5220 data = vec_add(data, vec.data);
5221 return *this;
5222 }
5223
5229 operator-=(const VectorizedArray &vec)
5230 {
5231 data = vec_sub(data, vec.data);
5232 return *this;
5233 }
5234
5240 operator*=(const VectorizedArray &vec)
5241 {
5242 data = vec_mul(data, vec.data);
5243 return *this;
5244 }
5245
5251 operator/=(const VectorizedArray &vec)
5252 {
5253 data = vec_div(data, vec.data);
5254 return *this;
5255 }
5256
5262 void
5263 load(const double *ptr)
5264 {
5265 data = vec_vsx_ld(0, ptr);
5266 }
5267
5273 void
5274 store(double *ptr) const
5275 {
5276 vec_vsx_st(data, 0, ptr);
5277 }
5278
5283 void
5284 streaming_store(double *ptr) const
5285 {
5286 store(ptr);
5287 }
5288
5293 void
5294 gather(const double *base_ptr, const unsigned int *offsets)
5295 {
5296 for (unsigned int i = 0; i < 2; ++i)
5297 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
5298 }
5299
5304 void
5305 scatter(const unsigned int *offsets, double *base_ptr) const
5306 {
5307 for (unsigned int i = 0; i < 2; ++i)
5308 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
5309 }
5310
5316 __vector double data;
5317
5318private:
5325 get_sqrt() const
5326 {
5327 VectorizedArray res;
5328 res.data = vec_sqrt(data);
5329 return res;
5330 }
5331
5338 get_abs() const
5339 {
5340 VectorizedArray res;
5341 res.data = vec_abs(data);
5342 return res;
5343 }
5344
5351 get_max(const VectorizedArray &other) const
5352 {
5353 VectorizedArray res;
5354 res.data = vec_max(data, other.data);
5355 return res;
5356 }
5357
5364 get_min(const VectorizedArray &other) const
5365 {
5366 VectorizedArray res;
5367 res.data = vec_min(data, other.data);
5368 return res;
5369 }
5370
5371 // Make a few functions friends.
5372 template <typename Number2, std::size_t width2>
5375 template <typename Number2, std::size_t width2>
5378 template <typename Number2, std::size_t width2>
5382 template <typename Number2, std::size_t width2>
5386};
5387
5388
5389
5390template <>
5391class VectorizedArray<float, 4>
5392 : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
5393{
5394public:
5398 using value_type = float;
5399
5404 static constexpr bool is_implemented = true;
5405
5410 VectorizedArray() = default;
5411
5415 VectorizedArray(const float scalar)
5416 {
5417 this->operator=(scalar);
5418 }
5419
5423 template <typename U>
5424 VectorizedArray(const std::initializer_list<U> &list)
5425 : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
5426 {}
5427
5433 operator=(const float x) &
5434 {
5435 data = vec_splats(x);
5436
5437 // Some compilers believe that vec_splats sets 'x', but that's not true.
5438 // They then warn about setting a variable and not using it. Suppress the
5439 // warning by "using" the variable:
5440 (void)x;
5441 return *this;
5442 }
5443
5450 operator=(const float scalar) && = delete;
5451
5456 float &
5457 operator[](const unsigned int comp)
5458 {
5459 AssertIndexRange(comp, 4);
5460 return *(reinterpret_cast<float *>(&data) + comp);
5461 }
5462
5467 const float &
5468 operator[](const unsigned int comp) const
5469 {
5470 AssertIndexRange(comp, 4);
5471 return *(reinterpret_cast<const float *>(&data) + comp);
5472 }
5473
5479 operator+=(const VectorizedArray &vec)
5480 {
5481 data = vec_add(data, vec.data);
5482 return *this;
5483 }
5484
5490 operator-=(const VectorizedArray &vec)
5491 {
5492 data = vec_sub(data, vec.data);
5493 return *this;
5494 }
5495
5501 operator*=(const VectorizedArray &vec)
5502 {
5503 data = vec_mul(data, vec.data);
5504 return *this;
5505 }
5506
5512 operator/=(const VectorizedArray &vec)
5513 {
5514 data = vec_div(data, vec.data);
5515 return *this;
5516 }
5517
5523 void
5524 load(const float *ptr)
5525 {
5526 data = vec_vsx_ld(0, ptr);
5527 }
5528
5534 void
5535 store(float *ptr) const
5536 {
5537 vec_vsx_st(data, 0, ptr);
5538 }
5539
5544 void
5545 streaming_store(float *ptr) const
5546 {
5547 store(ptr);
5548 }
5549
5554 void
5555 gather(const float *base_ptr, const unsigned int *offsets)
5556 {
5557 for (unsigned int i = 0; i < 4; ++i)
5558 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
5559 }
5560
5565 void
5566 scatter(const unsigned int *offsets, float *base_ptr) const
5567 {
5568 for (unsigned int i = 0; i < 4; ++i)
5569 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
5570 }
5571
5577 __vector float data;
5578
5579private:
5586 get_sqrt() const
5587 {
5588 VectorizedArray res;
5589 res.data = vec_sqrt(data);
5590 return res;
5591 }
5592
5599 get_abs() const
5600 {
5601 VectorizedArray res;
5602 res.data = vec_abs(data);
5603 return res;
5604 }
5605
5612 get_max(const VectorizedArray &other) const
5613 {
5614 VectorizedArray res;
5615 res.data = vec_max(data, other.data);
5616 return res;
5617 }
5618
5625 get_min(const VectorizedArray &other) const
5626 {
5627 VectorizedArray res;
5628 res.data = vec_min(data, other.data);
5629 return res;
5630 }
5631
5632 // Make a few functions friends.
5633 template <typename Number2, std::size_t width2>
5636 template <typename Number2, std::size_t width2>
5639 template <typename Number2, std::size_t width2>
5643 template <typename Number2, std::size_t width2>
5647};
5648
5649# endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__)
5650 // && defined(__VSX__)
5651
5652
5653#endif // DOXYGEN
5654
5655
5656
5667template <typename Number, std::size_t width>
5668inline DEAL_II_ALWAYS_INLINE bool
5669operator==(const VectorizedArray<Number, width> &lhs,
5670 const VectorizedArray<Number, width> &rhs)
5671{
5672 for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
5673 if (lhs[i] != rhs[i])
5674 return false;
5675
5676 return true;
5677}
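// Usage sketch (illustrative only): operator== above compares all lanes and
// returns a single bool that is true only if every lane of lhs equals the
// corresponding lane of rhs.
//
//   VectorizedArray<double> a = 1.0;
//   VectorizedArray<double> b = 1.0;
//   b[0] = 2.0;
//   const bool all_equal = (a == b); // false: lane 0 differs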
5678
5679
5685template <typename Number, std::size_t width>
5686inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5687operator+(const VectorizedArray<Number, width> &u,
5688 const VectorizedArray<Number, width> &v)
5689{
5690 VectorizedArray<Number, width> tmp = u;
5691 return tmp += v;
5692}
5693
5699template <typename Number, std::size_t width>
5700inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5701operator-(const VectorizedArray<Number, width> &u,
5702 const VectorizedArray<Number, width> &v)
5703{
5704 VectorizedArray<Number, width> tmp = u;
5705 return tmp -= v;
5706}
5707
5713template <typename Number, std::size_t width>
5714inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5715operator*(const VectorizedArray<Number, width> &u,
5716 const VectorizedArray<Number, width> &v)
5717{
5718 VectorizedArray<Number, width> tmp = u;
5719 return tmp *= v;
5720}
5721
5727template <typename Number, std::size_t width>
5728inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5729operator/(const VectorizedArray<Number, width> &u,
5730 const VectorizedArray<Number, width> &v)
5731{
5732 VectorizedArray<Number, width> tmp = u;
5733 return tmp /= v;
5734}
5735
5742template <typename Number, std::size_t width>
5745{
5747 return tmp += v;
5748}
5749
5758template <std::size_t width>
5761{
5763 return tmp += v;
5764}
5765
5772template <typename Number, std::size_t width>
5775{
5776 return u + v;
5777}
5778
5787template <std::size_t width>
5790{
5791 return u + v;
5792}
5793
5800template <typename Number, std::size_t width>
5803{
5805 return tmp -= v;
5806}
5807
5816template <std::size_t width>
5819{
5820 VectorizedArray<float, width> tmp = static_cast<float>(u);
5821 return tmp -= v;
5822}
5823
5830template <typename Number, std::size_t width>
5833{
5835 return v - tmp;
5836}
5837
5846template <std::size_t width>
5849{
5850 VectorizedArray<float, width> tmp = static_cast<float>(u);
5851 return v - tmp;
5852}
5853
5860template <typename Number, std::size_t width>
5863{
5865 return tmp *= v;
5866}
5867
5876template <std::size_t width>
5879{
5880 VectorizedArray<float, width> tmp = static_cast<float>(u);
5881 return tmp *= v;
5882}
5883
5890template <typename Number, std::size_t width>
5893{
5894 return u * v;
5895}
5896
5905template <std::size_t width>
5908{
5909 return u * v;
5910}
5911
5918template <typename Number, std::size_t width>
5921{
5923 return tmp /= v;
5924}
5925
5934template <std::size_t width>
5937{
5938 VectorizedArray<float, width> tmp = static_cast<float>(u);
5939 return tmp /= v;
5940}
5941
5948template <typename Number, std::size_t width>
5951{
5953 return v / tmp;
5954}
5955
5964template <std::size_t width>
5967{
5968 VectorizedArray<float, width> tmp = static_cast<float>(u);
5969 return v / tmp;
5970}
5971
5977template <typename Number, std::size_t width>
5980{
5981 return u;
5982}
5983
5989template <typename Number, std::size_t width>
5992{
5993 // to get a negative sign, subtract the input from zero (could also
5994 // multiply by -1, but this one is slightly simpler)
5995 return VectorizedArray<Number, width>() - u;
5996}
5997
6003template <typename Number, std::size_t width>
6004inline std::ostream &
6005operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
6006{
6007 constexpr unsigned int n = VectorizedArray<Number, width>::size();
6008 for (unsigned int i = 0; i < n - 1; ++i)
6009 out << p[i] << ' ';
6010 out << p[n - 1];
6011
6012 return out;
6013}
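// Usage sketch for the output operator above (illustrative only):
//
//   #include <iostream>
//
//   VectorizedArray<double> p = 2.5;
//   p[0] = 1.0;
//   std::cout << p << std::endl; // e.g. "1 2.5 2.5 2.5" for a width of 4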
6014
6029enum class SIMDComparison : int
6030{
6031#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
6032 equal = _CMP_EQ_OQ,
6033 not_equal = _CMP_NEQ_OQ,
6034 less_than = _CMP_LT_OQ,
6035 less_than_or_equal = _CMP_LE_OQ,
6036 greater_than = _CMP_GT_OQ,
6037 greater_than_or_equal = _CMP_GE_OQ
6038#else
6039 equal,
6040 not_equal,
6041 less_than,
6042 less_than_or_equal,
6043 greater_than,
6044 greater_than_or_equal
6045#endif
6046};
6047
6048
6112template <SIMDComparison predicate, typename Number>
6113DEAL_II_ALWAYS_INLINE inline Number
6114compare_and_apply_mask(const Number &left,
6115 const Number &right,
6116 const Number &true_value,
6117 const Number &false_value)
6118{
6119 bool mask;
6120 switch (predicate)
6121 {
6122 case SIMDComparison::equal:
6123 mask = (left == right);
6124 break;
6125 case SIMDComparison::not_equal:
6126 mask = (left != right);
6127 break;
6128 case SIMDComparison::less_than:
6129 mask = (left < right);
6130 break;
6131 case SIMDComparison::less_than_or_equal:
6132 mask = (left <= right);
6133 break;
6134 case SIMDComparison::greater_than:
6135 mask = (left > right);
6136 break;
6137 case SIMDComparison::greater_than_or_equal:
6138 mask = (left >= right);
6139 break;
6140 }
6141
6142 return mask ? true_value : false_value;
6143}
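// compare_and_apply_mask() implements a branch-free, lane-wise select:
// where predicate(left, right) holds, the corresponding lane of true_value
// is returned, otherwise the lane of false_value. A minimal sketch that
// clamps every lane of x to an upper bound, usable for plain scalars as well
// as for VectorizedArray (illustrative only; names are hypothetical):
//
//   VectorizedArray<double> x = 3.0;
//   const VectorizedArray<double> upper_bound = 1.0;
//   x = compare_and_apply_mask<SIMDComparison::greater_than>(
//     x,           // left
//     upper_bound, // right
//     upper_bound, // used where x >  upper_bound
//     x);          // used where x <= upper_bound
//   // every lane of x is now min(x, upper_bound) == 1.0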
6144
6145
6150template <SIMDComparison predicate, typename Number>
6151DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
6152compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
6153 const VectorizedArray<Number, 1> &right,
6154 const VectorizedArray<Number, 1> &true_value,
6155 const VectorizedArray<Number, 1> &false_value)
6156{
6157 VectorizedArray<Number, 1> result;
6158 result.data = compare_and_apply_mask<predicate, Number>(left.data,
6159 right.data,
6160 true_value.data,
6161 false_value.data);
6162 return result;
6163}
6164
6167#ifndef DOXYGEN
6168# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
6169
6170template <SIMDComparison predicate>
6173 const VectorizedArray<float, 16> &right,
6174 const VectorizedArray<float, 16> &true_values,
6175 const VectorizedArray<float, 16> &false_values)
6176{
6177 const __mmask16 mask =
6178 _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
6179 VectorizedArray<float, 16> result;
6180 result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
6181 return result;
6182}
6183
6184
6185
6186template <SIMDComparison predicate>
6187DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
6188compare_and_apply_mask(const VectorizedArray<double, 8> &left,
6189 const VectorizedArray<double, 8> &right,
6190 const VectorizedArray<double, 8> &true_values,
6191 const VectorizedArray<double, 8> &false_values)
6192{
6193 const __mmask16 mask =
6194 _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
6195 VectorizedArray<double, 8> result;
6196 result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
6197 return result;
6198}
6199
6200# endif
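// Note on the AVX-512 variants above: _mm512_cmp_ps_mask/_mm512_cmp_pd_mask
// write a dedicated mask register, and the selection is a masked move
// (_mm512_mask_mov_ps/_mm512_mask_mov_pd) that copies true_values where the
// mask bit is set and keeps false_values elsewhere.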
6201
6202# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
6203
6204template <SIMDComparison predicate>
6205DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
6206compare_and_apply_mask(const VectorizedArray<float, 8> &left,
6207 const VectorizedArray<float, 8> &right,
6208 const VectorizedArray<float, 8> &true_values,
6209 const VectorizedArray<float, 8> &false_values)
6210{
6211 const auto mask =
6212 _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
6213
6214 VectorizedArray<float, 8> result;
6215 result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
6216 return result;
6217}
6218
6219
6220template <SIMDComparison predicate>
6221DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
6222compare_and_apply_mask(const VectorizedArray<double, 4> &left,
6223 const VectorizedArray<double, 4> &right,
6224 const VectorizedArray<double, 4> &true_values,
6225 const VectorizedArray<double, 4> &false_values)
6226{
6227 const auto mask =
6228 _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
6229
6230 VectorizedArray<double, 4> result;
6231 result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
6232 return result;
6233}
6234
6235# endif
6236
6237# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
6238
6239template <SIMDComparison predicate>
6240DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
6241compare_and_apply_mask(const VectorizedArray<float, 4> &left,
6242 const VectorizedArray<float, 4> &right,
6243 const VectorizedArray<float, 4> &true_values,
6244 const VectorizedArray<float, 4> &false_values)
6245{
6246 __m128 mask;
6247 switch (predicate)
6248 {
6249 case SIMDComparison::equal:
6250 mask = _mm_cmpeq_ps(left.data, right.data);
6251 break;
6252 case SIMDComparison::not_equal:
6253 mask = _mm_cmpneq_ps(left.data, right.data);
6254 break;
6255 case SIMDComparison::less_than:
6256 mask = _mm_cmplt_ps(left.data, right.data);
6257 break;
6258 case SIMDComparison::less_than_or_equal:
6259 mask = _mm_cmple_ps(left.data, right.data);
6260 break;
6261 case SIMDComparison::greater_than:
6262 mask = _mm_cmpgt_ps(left.data, right.data);
6263 break;
6264 case SIMDComparison::greater_than_or_equal:
6265 mask = _mm_cmpge_ps(left.data, right.data);
6266 break;
6267 }
6268
6269 VectorizedArray<float, 4> result;
6270 result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
6271 _mm_andnot_ps(mask, false_values.data));
6272
6273 return result;
6274}
6275
6276
6277template <SIMDComparison predicate>
6278DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
6279compare_and_apply_mask(const VectorizedArray<double, 2> &left,
6280 const VectorizedArray<double, 2> &right,
6281 const VectorizedArray<double, 2> &true_values,
6282 const VectorizedArray<double, 2> &false_values)
6283{
6284 __m128d mask;
6285 switch (predicate)
6286 {
6287 case SIMDComparison::equal:
6288 mask = _mm_cmpeq_pd(left.data, right.data);
6289 break;
6290 case SIMDComparison::not_equal:
6291 mask = _mm_cmpneq_pd(left.data, right.data);
6292 break;
6293 case SIMDComparison::less_than:
6294 mask = _mm_cmplt_pd(left.data, right.data);
6295 break;
6296 case SIMDComparison::less_than_or_equal:
6297 mask = _mm_cmple_pd(left.data, right.data);
6298 break;
6299 case SIMDComparison::greater_than:
6300 mask = _mm_cmpgt_pd(left.data, right.data);
6301 break;
6302 case SIMDComparison::greater_than_or_equal:
6303 mask = _mm_cmpge_pd(left.data, right.data);
6304 break;
6305 }
6306
6307 VectorizedArray<double, 2> result;
6308 result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
6309 _mm_andnot_pd(mask, false_values.data));
6310
6311 return result;
6312}
6313
6314# endif
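// Note on the SSE2 variants above: plain SSE2 provides no blend instruction,
// so the selection uses the bitwise idiom
// (mask & true_values) | (~mask & false_values),
// spelled with _mm_and_ps/_mm_andnot_ps/_mm_or_ps and their _pd counterparts.
// The ARM Neon implementations below use the same idiom with
// vandq/vmvnq/vorrq on the integer view of the data.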
6315
6316# if defined(DEAL_II_HAVE_ARM_NEON) && defined(__ARM_NEON)
6317
6318template <SIMDComparison predicate>
6319DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
6320compare_and_apply_mask(const VectorizedArray<float, 4> &left,
6321 const VectorizedArray<float, 4> &right,
6322 const VectorizedArray<float, 4> &true_values,
6323 const VectorizedArray<float, 4> &false_values)
6324{
6325 uint32x4_t mask;
6326 switch (predicate)
6327 {
6328 case SIMDComparison::equal:
6329 mask = vceqq_f32(left.data, right.data);
6330 break;
6331 case SIMDComparison::not_equal:
6332 mask = vmvnq_u32(vceqq_f32(left.data, right.data));
6333 break;
6334 case SIMDComparison::less_than:
6335 mask = vcltq_f32(left.data, right.data);
6336 break;
6337 case SIMDComparison::less_than_or_equal:
6338 mask = vcleq_f32(left.data, right.data);
6339 break;
6340 case SIMDComparison::greater_than:
6341 mask = vcgtq_f32(left.data, right.data);
6342 break;
6343 case SIMDComparison::greater_than_or_equal:
6344 mask = vcgeq_f32(left.data, right.data);
6345 break;
6346 }
6347
6348 VectorizedArray<float, 4> result;
6349 result.data = vreinterpretq_f32_u32(vorrq_u32(
6350 vandq_u32(mask, vreinterpretq_u32_f32(true_values.data)),
6351 vandq_u32(vmvnq_u32(mask), vreinterpretq_u32_f32(false_values.data))));
6352
6353 return result;
6354}
6355
6356
6357template <SIMDComparison predicate>
6358DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
6359compare_and_apply_mask(const VectorizedArray<double, 2> &left,
6360 const VectorizedArray<double, 2> &right,
6361 const VectorizedArray<double, 2> &true_values,
6362 const VectorizedArray<double, 2> &false_values)
6363{
6364 uint64x2_t mask;
6365 switch (predicate)
6366 {
6367 case SIMDComparison::equal:
6368 mask = vceqq_f64(left.data, right.data);
6369 break;
6370 case SIMDComparison::not_equal:
6371 mask = vreinterpretq_u64_u32(
6372 vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(left.data, right.data))));
6373 break;
6374 case SIMDComparison::less_than:
6375 mask = vcltq_f64(left.data, right.data);
6376 break;
6377 case SIMDComparison::less_than_or_equal:
6378 mask = vcleq_f64(left.data, right.data);
6379 break;
6380 case SIMDComparison::greater_than:
6381 mask = vcgtq_f64(left.data, right.data);
6382 break;
6383 case SIMDComparison::greater_than_or_equal:
6384 mask = vcgeq_f64(left.data, right.data);
6385 break;
6386 }
6387
6388 VectorizedArray<double, 2> result;
6389 result.data = vreinterpretq_f64_u64(vorrq_u64(
6390 vandq_u64(mask, vreinterpretq_u64_f64(true_values.data)),
6391 vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(mask))),
6392 vreinterpretq_u64_f64(false_values.data))));
6393
6394 return result;
6395}
6396
6397# endif
6398#endif // DOXYGEN
6399
6400
6401namespace internal
6402{
6403 template <typename T>
6404 struct VectorizedArrayTrait
6405 {
6409 using value_type = T;
6410
6414 static constexpr std::size_t
6415 width()
6416 {
6417 return 1;
6418 }
6419
6424 using vectorized_value_type = VectorizedArray<T>;
6431 static constexpr std::size_t
6432 stride()
6433 {
6434 return vectorized_value_type::size();
6435 }
6436
6440 static value_type &
6441 get(value_type &value, unsigned int c)
6442 {
6443 AssertIndexRange(c, 1);
6444 (void)c;
6445
6446 return value;
6447 }
6448
6452 static const value_type &
6453 get(const value_type &value, unsigned int c)
6454 {
6455 AssertIndexRange(c, 1);
6456 (void)c;
6457
6458 return value;
6459 }
6460
6464 static value_type &
6465 get_from_vectorized(vectorized_value_type &values, unsigned int c)
6466 {
6468
6469 return values[c];
6470 }
6471
6476 static const value_type &
6477 get_from_vectorized(const vectorized_value_type &values, unsigned int c)
6478 {
6480
6480
6481 return values[c];
6482 }
6483 };
6484
6485 template <typename T, std::size_t width_>
6486 struct VectorizedArrayTrait<VectorizedArray<T, width_>>
6487 {
6491 using value_type = T;
6492
6496 static constexpr std::size_t
6497 width()
6498 {
6499 return width_;
6500 }
6501
6506 using vectorized_value_type = VectorizedArray<T, width_>;
6514 static constexpr std::size_t
6515 stride()
6516 {
6517 return 1;
6518 }
6519
6523 static value_type &
6524 get(vectorized_value_type &values, unsigned int c)
6525 {
6526 AssertIndexRange(c, width_);
6527
6528 return values[c];
6529 }
6530
6534 static const value_type &
6535 get(const vectorized_value_type &values, unsigned int c)
6536 {
6537 AssertIndexRange(c, width_);
6538
6539 return values[c];
6540 }
6541
6545 static vectorized_value_type &
6546 get_from_vectorized(vectorized_value_type &values, unsigned int c)
6547 {
6548 (void)c;
6550
6551 return values;
6552 }
6553
6558 static const vectorized_value_type &
6559 get_from_vectorized(const vectorized_value_type &values, unsigned int c)
6560 {
6561 (void)c;
6563
6564 return values;
6565 }
6566 };
6567} // namespace internal
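// Usage sketch (illustration only): internal::VectorizedArrayTrait lets
// generic code treat a plain scalar and a VectorizedArray uniformly. A
// hypothetical helper that extracts lane 0 from either kind of object could
// look like
//
//   template <typename T>
//   typename internal::VectorizedArrayTrait<T>::value_type
//   first_lane(T &value)
//   {
//     return internal::VectorizedArrayTrait<T>::get(value, 0);
//   }
//
// which returns the value itself for T = double and the first lane for
// T = VectorizedArray<double>.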
6568
6569
6570DEAL_II_NAMESPACE_CLOSE
6571
6578namespace std
6579{
6587 template <typename Number, std::size_t width>
6588 inline ::VectorizedArray<Number, width>
6589 sin(const ::VectorizedArray<Number, width> &x)
6590 {
6591 ::VectorizedArray<Number, width> out;
6592 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6593 ++i)
6594 out[i] = std::sin(x[i]);
6595 return out;
6596 }
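// Usage sketch (illustration only): each overload in this namespace applies
// the scalar function from <cmath> to every lane, so generic code can keep
// calling the familiar names:
//
//   VectorizedArray<double> x = 0.5;
//   const auto y = std::sin(x) * std::cos(x); // evaluated lane by lane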
6597
6598
6599
6607 template <typename Number, std::size_t width>
6608 inline ::VectorizedArray<Number, width>
6609 cos(const ::VectorizedArray<Number, width> &x)
6610 {
6611 ::VectorizedArray<Number, width> out;
6612 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6613 ++i)
6614 out[i] = std::cos(x[i]);
6615 return out;
6616 }
6617
6618
6619
6627 template <typename Number, std::size_t width>
6628 inline ::VectorizedArray<Number, width>
6629 tan(const ::VectorizedArray<Number, width> &x)
6630 {
6631 ::VectorizedArray<Number, width> out;
6632 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6633 ++i)
6634 out[i] = std::tan(x[i]);
6635 return out;
6636 }
6637
6638
6639
6647 template <typename Number, std::size_t width>
6648 inline ::VectorizedArray<Number, width>
6649 acos(const ::VectorizedArray<Number, width> &x)
6650 {
6651 ::VectorizedArray<Number, width> out;
6652 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6653 ++i)
6654 out[i] = std::acos(x[i]);
6655 return out;
6656 }
6657
6658
6659
6667 template <typename Number, std::size_t width>
6668 inline ::VectorizedArray<Number, width>
6669 asin(const ::VectorizedArray<Number, width> &x)
6670 {
6671 ::VectorizedArray<Number, width> out;
6672 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6673 ++i)
6674 out[i] = std::asin(x[i]);
6675 return out;
6676 }
6677
6678
6679
6687 template <typename Number, std::size_t width>
6688 inline ::VectorizedArray<Number, width>
6689 atan(const ::VectorizedArray<Number, width> &x)
6690 {
6691 ::VectorizedArray<Number, width> out;
6692 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6693 ++i)
6694 out[i] = std::atan(x[i]);
6695 return out;
6696 }
6697
6698
6699
6707 template <typename Number, std::size_t width>
6708 inline ::VectorizedArray<Number, width>
6709 cosh(const ::VectorizedArray<Number, width> &x)
6710 {
6711 ::VectorizedArray<Number, width> out;
6712 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6713 ++i)
6714 out[i] = std::cosh(x[i]);
6715 return out;
6716 }
6717
6718
6719
6727 template <typename Number, std::size_t width>
6728 inline ::VectorizedArray<Number, width>
6729 sinh(const ::VectorizedArray<Number, width> &x)
6730 {
6731 ::VectorizedArray<Number, width> out;
6732 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6733 ++i)
6734 out[i] = std::sinh(x[i]);
6735 return out;
6736 }
6737
6738
6739
6747 template <typename Number, std::size_t width>
6748 inline ::VectorizedArray<Number, width>
6749 tanh(const ::VectorizedArray<Number, width> &x)
6750 {
6751 ::VectorizedArray<Number, width> out;
6752 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6753 ++i)
6754 out[i] = std::tanh(x[i]);
6755 return out;
6756 }
6757
6758
6759
6767 template <typename Number, std::size_t width>
6768 inline ::VectorizedArray<Number, width>
6769 acosh(const ::VectorizedArray<Number, width> &x)
6770 {
6771 ::VectorizedArray<Number, width> out;
6772 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6773 ++i)
6774 out[i] = std::acosh(x[i]);
6775 return out;
6776 }
6777
6778
6779
6787 template <typename Number, std::size_t width>
6788 inline ::VectorizedArray<Number, width>
6789 asinh(const ::VectorizedArray<Number, width> &x)
6790 {
6791 ::VectorizedArray<Number, width> out;
6792 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6793 ++i)
6794 out[i] = std::asinh(x[i]);
6795 return out;
6796 }
6797
6798
6799
6807 template <typename Number, std::size_t width>
6808 inline ::VectorizedArray<Number, width>
6809 atanh(const ::VectorizedArray<Number, width> &x)
6810 {
6811 ::VectorizedArray<Number, width> out;
6812 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6813 ++i)
6814 out[i] = std::atanh(x[i]);
6815 return out;
6816 }
6817
6818
6819
6827 template <typename Number, std::size_t width>
6828 inline ::VectorizedArray<Number, width>
6829 exp(const ::VectorizedArray<Number, width> &x)
6830 {
6831 ::VectorizedArray<Number, width> out;
6832 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6833 ++i)
6834 out[i] = std::exp(x[i]);
6835 return out;
6836 }
6837
6838
6839
6847 template <typename Number, std::size_t width>
6848 inline ::VectorizedArray<Number, width>
6849 log(const ::VectorizedArray<Number, width> &x)
6850 {
6851 ::VectorizedArray<Number, width> out;
6852 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6853 ++i)
6854 out[i] = std::log(x[i]);
6855 return out;
6856 }
6857
6858
6859
6867 template <typename Number, std::size_t width>
6868 inline ::VectorizedArray<Number, width>
6869 sqrt(const ::VectorizedArray<Number, width> &x)
6870 {
6871 return x.get_sqrt();
6872 }
6873
6874
6875
6883 template <typename Number, std::size_t width>
6884 inline ::VectorizedArray<Number, width>
6885 pow(const ::VectorizedArray<Number, width> &x, const Number p)
6886 {
6887 ::VectorizedArray<Number, width> out;
6888 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6889 ++i)
6890 out[i] = std::pow(x[i], p);
6891 return out;
6892 }
6893
6894
6895
6904 template <typename Number, std::size_t width>
6905 inline ::VectorizedArray<Number, width>
6906 pow(const ::VectorizedArray<Number, width> &x,
6907 const ::VectorizedArray<Number, width> &p)
6908 {
6909 ::VectorizedArray<Number, width> out;
6910 for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6911 ++i)
6912 out[i] = std::pow(x[i], p[i]);
6913 return out;
6914 }
6915
6916
6917
6925 template <typename Number, std::size_t width>
6926 inline ::VectorizedArray<Number, width>
6927 abs(const ::VectorizedArray<Number, width> &x)
6928 {
6929 return x.get_abs();
6930 }
6931
6932
6933
6941 template <typename Number, std::size_t width>
6942 inline ::VectorizedArray<Number, width>
6943 max(const ::VectorizedArray<Number, width> &x,
6944 const ::VectorizedArray<Number, width> &y)
6945 {
6946 return x.get_max(y);
6947 }
6948
6949
6950
6958 template <typename Number, std::size_t width>
6959 inline ::VectorizedArray<Number, width>
6960 min(const ::VectorizedArray<Number, width> &x,
6961 const ::VectorizedArray<Number, width> &y)
6962 {
6963 return x.get_min(y);
6964 }
6965
6966
6967
6971 template <class T>
6972 struct iterator_traits<dealii::VectorizedArrayIterator<T>>
6973 {
6974#ifdef DEAL_II_HAVE_CXX20
6975 using iterator_category = contiguous_iterator_tag;
6976#else
6977 using iterator_category = random_access_iterator_tag;
6978#endif
6979 using value_type = typename T::value_type;
6980 using difference_type = std::ptrdiff_t;
6981 };
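// Usage sketch (illustration only): with the iterator_traits specialization
// above, VectorizedArrayIterator works with standard algorithms and
// range-based for loops, e.g.
//
//   VectorizedArray<double> v = 1.0;
//   const auto n = std::distance(v.begin(), v.end()); // number of lanes
//   for (const double lane : v)
//     (void)lane;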
6982
6983} // namespace std
6984
6985#endif