vectorization.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#ifndef dealii_vectorization_h
17#define dealii_vectorization_h
18
19#include <deal.II/base/config.h>
20
24
25#include <algorithm>
26#include <array>
27#include <cmath>
28
29// Note:
30// The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
31// according to the following scheme (on x86-based architectures)
32// #ifdef __AVX512F__
33// #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
34// #elif defined (__AVX__)
35// #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
36// #elif defined (__SSE2__)
37// #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
38// #else
39// #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
40// #endif
41// In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
42// test, 'check_01_cpu_features.cmake', ensures that these features are not only
43// present in the compilation unit but also working properly.
44
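// Editor's note: a minimal sketch (not part of this header) of how user code
// can branch on the configured width; it assumes deal.II was configured with
// the corresponding instruction set so that the chosen specialization below
// is implemented:
//
//   #include <deal.II/base/config.h>
//   #include <deal.II/base/vectorization.h>
//
//   #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256
//   using VectorType = dealii::VectorizedArray<double, 4>; // AVX: 4 doubles
//   #elif DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128
//   using VectorType = dealii::VectorizedArray<double, 2>; // SSE2/Neon
//   #else
//   using VectorType = dealii::VectorizedArray<double, 1>; // scalar fallback
//   #endif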
45#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
46
47// These error messages try to detect the case that deal.II was compiled with
48// a wider instruction set extension than the current compilation unit, for
49// example because deal.II was compiled with AVX, but a user project does not
50// add -march=native or similar flags, making it fall back to SSE2. If this
51// goes undetected, it leads to very strange errors, because the size of the
52// data structures differs between the compiled deal.II code sitting in
53// libdeal_II.so and the user code.
54# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
55# error \
56 "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
57# endif
58# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
59# error \
60 "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
61# endif
62
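// Editor's note: a hedged illustration of the mismatch these checks guard
// against, assuming deal.II itself was configured with AVX; the exact flags
// depend on compiler and platform:
//
//   g++ -march=native -c user_code.cc   # enables AVX, matches libdeal_II.so
//   g++ -c user_code.cc                 # may compile with SSE2 only and then
//                                       # trigger the #error above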
63# ifdef _MSC_VER
64# include <intrin.h>
65# elif defined(__ALTIVEC__)
66# include <altivec.h>
67
68// altivec.h defines vector, pixel, bool, but we do not use them, so undefine
69// them before they make trouble
70# undef vector
71# undef pixel
72# undef bool
73# elif defined(__ARM_NEON)
74# include <arm_neon.h>
75# elif defined(__x86_64__)
76# include <x86intrin.h>
77# endif
78
79#endif
80
81
82DEAL_II_NAMESPACE_OPEN
83
84
85// Enable the EnableIfScalar type trait for VectorizedArray<Number> such
86// that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
87
88template <typename Number, std::size_t width>
89struct EnableIfScalar<VectorizedArray<Number, width>>
90{
91 using type = VectorizedArray<typename EnableIfScalar<Number>::type, width>;
92};
93
94
95
99template <typename T>
100class VectorizedArrayIterator
101{
102public:
109 constexpr VectorizedArrayIterator(T &data, const std::size_t lane)
110 : data(&data)
111 , lane(lane)
112 {}
113
117 constexpr bool
119 {
120 Assert(this->data == other.data,
122 "You are trying to compare iterators into different arrays."));
123 return this->lane == other.lane;
124 }
125
129 constexpr bool
131 {
132 Assert(this->data == other.data,
134 "You are trying to compare iterators into different arrays."));
135 return this->lane != other.lane;
136 }
137
142 constexpr const typename T::value_type &
143 operator*() const
144 {
145 AssertIndexRange(lane, T::size());
146 return (*data)[lane];
147 }
148
149
154 template <typename U = T>
155 constexpr std::enable_if_t<!std::is_same_v<U, const U>,
156 typename T::value_type> &
158 {
159 AssertIndexRange(lane, T::size());
160 return (*data)[lane];
161 }
162
170 {
171 AssertIndexRange(lane + 1, T::size() + 1);
172 ++lane;
173 return *this;
174 }
175
181 operator+=(const std::size_t offset)
182 {
183 AssertIndexRange(lane + offset, T::size() + 1);
184 lane += offset;
185 return *this;
186 }
187
195 {
196 Assert(
197 lane > 0,
199 "You can't decrement an iterator that is already at the beginning of the range."));
200 --lane;
201 return *this;
202 }
203
208 operator+(const std::size_t &offset) const
209 {
210 AssertIndexRange(lane + offset, T::size() + 1);
211 return VectorizedArrayIterator<T>(*data, lane + offset);
212 }
213
217 constexpr std::ptrdiff_t
219 {
220 return static_cast<std::ptrdiff_t>(lane) -
221 static_cast<std::ptrdiff_t>(other.lane);
222 }
223
224private:
228 T *data;
229
233 std::size_t lane;
234};
235
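// Editor's note: a minimal usage sketch for VectorizedArrayIterator (not part
// of the library source); it assumes a 4-wide double array is available, e.g.
// with AVX:
//
//   dealii::VectorizedArray<double, 4> v = 1.0;
//   for (double &lane : v) // uses begin()/end() from VectorizedArrayBase below
//     lane *= 2.0;         // every lane now holds 2.0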
236
237
250template <typename VectorizedArrayType, std::size_t width>
251class VectorizedArrayBase
252{
253public:
257 constexpr VectorizedArrayBase() = default;
258
266 template <typename U>
267 constexpr VectorizedArrayBase(const std::initializer_list<U> &list)
268 {
269 const unsigned int n_initializers = list.size();
270 Assert(n_initializers <= size(),
271 ExcMessage("The initializer list must have at most "
272 "as many elements as the vector length."));
273
274 // Copy what's in the list.
275 std::copy_n(list.begin(), n_initializers, this->begin());
276
277 // Then add zero padding where necessary.
278 if (n_initializers < size())
279 std::fill(this->begin() + n_initializers, this->end(), 0.0);
280 }
281
285 static constexpr std::size_t
287 {
288 return width;
289 }
290
296 {
298 static_cast<VectorizedArrayType &>(*this), 0);
299 }
300
306 begin() const
307 {
309 static_cast<const VectorizedArrayType &>(*this), 0);
310 }
311
317 {
319 static_cast<VectorizedArrayType &>(*this), width);
320 }
321
327 end() const
328 {
330 static_cast<const VectorizedArrayType &>(*this), width);
331 }
332
344 auto
345 dot_product(const VectorizedArrayType &v) const
346 {
347 VectorizedArrayType p = static_cast<const VectorizedArrayType &>(*this);
348 p *= v;
349 return p.sum();
350 }
351};
352
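// Editor's note: a short sketch (editor-added) of the initializer-list
// padding and dot_product() behavior defined above, assuming a 4-lane double
// array, e.g. with AVX:
//
//   dealii::VectorizedArray<double, 4> a = {1., 2.}; // lanes 2 and 3 -> 0
//   dealii::VectorizedArray<double, 4> b = 3.;       // broadcast to all lanes
//   const double d = a.dot_product(b);               // (1 + 2 + 0 + 0) * 3 = 9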
353
354
443template <typename Number, std::size_t width>
444class VectorizedArray
445 : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
446{
447public:
451 using value_type = Number;
452
461 static constexpr bool is_implemented = (width == 1);
462
467 VectorizedArray() = default;
468
472 VectorizedArray(const Number scalar)
473 {
474 static_assert(width == 1,
475 "You specified an illegal width that is not supported.");
476
477 this->operator=(scalar);
478 }
479
483 template <typename U>
484 VectorizedArray(const std::initializer_list<U> &list)
485 : VectorizedArrayBase<VectorizedArray<Number, width>, 1>(list)
486 {
487 static_assert(width == 1,
488 "You specified an illegal width that is not supported.");
489 }
490
496 operator=(const Number scalar) &
497 {
498 data = scalar;
499 return *this;
500 }
501
508 operator=(const Number scalar) && = delete;
509
515 Number &
516 operator[](const unsigned int comp)
517 {
518 (void)comp;
519 AssertIndexRange(comp, 1);
520 return data;
521 }
522
528 const Number &
529 operator[](const unsigned int comp) const
530 {
531 (void)comp;
532 AssertIndexRange(comp, 1);
533 return data;
534 }
535
542 {
543 data += vec.data;
544 return *this;
545 }
546
553 {
554 data -= vec.data;
555 return *this;
556 }
557
564 {
565 data *= vec.data;
566 return *this;
567 }
568
575 {
576 data /= vec.data;
577 return *this;
578 }
579
586 template <typename OtherNumber>
588 load(const OtherNumber *ptr)
589 {
590 data = *ptr;
591 }
592
599 template <typename OtherNumber>
601 store(OtherNumber *ptr) const
602 {
603 *ptr = data;
604 }
605
653 void
654 streaming_store(Number *ptr) const
655 {
656 *ptr = data;
657 }
658
672 void
673 gather(const Number *base_ptr, const unsigned int *offsets)
674 {
675 data = base_ptr[offsets[0]];
676 }
677
691 void
692 scatter(const unsigned int *offsets, Number *base_ptr) const
693 {
694 base_ptr[offsets[0]] = data;
695 }
696
702 Number
703 sum() const
704 {
705 return data;
706 }
707
713 Number data;
714
715private:
722 get_sqrt() const
723 {
724 VectorizedArray res;
725 res.data = std::sqrt(data);
726 return res;
727 }
728
735 get_abs() const
736 {
737 VectorizedArray res;
738 res.data = std::fabs(data);
739 return res;
740 }
741
748 get_max(const VectorizedArray &other) const
749 {
750 VectorizedArray res;
751 res.data = std::max(data, other.data);
752 return res;
753 }
754
761 get_min(const VectorizedArray &other) const
762 {
763 VectorizedArray res;
764 res.data = std::min(data, other.data);
765 return res;
766 }
767
768 // Make a few functions friends.
769 template <typename Number2, std::size_t width2>
772 template <typename Number2, std::size_t width2>
775 template <typename Number2, std::size_t width2>
779 template <typename Number2, std::size_t width2>
783};
784
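// Editor's note: a sketch (editor-added) of the memory-access interface of
// the generic width-1 implementation above; the wider specializations below
// provide the same interface:
//
//   double src[3] = {10., 20., 30.};
//   double dst[3] = {};
//   const unsigned int offsets[1] = {2};
//
//   dealii::VectorizedArray<double, 1> v;
//   v.load(src);             // v.data == 10
//   v.gather(src, offsets);  // v.data == src[2] == 30
//   v.store(dst);            // dst[0] == 30
//   v.scatter(offsets, dst); // dst[2] == 30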
785
786
798template <typename Number,
799 std::size_t width =
802 make_vectorized_array(const Number &u)
803{
805 return result;
806}
807
808
809
816template <typename VectorizedArrayType>
817inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
818make_vectorized_array(const typename VectorizedArrayType::value_type &u)
819{
820 static_assert(
821 std::is_same_v<VectorizedArrayType,
822 VectorizedArray<typename VectorizedArrayType::value_type,
823 VectorizedArrayType::size()>>,
824 "VectorizedArrayType is not a VectorizedArray.");
825
826 VectorizedArrayType result = u;
827 return result;
828}
829
830
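// Editor's note: usage sketch (editor-added) for the two
// make_vectorized_array() overloads above:
//
//   // deduce the default width for double on this platform:
//   auto a = dealii::make_vectorized_array(1.5);
//
//   // request a specific VectorizedArray type explicitly:
//   auto b =
//     dealii::make_vectorized_array<dealii::VectorizedArray<float, 4>>(2.0f);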
831
843template <typename Number, std::size_t width>
844inline DEAL_II_ALWAYS_INLINE void
846 const std::array<Number *, width> &ptrs,
847 const unsigned int offset)
848{
849 for (unsigned int v = 0; v < width; ++v)
850 out.data[v] = ptrs[v][offset];
851}
852
853
854
880template <typename Number, std::size_t width>
881inline DEAL_II_ALWAYS_INLINE void
882vectorized_load_and_transpose(const unsigned int n_entries,
883 const Number *in,
884 const unsigned int *offsets,
886{
887 for (unsigned int i = 0; i < n_entries; ++i)
888 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
889 out[i][v] = in[offsets[v] + i];
890}
891
892
904template <typename Number, std::size_t width>
905inline DEAL_II_ALWAYS_INLINE void
906vectorized_load_and_transpose(const unsigned int n_entries,
907 const std::array<Number *, width> &in,
909{
910 for (unsigned int i = 0; i < n_entries; ++i)
911 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
912 out[i][v] = in[v][i];
913}
914
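// Editor's note: a small worked example (editor-added) of the layout change
// performed by vectorized_load_and_transpose(), assuming a 2-lane double
// array (SSE2 or Neon), n_entries 3 and offsets {0, 4}:
//
//   double in[8] = {1., 2., 3., 0., 10., 20., 30., 0.};
//   unsigned int offsets[2] = {0, 4};
//   dealii::VectorizedArray<double, 2> out[3];
//   dealii::vectorized_load_and_transpose(3, in, offsets, out);
//   // out[0] = {1, 10}, out[1] = {2, 20}, out[2] = {3, 30}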
915
916
955template <typename Number, std::size_t width>
956inline DEAL_II_ALWAYS_INLINE void
958 const unsigned int n_entries,
960 const unsigned int *offsets,
961 Number *out)
962{
963 if (add_into)
964 for (unsigned int i = 0; i < n_entries; ++i)
965 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
966 out[offsets[v] + i] += in[i][v];
967 else
968 for (unsigned int i = 0; i < n_entries; ++i)
969 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
970 out[offsets[v] + i] = in[i][v];
971}
972
973
985template <typename Number, std::size_t width>
986inline DEAL_II_ALWAYS_INLINE void
988 const unsigned int n_entries,
990 std::array<Number *, width> &out)
991{
992 if (add_into)
993 for (unsigned int i = 0; i < n_entries; ++i)
994 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
995 out[v][i] += in[i][v];
996 else
997 for (unsigned int i = 0; i < n_entries; ++i)
998 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
999 out[v][i] = in[i][v];
1000}
1001
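// Editor's note: the corresponding inverse operation, again as an
// editor-added sketch (2-lane double array, n_entries 3, offsets {0, 4}):
//
//   dealii::VectorizedArray<double, 2> in[3] = {{1., 10.}, {2., 20.}, {3., 30.}};
//   double out[8] = {};
//   unsigned int offsets[2] = {0, 4};
//   dealii::vectorized_transpose_and_store(false, 3, in, offsets, out);
//   // out == {1, 2, 3, 0, 10, 20, 30, 0}; with add_into == true the values
//   // would instead be added to the previous contents of out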
1002
1005#ifndef DOXYGEN
1006
1007# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ARM_NEON)
1008
1012template <>
1013class VectorizedArray<double, 2>
1014 : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
1015{
1016public:
1020 using value_type = double;
1021
1026 static constexpr bool is_implemented = true;
1027
1032 VectorizedArray() = default;
1033
1037 VectorizedArray(const double scalar)
1038 {
1039 this->operator=(scalar);
1040 }
1041
1045 template <typename U>
1046 VectorizedArray(const std::initializer_list<U> &list)
1047 : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
1048 {}
1049
1054 operator=(const double x) &
1055 {
1056 data = vdupq_n_f64(x);
1057 return *this;
1058 }
1059
1066 operator=(const double scalar) && = delete;
1067
1071 double &
1072 operator[](const unsigned int comp)
1073 {
1074 return *(reinterpret_cast<double *>(&data) + comp);
1075 }
1076
1080 const double &
1081 operator[](const unsigned int comp) const
1082 {
1083 return *(reinterpret_cast<const double *>(&data) + comp);
1084 }
1085
1090 operator+=(const VectorizedArray &vec)
1091 {
1092 data = vaddq_f64(data, vec.data);
1093 return *this;
1094 }
1095
1100 operator-=(const VectorizedArray &vec)
1101 {
1102 data = vsubq_f64(data, vec.data);
1103 return *this;
1104 }
1105
1110 operator*=(const VectorizedArray &vec)
1111 {
1112 data = vmulq_f64(data, vec.data);
1113 return *this;
1114 }
1115
1120 operator/=(const VectorizedArray &vec)
1121 {
1122 data = vdivq_f64(data, vec.data);
1123 return *this;
1124 }
1125
1131 void
1132 load(const double *ptr)
1133 {
1134 data = vld1q_f64(ptr);
1135 }
1136
1138 void
1139 load(const float *ptr)
1140 {
1142 for (unsigned int i = 0; i < 2; ++i)
1143 data[i] = ptr[i];
1144 }
1145
1152 void
1153 store(double *ptr) const
1154 {
1155 vst1q_f64(ptr, data);
1156 }
1157
1159 void
1160 store(float *ptr) const
1161 {
1163 for (unsigned int i = 0; i < 2; ++i)
1164 ptr[i] = data[i];
1165 }
1166
1172 void
1173 streaming_store(double *ptr) const
1174 {
1175 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1176 ExcMessage("Memory not aligned"));
1177 vst1q_f64(ptr, data);
1178 }
1179
1192 void
1193 gather(const double *base_ptr, const unsigned int *offsets)
1194 {
1195 for (unsigned int i = 0; i < 2; ++i)
1196 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1197 }
1198
1211 void
1212 scatter(const unsigned int *offsets, double *base_ptr) const
1213 {
1214 for (unsigned int i = 0; i < 2; ++i)
1215 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1216 }
1217
1222 double
1223 sum() const
1224 {
1225 return vaddvq_f64(data);
1226 }
1227
1233 mutable float64x2_t data;
1234
1235private:
1241 get_sqrt() const
1242 {
1243 VectorizedArray res;
1244 res.data = vsqrtq_f64(data);
1245 return res;
1246 }
1247
1253 get_abs() const
1254 {
1255 VectorizedArray res;
1256 res.data = vabsq_f64(data);
1257 return res;
1258 }
1259
1265 get_max(const VectorizedArray &other) const
1266 {
1267 VectorizedArray res;
1268 res.data = vmaxq_f64(data, other.data);
1269 return res;
1270 }
1271
1277 get_min(const VectorizedArray &other) const
1278 {
1279 VectorizedArray res;
1280 res.data = vminq_f64(data, other.data);
1281 return res;
1282 }
1283
1284 // Make a few functions friends.
1285 template <typename Number2, std::size_t width2>
1288 template <typename Number2, std::size_t width2>
1291 template <typename Number2, std::size_t width2>
1295 template <typename Number2, std::size_t width2>
1299};
1300
1304template <>
1305class VectorizedArray<float, 4>
1306 : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
1307{
1308public:
1312 using value_type = float;
1313
1318 static constexpr bool is_implemented = true;
1319
1324 VectorizedArray() = default;
1325
1329 VectorizedArray(const float scalar)
1330 {
1331 this->operator=(scalar);
1332 }
1333
1337 template <typename U>
1338 VectorizedArray(const std::initializer_list<U> &list)
1339 : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
1340 {}
1341
1346 operator=(const float x) &
1347 {
1348 data = vdupq_n_f32(x);
1349 return *this;
1350 }
1351
1358 operator=(const float scalar) && = delete;
1359
1363 value_type &
1364 operator[](const unsigned int comp)
1365 {
1366 return *(reinterpret_cast<float *>(&data) + comp);
1367 }
1368
1372 const value_type &
1373 operator[](const unsigned int comp) const
1374 {
1375 return *(reinterpret_cast<const float *>(&data) + comp);
1376 }
1377
1382 operator+=(const VectorizedArray &vec)
1383 {
1384 data = vaddq_f32(data, vec.data);
1385 return *this;
1386 }
1387
1392 operator-=(const VectorizedArray &vec)
1393 {
1394 data = vsubq_f32(data, vec.data);
1395 return *this;
1396 }
1397
1402 operator*=(const VectorizedArray &vec)
1403 {
1404 data = vmulq_f32(data, vec.data);
1405 return *this;
1406 }
1407
1412 operator/=(const VectorizedArray &vec)
1413 {
1414 data = vdivq_f32(data, vec.data);
1415 return *this;
1416 }
1417
1423 void
1424 load(const float *ptr)
1425 {
1426 data = vld1q_f32(ptr);
1427 }
1428
1435 void
1436 store(float *ptr) const
1437 {
1438 vst1q_f32(ptr, data);
1439 }
1440
1446 void
1447 streaming_store(float *ptr) const
1448 {
1449 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1450 ExcMessage("Memory not aligned"));
1451 vst1q_f32(ptr, data);
1452 }
1453
1466 void
1467 gather(const float *base_ptr, const unsigned int *offsets)
1468 {
1469 for (unsigned int i = 0; i < 4; ++i)
1470 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
1471 }
1472
1485 void
1486 scatter(const unsigned int *offsets, float *base_ptr) const
1487 {
1488 for (unsigned int i = 0; i < 4; ++i)
1489 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
1490 }
1491
1496 float
1497 sum() const
1498 {
1499 return vaddvq_f32(data);
1500 }
1501
1507 mutable float32x4_t data;
1508
1509private:
1515 get_sqrt() const
1516 {
1517 VectorizedArray res;
1518 res.data = vsqrtq_f32(data);
1519 return res;
1520 }
1521
1527 get_abs() const
1528 {
1529 VectorizedArray res;
1530 res.data = vabsq_f32(data);
1531 return res;
1532 }
1533
1539 get_max(const VectorizedArray &other) const
1540 {
1541 VectorizedArray res;
1542 res.data = vmaxq_f32(data, other.data);
1543 return res;
1544 }
1545
1551 get_min(const VectorizedArray &other) const
1552 {
1553 VectorizedArray res;
1554 res.data = vminq_f32(data, other.data);
1555 return res;
1556 }
1557
1558 // Make a few functions friends.
1559 template <typename Number2, std::size_t width2>
1562 template <typename Number2, std::size_t width2>
1565 template <typename Number2, std::size_t width2>
1569 template <typename Number2, std::size_t width2>
1573};
1574
1575
1576# endif
1577
1578# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
1579
1583template <>
1584class VectorizedArray<double, 2>
1585 : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
1586{
1587public:
1591 using value_type = double;
1592
1597 static constexpr bool is_implemented = true;
1598
1603 VectorizedArray() = default;
1604
1608 VectorizedArray(const double scalar)
1609 {
1610 this->operator=(scalar);
1611 }
1612
1616 template <typename U>
1617 VectorizedArray(const std::initializer_list<U> &list)
1618 : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
1619 {}
1620
1626 operator=(const double x) &
1627 {
1628 data = _mm_set1_pd(x);
1629 return *this;
1630 }
1631
1638 operator=(const double scalar) && = delete;
1639
1644 double &
1645 operator[](const unsigned int comp)
1646 {
1647 AssertIndexRange(comp, 2);
1648 return *(reinterpret_cast<double *>(&data) + comp);
1649 }
1650
1655 const double &
1656 operator[](const unsigned int comp) const
1657 {
1658 AssertIndexRange(comp, 2);
1659 return *(reinterpret_cast<const double *>(&data) + comp);
1660 }
1661
1667 operator+=(const VectorizedArray &vec)
1668 {
1669# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1670 data += vec.data;
1671# else
1672 data = _mm_add_pd(data, vec.data);
1673# endif
1674 return *this;
1675 }
1676
1682 operator-=(const VectorizedArray &vec)
1683 {
1684# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1685 data -= vec.data;
1686# else
1687 data = _mm_sub_pd(data, vec.data);
1688# endif
1689 return *this;
1690 }
1691
1697 operator*=(const VectorizedArray &vec)
1698 {
1699# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1700 data *= vec.data;
1701# else
1702 data = _mm_mul_pd(data, vec.data);
1703# endif
1704 return *this;
1705 }
1706
1712 operator/=(const VectorizedArray &vec)
1713 {
1714# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1715 data /= vec.data;
1716# else
1717 data = _mm_div_pd(data, vec.data);
1718# endif
1719 return *this;
1720 }
1721
1728 void
1729 load(const double *ptr)
1730 {
1731 data = _mm_loadu_pd(ptr);
1732 }
1733
1735 void
1736 load(const float *ptr)
1737 {
1739 for (unsigned int i = 0; i < 2; ++i)
1740 data[i] = ptr[i];
1741 }
1742
1750 void
1751 store(double *ptr) const
1752 {
1753 _mm_storeu_pd(ptr, data);
1754 }
1755
1757 void
1758 store(float *ptr) const
1759 {
1761 for (unsigned int i = 0; i < 2; ++i)
1762 ptr[i] = data[i];
1763 }
1764
1770 void
1771 streaming_store(double *ptr) const
1772 {
1773 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1774 ExcMessage("Memory not aligned"));
1775 _mm_stream_pd(ptr, data);
1776 }
1777
1791 void
1792 gather(const double *base_ptr, const unsigned int *offsets)
1793 {
1794 for (unsigned int i = 0; i < 2; ++i)
1795 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1796 }
1797
1811 void
1812 scatter(const unsigned int *offsets, double *base_ptr) const
1813 {
1814 for (unsigned int i = 0; i < 2; ++i)
1815 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1816 }
1817
1822 double
1823 sum() const
1824 {
1825 __m128d t1 = _mm_unpackhi_pd(data, data);
1826 __m128d t2 = _mm_add_pd(data, t1);
1827 return _mm_cvtsd_f64(t2);
1828 }
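 // Editor's note on the horizontal add above: for data = {x0, x1},
 // _mm_unpackhi_pd(data, data) yields {x1, x1}, the addition yields
 // {x0 + x1, x1 + x1}, and _mm_cvtsd_f64() extracts lane 0, i.e. x0 + x1.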
1829
1835 __m128d data;
1836
1837private:
1844 get_sqrt() const
1845 {
1846 VectorizedArray res;
1847 res.data = _mm_sqrt_pd(data);
1848 return res;
1849 }
1850
1857 get_abs() const
1858 {
1859 // To compute the absolute value, perform a
1860 // bitwise andnot with -0. This will leave all
1861 // mantissa and exponent bits unchanged but force
1862 // the sign bit to +.
1863 __m128d mask = _mm_set1_pd(-0.);
1864 VectorizedArray res;
1865 res.data = _mm_andnot_pd(mask, data);
1866 return res;
1867 }
1868
1875 get_max(const VectorizedArray &other) const
1876 {
1877 VectorizedArray res;
1878 res.data = _mm_max_pd(data, other.data);
1879 return res;
1880 }
1881
1888 get_min(const VectorizedArray &other) const
1889 {
1890 VectorizedArray res;
1891 res.data = _mm_min_pd(data, other.data);
1892 return res;
1893 }
1894
1895 // Make a few functions friends.
1896 template <typename Number2, std::size_t width2>
1899 template <typename Number2, std::size_t width2>
1902 template <typename Number2, std::size_t width2>
1906 template <typename Number2, std::size_t width2>
1910};
1911
1912
1913
1917template <>
1918inline DEAL_II_ALWAYS_INLINE void
1919vectorized_load_and_transpose(const unsigned int n_entries,
1920 const double *in,
1921 const unsigned int *offsets,
1923{
1924 const unsigned int n_chunks = n_entries / 2;
1925 for (unsigned int i = 0; i < n_chunks; ++i)
1926 {
1927 __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
1928 __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
1929 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1930 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1931 }
1932
1933 // remainder loop of work that does not divide by 2
1934 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1935 for (unsigned int v = 0; v < 2; ++v)
1936 out[i][v] = in[offsets[v] + i];
1937}
1938
1939
1940
1944template <>
1945inline DEAL_II_ALWAYS_INLINE void
1946vectorized_load_and_transpose(const unsigned int n_entries,
1947 const std::array<double *, 2> &in,
1949{
1950 // see the comments in the vectorized_load_and_transpose above
1951
1952 const unsigned int n_chunks = n_entries / 2;
1953 for (unsigned int i = 0; i < n_chunks; ++i)
1954 {
1955 __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
1956 __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
1957 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1958 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1959 }
1960
1961 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1962 for (unsigned int v = 0; v < 2; ++v)
1963 out[i][v] = in[v][i];
1964}
1965
1966
1967
1971template <>
1972inline DEAL_II_ALWAYS_INLINE void
1973vectorized_transpose_and_store(const bool add_into,
1974 const unsigned int n_entries,
1976 const unsigned int *offsets,
1977 double *out)
1978{
1979 const unsigned int n_chunks = n_entries / 2;
1980 if (add_into)
1981 {
1982 for (unsigned int i = 0; i < n_chunks; ++i)
1983 {
1984 __m128d u0 = in[2 * i + 0].data;
1985 __m128d u1 = in[2 * i + 1].data;
1986 __m128d res0 = _mm_unpacklo_pd(u0, u1);
1987 __m128d res1 = _mm_unpackhi_pd(u0, u1);
1988 _mm_storeu_pd(out + 2 * i + offsets[0],
1989 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
1990 res0));
1991 _mm_storeu_pd(out + 2 * i + offsets[1],
1992 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
1993 res1));
1994 }
1995 // remainder loop of work that does not divide by 2
1996 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1997 for (unsigned int v = 0; v < 2; ++v)
1998 out[offsets[v] + i] += in[i][v];
1999 }
2000 else
2001 {
2002 for (unsigned int i = 0; i < n_chunks; ++i)
2003 {
2004 __m128d u0 = in[2 * i + 0].data;
2005 __m128d u1 = in[2 * i + 1].data;
2006 __m128d res0 = _mm_unpacklo_pd(u0, u1);
2007 __m128d res1 = _mm_unpackhi_pd(u0, u1);
2008 _mm_storeu_pd(out + 2 * i + offsets[0], res0);
2009 _mm_storeu_pd(out + 2 * i + offsets[1], res1);
2010 }
2011 // remainder loop of work that does not divide by 2
2012 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2013 for (unsigned int v = 0; v < 2; ++v)
2014 out[offsets[v] + i] = in[i][v];
2015 }
2016}
2017
2018
2019
2023template <>
2024inline DEAL_II_ALWAYS_INLINE void
2025vectorized_transpose_and_store(const bool add_into,
2026 const unsigned int n_entries,
2028 std::array<double *, 2> &out)
2029{
2030 // see the comments in the vectorized_transpose_and_store above
2031
2032 const unsigned int n_chunks = n_entries / 2;
2033 if (add_into)
2034 {
2035 for (unsigned int i = 0; i < n_chunks; ++i)
2036 {
2037 __m128d u0 = in[2 * i + 0].data;
2038 __m128d u1 = in[2 * i + 1].data;
2039 __m128d res0 = _mm_unpacklo_pd(u0, u1);
2040 __m128d res1 = _mm_unpackhi_pd(u0, u1);
2041 _mm_storeu_pd(out[0] + 2 * i,
2042 _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
2043 _mm_storeu_pd(out[1] + 2 * i,
2044 _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
2045 }
2046
2047 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2048 for (unsigned int v = 0; v < 2; ++v)
2049 out[v][i] += in[i][v];
2050 }
2051 else
2052 {
2053 for (unsigned int i = 0; i < n_chunks; ++i)
2054 {
2055 __m128d u0 = in[2 * i + 0].data;
2056 __m128d u1 = in[2 * i + 1].data;
2057 __m128d res0 = _mm_unpacklo_pd(u0, u1);
2058 __m128d res1 = _mm_unpackhi_pd(u0, u1);
2059 _mm_storeu_pd(out[0] + 2 * i, res0);
2060 _mm_storeu_pd(out[1] + 2 * i, res1);
2061 }
2062
2063 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2064 for (unsigned int v = 0; v < 2; ++v)
2065 out[v][i] = in[i][v];
2066 }
2067}
2068
2069
2070
2074template <>
2075class VectorizedArray<float, 4>
2076 : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
2077{
2078public:
2082 using value_type = float;
2083
2088 static constexpr bool is_implemented = true;
2089
2094 VectorizedArray() = default;
2095
2099 VectorizedArray(const float scalar)
2100 {
2101 this->operator=(scalar);
2102 }
2103
2107 template <typename U>
2108 VectorizedArray(const std::initializer_list<U> &list)
2109 : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
2110 {}
2111
2117 operator=(const float x) &
2118 {
2119 data = _mm_set1_ps(x);
2120 return *this;
2121 }
2122
2129 operator=(const float scalar) && = delete;
2130
2135 float &
2136 operator[](const unsigned int comp)
2137 {
2138 AssertIndexRange(comp, 4);
2139 return *(reinterpret_cast<float *>(&data) + comp);
2140 }
2141
2146 const float &
2147 operator[](const unsigned int comp) const
2148 {
2149 AssertIndexRange(comp, 4);
2150 return *(reinterpret_cast<const float *>(&data) + comp);
2151 }
2152
2158 operator+=(const VectorizedArray &vec)
2159 {
2160# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2161 data += vec.data;
2162# else
2163 data = _mm_add_ps(data, vec.data);
2164# endif
2165 return *this;
2166 }
2167
2173 operator-=(const VectorizedArray &vec)
2174 {
2175# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2176 data -= vec.data;
2177# else
2178 data = _mm_sub_ps(data, vec.data);
2179# endif
2180 return *this;
2181 }
2182
2188 operator*=(const VectorizedArray &vec)
2189 {
2190# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2191 data *= vec.data;
2192# else
2193 data = _mm_mul_ps(data, vec.data);
2194# endif
2195 return *this;
2196 }
2197
2203 operator/=(const VectorizedArray &vec)
2204 {
2205# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2206 data /= vec.data;
2207# else
2208 data = _mm_div_ps(data, vec.data);
2209# endif
2210 return *this;
2211 }
2212
2219 void
2220 load(const float *ptr)
2221 {
2222 data = _mm_loadu_ps(ptr);
2223 }
2224
2232 void
2233 store(float *ptr) const
2234 {
2235 _mm_storeu_ps(ptr, data);
2236 }
2237
2243 void
2244 streaming_store(float *ptr) const
2245 {
2246 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2247 ExcMessage("Memory not aligned"));
2248 _mm_stream_ps(ptr, data);
2249 }
2250
2264 void
2265 gather(const float *base_ptr, const unsigned int *offsets)
2266 {
2267 for (unsigned int i = 0; i < 4; ++i)
2268 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2269 }
2270
2284 void
2285 scatter(const unsigned int *offsets, float *base_ptr) const
2286 {
2287 for (unsigned int i = 0; i < 4; ++i)
2288 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2289 }
2290
2295 float
2296 sum() const
2297 {
2298 __m128 t1 = _mm_movehl_ps(data, data);
2299 __m128 t2 = _mm_add_ps(data, t1);
2300 __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
2301 __m128 t4 = _mm_add_ss(t2, t3);
2302 return _mm_cvtss_f32(t4);
2303 }
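 // Editor's note on the reduction above: for data = {x0, x1, x2, x3},
 // _mm_movehl_ps(data, data) yields {x2, x3, x2, x3}, the first add yields
 // {x0 + x2, x1 + x3, ...}, the shuffle moves lane 1 of that result into
 // lane 0, and _mm_add_ss()/_mm_cvtss_f32() produce x0 + x1 + x2 + x3.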
2304
2310 __m128 data;
2311
2312private:
2319 get_sqrt() const
2320 {
2321 VectorizedArray res;
2322 res.data = _mm_sqrt_ps(data);
2323 return res;
2324 }
2325
2332 get_abs() const
2333 {
2334 // To compute the absolute value, perform a bitwise andnot with -0. This
2335 // will leave all mantissa and exponent bits unchanged but force the sign
2336 // bit to +.
2337 __m128 mask = _mm_set1_ps(-0.f);
2338 VectorizedArray res;
2339 res.data = _mm_andnot_ps(mask, data);
2340 return res;
2341 }
2342
2349 get_max(const VectorizedArray &other) const
2350 {
2351 VectorizedArray res;
2352 res.data = _mm_max_ps(data, other.data);
2353 return res;
2354 }
2355
2362 get_min(const VectorizedArray &other) const
2363 {
2364 VectorizedArray res;
2365 res.data = _mm_min_ps(data, other.data);
2366 return res;
2367 }
2368
2369 // Make a few functions friends.
2370 template <typename Number2, std::size_t width2>
2373 template <typename Number2, std::size_t width2>
2376 template <typename Number2, std::size_t width2>
2380 template <typename Number2, std::size_t width2>
2384};
2385
2386
2387
2391template <>
2392inline DEAL_II_ALWAYS_INLINE void
2393vectorized_load_and_transpose(const unsigned int n_entries,
2394 const float *in,
2395 const unsigned int *offsets,
2397{
2398 const unsigned int n_chunks = n_entries / 4;
2399 for (unsigned int i = 0; i < n_chunks; ++i)
2400 {
2401 __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
2402 __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
2403 __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
2404 __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
2405 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2406 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2407 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2408 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2409 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2410 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2411 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2412 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2413 }
2414
2415 // remainder loop of work that does not divide by 4
2416 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2417 for (unsigned int v = 0; v < 4; ++v)
2418 out[i][v] = in[offsets[v] + i];
2419}
2420
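// Editor's note: the shuffle sequence above is a standard 4x4 transpose. With
// rows u0 = {a0,a1,a2,a3} ... u3 = {d0,d1,d2,d3}, the first set of shuffles
// builds v0 = {a0,a1,b0,b1}, v1 = {a2,a3,b2,b3}, v2 = {c0,c1,d0,d1} and
// v3 = {c2,c3,d2,d3}; the second set interleaves them into columns, e.g.
// out[4 * i + 0] = {a0,b0,c0,d0}.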
2421
2422
2426template <>
2427inline DEAL_II_ALWAYS_INLINE void
2428vectorized_load_and_transpose(const unsigned int n_entries,
2429 const std::array<float *, 4> &in,
2431{
2432 // see the comments in the vectorized_load_and_transpose above
2433
2434 const unsigned int n_chunks = n_entries / 4;
2435 for (unsigned int i = 0; i < n_chunks; ++i)
2436 {
2437 __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
2438 __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
2439 __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
2440 __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
2441 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2442 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2443 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2444 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2445 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2446 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2447 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2448 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2449 }
2450
2451 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2452 for (unsigned int v = 0; v < 4; ++v)
2453 out[i][v] = in[v][i];
2454}
2455
2456
2457
2461template <>
2462inline DEAL_II_ALWAYS_INLINE void
2463vectorized_transpose_and_store(const bool add_into,
2464 const unsigned int n_entries,
2465 const VectorizedArray<float, 4> *in,
2466 const unsigned int *offsets,
2467 float *out)
2468{
2469 const unsigned int n_chunks = n_entries / 4;
2470 for (unsigned int i = 0; i < n_chunks; ++i)
2471 {
2472 __m128 u0 = in[4 * i + 0].data;
2473 __m128 u1 = in[4 * i + 1].data;
2474 __m128 u2 = in[4 * i + 2].data;
2475 __m128 u3 = in[4 * i + 3].data;
2476 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2477 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2478 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2479 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2480 u0 = _mm_shuffle_ps(t0, t2, 0x88);
2481 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2482 u2 = _mm_shuffle_ps(t1, t3, 0x88);
2483 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2484
2485 // Cannot use the same store instructions in both paths of the 'if'
2486 // because the compiler cannot know that there is no aliasing between
2487 // pointers
2488 if (add_into)
2489 {
2490 u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
2491 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2492 u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
2493 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2494 u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
2495 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2496 u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
2497 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2498 }
2499 else
2500 {
2501 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2502 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2503 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2504 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2505 }
2506 }
2507
2508 // remainder loop of work that does not divide by 4
2509 if (add_into)
2510 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2511 for (unsigned int v = 0; v < 4; ++v)
2512 out[offsets[v] + i] += in[i][v];
2513 else
2514 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2515 for (unsigned int v = 0; v < 4; ++v)
2516 out[offsets[v] + i] = in[i][v];
2517}
2518
2519
2520
2524template <>
2525inline DEAL_II_ALWAYS_INLINE void
2526vectorized_transpose_and_store(const bool add_into,
2527 const unsigned int n_entries,
2528 const VectorizedArray<float, 4> *in,
2529 std::array<float *, 4> &out)
2530{
2531 // see the comments in the vectorized_transpose_and_store above
2532
2533 const unsigned int n_chunks = n_entries / 4;
2534 for (unsigned int i = 0; i < n_chunks; ++i)
2535 {
2536 __m128 u0 = in[4 * i + 0].data;
2537 __m128 u1 = in[4 * i + 1].data;
2538 __m128 u2 = in[4 * i + 2].data;
2539 __m128 u3 = in[4 * i + 3].data;
2540 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2541 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2542 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2543 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2544 u0 = _mm_shuffle_ps(t0, t2, 0x88);
2545 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2546 u2 = _mm_shuffle_ps(t1, t3, 0x88);
2547 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2548
2549 if (add_into)
2550 {
2551 u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
2552 _mm_storeu_ps(out[0] + 4 * i, u0);
2553 u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
2554 _mm_storeu_ps(out[1] + 4 * i, u1);
2555 u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
2556 _mm_storeu_ps(out[2] + 4 * i, u2);
2557 u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
2558 _mm_storeu_ps(out[3] + 4 * i, u3);
2559 }
2560 else
2561 {
2562 _mm_storeu_ps(out[0] + 4 * i, u0);
2563 _mm_storeu_ps(out[1] + 4 * i, u1);
2564 _mm_storeu_ps(out[2] + 4 * i, u2);
2565 _mm_storeu_ps(out[3] + 4 * i, u3);
2566 }
2567 }
2568
2569 if (add_into)
2570 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2571 for (unsigned int v = 0; v < 4; ++v)
2572 out[v][i] += in[i][v];
2573 else
2574 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2575 for (unsigned int v = 0; v < 4; ++v)
2576 out[v][i] = in[i][v];
2577}
2578
2579
2580
2581# endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
2582
2583# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2584
2588template <>
2589class VectorizedArray<double, 4>
2590 : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2591{
2592public:
2596 using value_type = double;
2597
2602 static constexpr bool is_implemented = true;
2603
2608 VectorizedArray() = default;
2609
2613 VectorizedArray(const double scalar)
2614 {
2615 this->operator=(scalar);
2616 }
2617
2621 template <typename U>
2622 VectorizedArray(const std::initializer_list<U> &list)
2623 : VectorizedArrayBase<VectorizedArray<double, 4>, 4>(list)
2624 {}
2625
2631 operator=(const double x) &
2632 {
2633 data = _mm256_set1_pd(x);
2634 return *this;
2635 }
2636
2643 operator=(const double scalar) && = delete;
2644
2649 double &
2650 operator[](const unsigned int comp)
2651 {
2652 AssertIndexRange(comp, 4);
2653 return *(reinterpret_cast<double *>(&data) + comp);
2654 }
2655
2660 const double &
2661 operator[](const unsigned int comp) const
2662 {
2663 AssertIndexRange(comp, 4);
2664 return *(reinterpret_cast<const double *>(&data) + comp);
2665 }
2666
2672 operator+=(const VectorizedArray &vec)
2673 {
2674 // if the compiler supports vector arithmetic, we can simply use the +=
2675 // operator on the given data type. This allows the compiler to combine
2676 // additions with multiplications (fused multiply-add) if those
2677 // instructions are available. Otherwise, we need to use the built-in
2678 // intrinsic for __m256d.
2679# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2680 data += vec.data;
2681# else
2682 data = _mm256_add_pd(data, vec.data);
2683# endif
2684 return *this;
2685 }
2686
2692 operator-=(const VectorizedArray &vec)
2693 {
2694# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2695 data -= vec.data;
2696# else
2697 data = _mm256_sub_pd(data, vec.data);
2698# endif
2699 return *this;
2700 }
2706 operator*=(const VectorizedArray &vec)
2707 {
2708# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2709 data *= vec.data;
2710# else
2711 data = _mm256_mul_pd(data, vec.data);
2712# endif
2713 return *this;
2714 }
2715
2721 operator/=(const VectorizedArray &vec)
2722 {
2723# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2724 data /= vec.data;
2725# else
2726 data = _mm256_div_pd(data, vec.data);
2727# endif
2728 return *this;
2729 }
2730
2737 void
2738 load(const double *ptr)
2739 {
2740 data = _mm256_loadu_pd(ptr);
2741 }
2742
2744 void
2745 load(const float *ptr)
2746 {
2747 data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
2748 }
2749
2757 void
2758 store(double *ptr) const
2759 {
2760 _mm256_storeu_pd(ptr, data);
2761 }
2762
2764 void
2765 store(float *ptr) const
2766 {
2767 _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
2768 }
2769
2775 void
2776 streaming_store(double *ptr) const
2777 {
2778 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2779 ExcMessage("Memory not aligned"));
2780 _mm256_stream_pd(ptr, data);
2781 }
2782
2796 void
2797 gather(const double *base_ptr, const unsigned int *offsets)
2798 {
2799# if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
2800 // unfortunately, there does not appear to be a 128-bit integer load, so
2801 // we do it via some reinterpret casts here. This is allowed because the
2802 // Intel API allows aliasing between different vector types.
2803 const __m128 index_val =
2804 _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2805 const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2806
2807 // work around a warning with gcc-12 about an uninitialized initial state
2808 // for gather by starting with a zero guess, even though all lanes will be
2809 // overwritten
2810 __m256d zero = _mm256_setzero_pd();
2811 __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);
2812
2813 data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2814# else
2815 for (unsigned int i = 0; i < 4; ++i)
2816 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2817# endif
2818 }
2819
2833 void
2834 scatter(const unsigned int *offsets, double *base_ptr) const
2835 {
2836 // no scatter operation in AVX/AVX2
2837 for (unsigned int i = 0; i < 4; ++i)
2838 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2839 }
2840
2845 double
2846 sum() const
2847 {
2849 t1.data = _mm_add_pd(this->get_lower(), this->get_upper());
2850 return t1.sum();
2851 }
2852
2858 __m256d data;
2859
2860private:
2865 __m128d
2866 get_lower() const
2867 {
2868 return _mm256_castpd256_pd128(data);
2869 }
2870
2875 __m128d
2876 get_upper() const
2877 {
2878 return _mm256_extractf128_pd(data, 1);
2879 }
2880
2887 get_sqrt() const
2888 {
2889 VectorizedArray res;
2890 res.data = _mm256_sqrt_pd(data);
2891 return res;
2892 }
2893
2900 get_abs() const
2901 {
2902 // To compute the absolute value, perform a bitwise andnot with -0. This
2903 // will leave all mantissa and exponent bits unchanged but force the sign
2904 // bit to +.
2905 __m256d mask = _mm256_set1_pd(-0.);
2906 VectorizedArray res;
2907 res.data = _mm256_andnot_pd(mask, data);
2908 return res;
2909 }
2910
2917 get_max(const VectorizedArray &other) const
2918 {
2919 VectorizedArray res;
2920 res.data = _mm256_max_pd(data, other.data);
2921 return res;
2922 }
2923
2930 get_min(const VectorizedArray &other) const
2931 {
2932 VectorizedArray res;
2933 res.data = _mm256_min_pd(data, other.data);
2934 return res;
2935 }
2936
2937 // Make a few functions friends.
2938 template <typename Number2, std::size_t width2>
2941 template <typename Number2, std::size_t width2>
2944 template <typename Number2, std::size_t width2>
2948 template <typename Number2, std::size_t width2>
2952};
2953
2954
2955
2959template <>
2960inline DEAL_II_ALWAYS_INLINE void
2961vectorized_load_and_transpose(const unsigned int n_entries,
2962 const double *in,
2963 const unsigned int *offsets,
2965{
2966 const unsigned int n_chunks = n_entries / 4;
2967 const double *in0 = in + offsets[0];
2968 const double *in1 = in + offsets[1];
2969 const double *in2 = in + offsets[2];
2970 const double *in3 = in + offsets[3];
2971
2972 for (unsigned int i = 0; i < n_chunks; ++i)
2973 {
2974 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2975 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2976 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2977 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2978 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2979 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2980 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2981 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2982 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2983 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2984 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2985 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2986 }
2987
2988 // remainder loop of work that does not divide by 4
2989 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2990 out[i].gather(in + i, offsets);
2991}
2992
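// Editor's note: the permute/unpack sequence above is a 4x4 transpose of
// doubles. With rows u0 = {a0,a1,a2,a3} ... u3 = {d0,d1,d2,d3},
// _mm256_permute2f128_pd pairs the 128-bit halves, e.g. t0 = {a0,a1,c0,c1}
// and t1 = {b0,b1,d0,d1}; the unpacklo/unpackhi calls then produce the
// columns, e.g. out[4 * i + 0] = {a0,b0,c0,d0}.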
2993
2994
2998template <>
2999inline DEAL_II_ALWAYS_INLINE void
3000vectorized_load_and_transpose(const unsigned int n_entries,
3001 const std::array<double *, 4> &in,
3003{
3004 // see the comments in the vectorized_load_and_transpose above
3005
3006 const unsigned int n_chunks = n_entries / 4;
3007 const double *in0 = in[0];
3008 const double *in1 = in[1];
3009 const double *in2 = in[2];
3010 const double *in3 = in[3];
3011
3012 for (unsigned int i = 0; i < n_chunks; ++i)
3013 {
3014 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
3015 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
3016 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
3017 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
3018 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
3019 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
3020 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
3021 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
3022 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
3023 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
3024 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
3025 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
3026 }
3027
3028 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3029 gather(out[i], in, i);
3030}
3031
3032
3033
3037template <>
3038inline DEAL_II_ALWAYS_INLINE void
3039vectorized_transpose_and_store(const bool add_into,
3040 const unsigned int n_entries,
3042 const unsigned int *offsets,
3043 double *out)
3044{
3045 const unsigned int n_chunks = n_entries / 4;
3046 double *out0 = out + offsets[0];
3047 double *out1 = out + offsets[1];
3048 double *out2 = out + offsets[2];
3049 double *out3 = out + offsets[3];
3050 for (unsigned int i = 0; i < n_chunks; ++i)
3051 {
3052 __m256d u0 = in[4 * i + 0].data;
3053 __m256d u1 = in[4 * i + 1].data;
3054 __m256d u2 = in[4 * i + 2].data;
3055 __m256d u3 = in[4 * i + 3].data;
3056 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
3057 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
3058 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
3059 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
3060 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
3061 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
3062 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
3063 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
3064
3065 // Cannot use the same store instructions in both paths of the 'if'
3066 // because the compiler cannot know that there is no aliasing between
3067 // pointers
3068 if (add_into)
3069 {
3070 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
3071 _mm256_storeu_pd(out0 + 4 * i, res0);
3072 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
3073 _mm256_storeu_pd(out1 + 4 * i, res1);
3074 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
3075 _mm256_storeu_pd(out2 + 4 * i, res2);
3076 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
3077 _mm256_storeu_pd(out3 + 4 * i, res3);
3078 }
3079 else
3080 {
3081 _mm256_storeu_pd(out0 + 4 * i, res0);
3082 _mm256_storeu_pd(out1 + 4 * i, res1);
3083 _mm256_storeu_pd(out2 + 4 * i, res2);
3084 _mm256_storeu_pd(out3 + 4 * i, res3);
3085 }
3086 }
3087
3088 // remainder loop of work that does not divide by 4
3089 if (add_into)
3090 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3091 for (unsigned int v = 0; v < 4; ++v)
3092 out[offsets[v] + i] += in[i][v];
3093 else
3094 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3095 for (unsigned int v = 0; v < 4; ++v)
3096 out[offsets[v] + i] = in[i][v];
3097}
3098
3099
3100
3104template <>
3105inline DEAL_II_ALWAYS_INLINE void
3106vectorized_transpose_and_store(const bool add_into,
3107 const unsigned int n_entries,
3109 std::array<double *, 4> &out)
3110{
3111 // see the comments in the vectorized_transpose_and_store above
3112
3113 const unsigned int n_chunks = n_entries / 4;
3114 double *out0 = out[0];
3115 double *out1 = out[1];
3116 double *out2 = out[2];
3117 double *out3 = out[3];
3118 for (unsigned int i = 0; i < n_chunks; ++i)
3119 {
3120 __m256d u0 = in[4 * i + 0].data;
3121 __m256d u1 = in[4 * i + 1].data;
3122 __m256d u2 = in[4 * i + 2].data;
3123 __m256d u3 = in[4 * i + 3].data;
3124 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
3125 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
3126 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
3127 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
3128 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
3129 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
3130 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
3131 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
3132
3133 // Cannot use the same store instructions in both paths of the 'if'
3134 // because the compiler cannot know that there is no aliasing between
3135 // pointers
3136 if (add_into)
3137 {
3138 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
3139 _mm256_storeu_pd(out0 + 4 * i, res0);
3140 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
3141 _mm256_storeu_pd(out1 + 4 * i, res1);
3142 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
3143 _mm256_storeu_pd(out2 + 4 * i, res2);
3144 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
3145 _mm256_storeu_pd(out3 + 4 * i, res3);
3146 }
3147 else
3148 {
3149 _mm256_storeu_pd(out0 + 4 * i, res0);
3150 _mm256_storeu_pd(out1 + 4 * i, res1);
3151 _mm256_storeu_pd(out2 + 4 * i, res2);
3152 _mm256_storeu_pd(out3 + 4 * i, res3);
3153 }
3154 }
3155
3156 // remainder loop of work that does not divide by 4
3157 if (add_into)
3158 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3159 for (unsigned int v = 0; v < 4; ++v)
3160 out[v][i] += in[i][v];
3161 else
3162 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3163 for (unsigned int v = 0; v < 4; ++v)
3164 out[v][i] = in[i][v];
3165}
3166
3167
3168
3172template <>
3173class VectorizedArray<float, 8>
3174 : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
3175{
3176public:
3180 using value_type = float;
3181
3186 static constexpr bool is_implemented = true;
3187
3192 VectorizedArray() = default;
3193
3197 VectorizedArray(const float scalar)
3198 {
3199 this->operator=(scalar);
3200 }
3201
3205 template <typename U>
3206 VectorizedArray(const std::initializer_list<U> &list)
3207 : VectorizedArrayBase<VectorizedArray<float, 8>, 8>(list)
3208 {}
3209
3215 operator=(const float x) &
3216 {
3217 data = _mm256_set1_ps(x);
3218 return *this;
3219 }
3220
3227 operator=(const float scalar) && = delete;
3228
3233 float &
3234 operator[](const unsigned int comp)
3235 {
3236 AssertIndexRange(comp, 8);
3237 return *(reinterpret_cast<float *>(&data) + comp);
3238 }
3239
3244 const float &
3245 operator[](const unsigned int comp) const
3246 {
3247 AssertIndexRange(comp, 8);
3248 return *(reinterpret_cast<const float *>(&data) + comp);
3249 }
3250
3256 operator+=(const VectorizedArray &vec)
3257 {
3258 // if the compiler supports vector arithmetic, we can simply use the +=
3259 // operator on the given data type. This allows the compiler to combine
3260 // additions with multiplications (fused multiply-add) if those
3261 // instructions are available. Otherwise, we need to use the built-in
3262 // intrinsic for __m256.
3263# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3264 data += vec.data;
3265# else
3266 data = _mm256_add_ps(data, vec.data);
3267# endif
3268 return *this;
3269 }
3270
3276 operator-=(const VectorizedArray &vec)
3277 {
3278# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3279 data -= vec.data;
3280# else
3281 data = _mm256_sub_ps(data, vec.data);
3282# endif
3283 return *this;
3284 }
3290 operator*=(const VectorizedArray &vec)
3291 {
3292# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3293 data *= vec.data;
3294# else
3295 data = _mm256_mul_ps(data, vec.data);
3296# endif
3297 return *this;
3298 }
3299
3305 operator/=(const VectorizedArray &vec)
3306 {
3307# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3308 data /= vec.data;
3309# else
3310 data = _mm256_div_ps(data, vec.data);
3311# endif
3312 return *this;
3313 }
3314
3321 void
3322 load(const float *ptr)
3323 {
3324 data = _mm256_loadu_ps(ptr);
3325 }
3326
3334 void
3335 store(float *ptr) const
3336 {
3337 _mm256_storeu_ps(ptr, data);
3338 }
3339
3345 void
3346 streaming_store(float *ptr) const
3347 {
3348 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
3349 ExcMessage("Memory not aligned"));
3350 _mm256_stream_ps(ptr, data);
3351 }
3352
3366 void
3367 gather(const float *base_ptr, const unsigned int *offsets)
3368 {
3369# if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
3370 // unfortunately, there does not appear to be a 256-bit integer load, so
3371 // we do it via some reinterpret casts here. This is allowed because the
3372 // Intel API allows aliasing between different vector types.
3373 const __m256 index_val =
3374 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3375 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3376
3377 // work around a warning with gcc-12 about an uninitialized initial state
3378 // for gather by starting with a zero guess, even though all lanes will be
3379 // overwritten
3380 __m256 zero = _mm256_setzero_ps();
3381 __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);
3382
3383 data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
3384# else
3385 for (unsigned int i = 0; i < 8; ++i)
3386 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3387# endif
3388 }
3389
3403 void
3404 scatter(const unsigned int *offsets, float *base_ptr) const
3405 {
3406 // no scatter operation in AVX/AVX2
3407 for (unsigned int i = 0; i < 8; ++i)
3408 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3409 }
3410
3415 float
3416 sum() const
3417 {
3419 t1.data = _mm_add_ps(this->get_lower(), this->get_upper());
3420 return t1.sum();
3421 }
3422
3428 __m256 data;
3429
3430private:
3435 __m128
3436 get_lower() const
3437 {
3438 return _mm256_castps256_ps128(data);
3439 }
3440
3445 __m128
3446 get_upper() const
3447 {
3448 return _mm256_extractf128_ps(data, 1);
3449 }
3450
3457 get_sqrt() const
3458 {
3459 VectorizedArray res;
3460 res.data = _mm256_sqrt_ps(data);
3461 return res;
3462 }
3463
3470 get_abs() const
3471 {
3472 // To compute the absolute value, perform a bitwise andnot with -0. This
3473 // will leave all mantissa and exponent bits unchanged but force the sign
3474 // bit to +.
3475 __m256 mask = _mm256_set1_ps(-0.f);
3476 VectorizedArray res;
3477 res.data = _mm256_andnot_ps(mask, data);
3478 return res;
3479 }
3480
3487 get_max(const VectorizedArray &other) const
3488 {
3489 VectorizedArray res;
3490 res.data = _mm256_max_ps(data, other.data);
3491 return res;
3492 }
3493
3500 get_min(const VectorizedArray &other) const
3501 {
3502 VectorizedArray res;
3503 res.data = _mm256_min_ps(data, other.data);
3504 return res;
3505 }
3506
3507 // Make a few functions friends.
3508 template <typename Number2, std::size_t width2>
3511 template <typename Number2, std::size_t width2>
3514 template <typename Number2, std::size_t width2>
3518 template <typename Number2, std::size_t width2>
3522};
3523
3524
3525
3529template <>
3530inline DEAL_II_ALWAYS_INLINE void
3531vectorized_load_and_transpose(const unsigned int n_entries,
3532 const float *in,
3533 const unsigned int *offsets,
3535{
3536 const unsigned int n_chunks = n_entries / 4;
3537 for (unsigned int i = 0; i < n_chunks; ++i)
3538 {
3539 // To avoid warnings about uninitialized variables, we need to initialize
3540 // one variable with zero before using it.
3541 __m256 t0, t1, t2, t3 = {};
3542 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3543 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3544 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3545 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3546 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3547 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3548 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3549 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3550
3551 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3552 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3553 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3554 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3555 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3556 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3557 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3558 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3559 }
3560
3561 // remainder loop of work that does not divide by 4
3562 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3563 out[i].gather(in + i, offsets);
3564}
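
// Note on the shuffle masks above (illustrative): _mm256_shuffle_ps operates
// on each 128-bit lane separately and, for sources a and b, returns
// (a[imm&3], a[(imm>>2)&3], b[(imm>>4)&3], b[(imm>>6)&3]) per lane. Thus 0x44
// selects elements {0,1} of both sources, 0xee selects {2,3}, 0x88 selects
// {0,2}, and 0xdd selects {1,3}; chained as above this is a 4x4 transpose
// within each lane, e.g. the lower lane of out[4 * i + 0] ends up holding
// (t0[0], t1[0], t2[0], t3[0]).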
3565
3566
3567
3571template <>
3572inline DEAL_II_ALWAYS_INLINE void
3573vectorized_load_and_transpose(const unsigned int n_entries,
3574 const std::array<float *, 8> &in,
3575 VectorizedArray<float, 8> *out)
3576{
3577 // see the comments in the vectorized_load_and_transpose above
3578
3579 const unsigned int n_chunks = n_entries / 4;
3580 for (unsigned int i = 0; i < n_chunks; ++i)
3581 {
3582 __m256 t0, t1, t2, t3 = {};
3583 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3584 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3585 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3586 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3587 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3588 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3589 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3590 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3591
3592 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3593 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3594 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3595 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3596 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3597 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3598 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3599 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3600 }
3601
3602 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3603 gather(out[i], in, i);
3604}
3605
3606
3607
3611template <>
3612inline DEAL_II_ALWAYS_INLINE void
3613vectorized_transpose_and_store(const bool add_into,
3614 const unsigned int n_entries,
3615 const VectorizedArray<float, 8> *in,
3616 const unsigned int *offsets,
3617 float *out)
3618{
3619 const unsigned int n_chunks = n_entries / 4;
3620 for (unsigned int i = 0; i < n_chunks; ++i)
3621 {
3622 __m256 u0 = in[4 * i + 0].data;
3623 __m256 u1 = in[4 * i + 1].data;
3624 __m256 u2 = in[4 * i + 2].data;
3625 __m256 u3 = in[4 * i + 3].data;
3626 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3627 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3628 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3629 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3630 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3631 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3632 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3633 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3634 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3635 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3636 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3637 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3638 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3639 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3640 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3641 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3642
3643 // Cannot use the same store instructions in both paths of the 'if'
3644 // because the compiler cannot know that there is no aliasing between
3645 // pointers
3646 if (add_into)
3647 {
3648 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3649 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3650 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3651 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3652 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3653 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3654 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3655 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3656 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3657 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3658 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3659 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3660 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3661 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3662 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3663 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3664 }
3665 else
3666 {
3667 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3668 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3669 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3670 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3671 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3672 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3673 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3674 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3675 }
3676 }
3677
3678 // remainder loop of work that does not divide by 4
3679 if (add_into)
3680 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3681 for (unsigned int v = 0; v < 8; ++v)
3682 out[offsets[v] + i] += in[i][v];
3683 else
3684 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3685 for (unsigned int v = 0; v < 8; ++v)
3686 out[offsets[v] + i] = in[i][v];
3687}
3688
3689
3690
3694template <>
3695inline DEAL_II_ALWAYS_INLINE void
3696vectorized_transpose_and_store(const bool add_into,
3697 const unsigned int n_entries,
3698 const VectorizedArray<float, 8> *in,
3699 std::array<float *, 8> &out)
3700{
3701 // see the comments in the vectorized_transpose_and_store above
3702
3703 const unsigned int n_chunks = n_entries / 4;
3704 for (unsigned int i = 0; i < n_chunks; ++i)
3705 {
3706 __m256 u0 = in[4 * i + 0].data;
3707 __m256 u1 = in[4 * i + 1].data;
3708 __m256 u2 = in[4 * i + 2].data;
3709 __m256 u3 = in[4 * i + 3].data;
3710 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3711 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3712 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3713 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3714 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3715 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3716 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3717 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3718 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3719 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3720 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3721 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3722 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3723 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3724 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3725 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3726
3727 if (add_into)
3728 {
3729 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3730 _mm_storeu_ps(out[0] + 4 * i, res0);
3731 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3732 _mm_storeu_ps(out[1] + 4 * i, res1);
3733 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3734 _mm_storeu_ps(out[2] + 4 * i, res2);
3735 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3736 _mm_storeu_ps(out[3] + 4 * i, res3);
3737 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3738 _mm_storeu_ps(out[4] + 4 * i, res4);
3739 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3740 _mm_storeu_ps(out[5] + 4 * i, res5);
3741 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3742 _mm_storeu_ps(out[6] + 4 * i, res6);
3743 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3744 _mm_storeu_ps(out[7] + 4 * i, res7);
3745 }
3746 else
3747 {
3748 _mm_storeu_ps(out[0] + 4 * i, res0);
3749 _mm_storeu_ps(out[1] + 4 * i, res1);
3750 _mm_storeu_ps(out[2] + 4 * i, res2);
3751 _mm_storeu_ps(out[3] + 4 * i, res3);
3752 _mm_storeu_ps(out[4] + 4 * i, res4);
3753 _mm_storeu_ps(out[5] + 4 * i, res5);
3754 _mm_storeu_ps(out[6] + 4 * i, res6);
3755 _mm_storeu_ps(out[7] + 4 * i, res7);
3756 }
3757 }
3758
3759 if (add_into)
3760 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3761 for (unsigned int v = 0; v < 8; ++v)
3762 out[v][i] += in[i][v];
3763 else
3764 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3765 for (unsigned int v = 0; v < 8; ++v)
3766 out[v][i] = in[i][v];
3767}
3768
3769# endif
3770
3771// for safety, also check that __AVX512F__ is defined in case the user manually
3772// set some conflicting compile flags which prevent compilation
3773
3774# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
3775
3779template <>
3780class VectorizedArray<double, 8>
3781 : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
3782{
3783public:
3787 using value_type = double;
3788
3793 static constexpr bool is_implemented = true;
3794
3799 VectorizedArray() = default;
3800
3804 VectorizedArray(const double scalar)
3805 {
3806 this->operator=(scalar);
3807 }
3808
3812 template <typename U>
3813 VectorizedArray(const std::initializer_list<U> &list)
3814 : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
3815 {}
3816
3822 operator=(const double x) &
3823 {
3824 data = _mm512_set1_pd(x);
3825 return *this;
3826 }
3827
3828
3835 operator=(const double scalar) && = delete;
3836
3841 double &
3842 operator[](const unsigned int comp)
3843 {
3844 AssertIndexRange(comp, 8);
3845 return *(reinterpret_cast<double *>(&data) + comp);
3846 }
3847
3852 const double &
3853 operator[](const unsigned int comp) const
3854 {
3855 AssertIndexRange(comp, 8);
3856 return *(reinterpret_cast<const double *>(&data) + comp);
3857 }
3858
3864 operator+=(const VectorizedArray &vec)
3865 {
3866 // if the compiler supports vector arithmetic, we can simply use +=
3867 // operator on the given data type. this allows the compiler to combine
3868 // additions with multiplication (fused multiply-add) if those
3869 // instructions are available. Otherwise, we need to use the built-in
3870 // intrinsic command for __m512d
3871# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3872 data += vec.data;
3873# else
3874 data = _mm512_add_pd(data, vec.data);
3875# endif
3876 return *this;
3877 }
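
// Illustrative sketch of why this matters: with vector arithmetics enabled,
// user code combining the overloaded operators, e.g.
//
//   VectorizedArray<double, 8> x, a, b;
//   x += a * b;
//
// may be contracted by the compiler into fused multiply-add instructions
// (vfmadd on AVX-512), whereas the intrinsic fallback issues a separate
// multiply and add.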
3878
3884 operator-=(const VectorizedArray &vec)
3885 {
3886# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3887 data -= vec.data;
3888# else
3889 data = _mm512_sub_pd(data, vec.data);
3890# endif
3891 return *this;
3892 }
3898 operator*=(const VectorizedArray &vec)
3899 {
3900# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3901 data *= vec.data;
3902# else
3903 data = _mm512_mul_pd(data, vec.data);
3904# endif
3905 return *this;
3906 }
3907
3913 operator/=(const VectorizedArray &vec)
3914 {
3915# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3916 data /= vec.data;
3917# else
3918 data = _mm512_div_pd(data, vec.data);
3919# endif
3920 return *this;
3921 }
3922
3929 void
3930 load(const double *ptr)
3931 {
3932 data = _mm512_loadu_pd(ptr);
3933 }
3934
3936 void
3937 load(const float *ptr)
3938 {
3939 data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
3940 }
3941
3949 void
3950 store(double *ptr) const
3951 {
3952 _mm512_storeu_pd(ptr, data);
3953 }
3954
3956 void
3957 store(float *ptr) const
3958 {
3959 _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
3960 }
3961
3967 void
3968 streaming_store(double *ptr) const
3969 {
3970 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
3971 ExcMessage("Memory not aligned"));
3972 _mm512_stream_pd(ptr, data);
3973 }
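
// Usage sketch (illustrative; the buffer name is hypothetical): the pointer
// must be 64-byte aligned, which e.g. std::aligned_alloc from C++17 provides:
//
//   double *buf =
//     static_cast<double *>(std::aligned_alloc(64, 512 * sizeof(double)));
//   VectorizedArray<double, 8> v = 1.0;
//   v.streaming_store(buf); // non-temporal store that bypasses the caches
//   std::free(buf);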
3974
3988 void
3989 gather(const double *base_ptr, const unsigned int *offsets)
3990 {
3991# ifdef DEAL_II_USE_VECTORIZATION_GATHER
3992 // unfortunately, there does not appear to be a 256 bit integer load, so
3993 // do it by some reinterpret casts here. this is allowed because the Intel
3994 // API allows aliasing between different vector types.
3995 const __m256 index_val =
3996 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3997 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3998
3999 // work around a warning with gcc-12 about an uninitialized initial state
4000 // for gather by starting with a zero guess, even though all lanes will be
4001 // overwritten
4002 __m512d zero = {};
4003 __mmask8 mask = 0xFF;
4004
4005 data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
4006# else
4007 for (unsigned int i = 0; i < 8; ++i)
4008 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4009# endif
4010 }
4011
4025 void
4026 scatter(const unsigned int *offsets, double *base_ptr) const
4027 {
4028# ifdef DEAL_II_USE_VECTORIZATION_GATHER
4029 for (unsigned int i = 0; i < 8; ++i)
4030 for (unsigned int j = i + 1; j < 8; ++j)
4031 Assert(offsets[i] != offsets[j],
4032 ExcMessage("Result of scatter undefined if two offset elements"
4033 " point to the same position"));
4034
4035 // unfortunately, there does not appear to be a 256 bit integer load, so
4036 // do it by some reinterpret casts here. this is allowed because the Intel
4037 // API allows aliasing between different vector types.
4038 const __m256 index_val =
4039 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
4040 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
4041 _mm512_i32scatter_pd(base_ptr, index, data, 8);
4042# else
4043 for (unsigned int i = 0; i < 8; ++i)
4044 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4045# endif
4046 }
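
// Usage sketch (illustrative; array names are hypothetical): scatter() is the
// inverse of gather(), writing lane k to base_ptr[offsets[k]]; the offsets
// must be pairwise distinct, as the assertion above checks.
//
//   double src[8], dst[64];
//   unsigned int idx[8] = {0, 2, 4, 6, 8, 10, 12, 14};
//   VectorizedArray<double, 8> v;
//   v.load(src);
//   v.scatter(idx, dst); // dst[idx[k]] = v[k] for k = 0..7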
4047
4052 double
4053 sum() const
4054 {
4055 VectorizedArray<double, 4> t1;
4056 t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());
4057 return t1.sum();
4058 }
4059
4065 __m512d data;
4066
4067private:
4072 __m256d
4073 get_lower() const
4074 {
4075 return _mm512_castpd512_pd256(data);
4076 }
4077
4082 __m256d
4083 get_upper() const
4084 {
4085 return _mm512_extractf64x4_pd(data, 1);
4086 }
4087
4094 get_sqrt() const
4095 {
4096 VectorizedArray res;
4097 res.data = _mm512_sqrt_pd(data);
4098 return res;
4099 }
4100
4107 get_abs() const
4108 {
4109 // to compute the absolute value, perform a bitwise andnot with -0. This
4110 // will leave all mantissa and exponent bits unchanged but force the sign
4111 // bit to +. Since AVX-512F has no andnot for double-precision data, we
4112 // interpret the data as 64 bit integers and do the andnot on those types
4113 // (note that andnot is a bitwise operation, so the data type does not matter)
4114 __m512d mask = _mm512_set1_pd(-0.);
4115 VectorizedArray res;
4116 res.data = reinterpret_cast<__m512d>(
4117 _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
4118 reinterpret_cast<__m512i>(data)));
4119 return res;
4120 }
4121
4128 get_max(const VectorizedArray &other) const
4129 {
4130 VectorizedArray res;
4131 res.data = _mm512_max_pd(data, other.data);
4132 return res;
4133 }
4134
4141 get_min(const VectorizedArray &other) const
4142 {
4143 VectorizedArray res;
4144 res.data = _mm512_min_pd(data, other.data);
4145 return res;
4146 }
4147
4148 // Make a few functions friends.
4149 template <typename Number2, std::size_t width2>
4152 template <typename Number2, std::size_t width2>
4155 template <typename Number2, std::size_t width2>
4159 template <typename Number2, std::size_t width2>
4163};
4164
4165
4166
4170template <>
4171inline DEAL_II_ALWAYS_INLINE void
4172vectorized_load_and_transpose(const unsigned int n_entries,
4173 const double *in,
4174 const unsigned int *offsets,
4175 VectorizedArray<double, 8> *out)
4176{
4177 // do not do a full transpose because the code would be long and most
4178 // likely would not pay off: many processors have two load units
4179 // (for the eight load instructions at the top) but only one permute unit
4180 // (for the eight shuffle/unpack instructions). Rather, start the
4181 // transposition on vectorized arrays of half the size, i.e., 256 bits
4182 const unsigned int n_chunks = n_entries / 4;
4183 for (unsigned int i = 0; i < n_chunks; ++i)
4184 {
4185 __m512d t0, t1, t2, t3 = {};
4186
4187 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
4188 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
4189 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
4190 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
4191 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
4192 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
4193 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
4194 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
4195
4196 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
4197 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
4198 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
4199 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
4200 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
4201 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
4202 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
4203 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
4204 }
4205 // remainder loop of work that does not divide by 4
4206 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4207 out[i].gather(in + i, offsets);
4208}
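
// Semantics sketch (illustrative; names are hypothetical): as in the generic
// version, this specialization fills out[i][v] with in[offsets[v] + i], i.e.
// it reads n_entries consecutive values from each of the 8 strided source
// locations and transposes them into vectorized form:
//
//   vectorized_load_and_transpose(n_entries, in, offsets, out);
//   // now out[i][v] == in[offsets[v] + i] for i < n_entries and v < 8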
4209
4210
4211
4215template <>
4216inline DEAL_II_ALWAYS_INLINE void
4217vectorized_load_and_transpose(const unsigned int n_entries,
4218 const std::array<double *, 8> &in,
4219 VectorizedArray<double, 8> *out)
4220{
4221 const unsigned int n_chunks = n_entries / 4;
4222 for (unsigned int i = 0; i < n_chunks; ++i)
4223 {
4224 __m512d t0, t1, t2, t3 = {};
4225
4226 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
4227 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
4228 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
4229 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
4230 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
4231 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
4232 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
4233 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
4234
4235 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
4236 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
4237 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
4238 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
4239 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
4240 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
4241 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
4242 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
4243 }
4244
4245 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4246 gather(out[i], in, i);
4247}
4248
4249
4250
4254template <>
4255inline DEAL_II_ALWAYS_INLINE void
4256vectorized_transpose_and_store(const bool add_into,
4257 const unsigned int n_entries,
4258 const VectorizedArray<double, 8> *in,
4259 const unsigned int *offsets,
4260 double *out)
4261{
4262 // as for the load, we split the store operations into 256 bit units to
4263 // better balance between code size, shuffle instructions, and stores
4264 const unsigned int n_chunks = n_entries / 4;
4265 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
4266 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
4267 for (unsigned int i = 0; i < n_chunks; ++i)
4268 {
4269 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
4270 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
4271 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4272 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4273 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
4274 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
4275 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
4276 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
4277 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
4278 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
4279 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
4280 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
4281 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
4282 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
4283 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
4284 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
4285
4286 // Cannot use the same store instructions in both paths of the 'if'
4287 // because the compiler cannot know that there is no aliasing
4288 // between pointers
4289 if (add_into)
4290 {
4291 res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
4292 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
4293 res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
4294 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
4295 res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
4296 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
4297 res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
4298 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
4299 res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
4300 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
4301 res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
4302 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
4303 res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
4304 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
4305 res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
4306 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
4307 }
4308 else
4309 {
4310 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
4311 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
4312 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
4313 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
4314 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
4315 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
4316 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
4317 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
4318 }
4319 }
4320
4321 // remainder loop of work that does not divide by 4
4322 if (add_into)
4323 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4324 for (unsigned int v = 0; v < 8; ++v)
4325 out[offsets[v] + i] += in[i][v];
4326 else
4327 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4328 for (unsigned int v = 0; v < 8; ++v)
4329 out[offsets[v] + i] = in[i][v];
4330}
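
// Semantics sketch (illustrative): this is the inverse of the load above,
// writing (or accumulating) lane v of in[i] to out[offsets[v] + i]:
//
//   vectorized_transpose_and_store(add_into, n_entries, in, offsets, out);
//   // out[offsets[v] + i]  = in[i][v]   if add_into == false
//   // out[offsets[v] + i] += in[i][v]   if add_into == true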
4331
4332
4333
4337template <>
4338inline DEAL_II_ALWAYS_INLINE void
4339vectorized_transpose_and_store(const bool add_into,
4340 const unsigned int n_entries,
4341 const VectorizedArray<double, 8> *in,
4342 std::array<double *, 8> &out)
4343{
4344 // see the comments in the vectorized_transpose_and_store above
4345
4346 const unsigned int n_chunks = n_entries / 4;
4347 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
4348 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
4349 for (unsigned int i = 0; i < n_chunks; ++i)
4350 {
4351 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
4352 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
4353 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4354 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4355 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
4356 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
4357 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
4358 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
4359 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
4360 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
4361 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
4362 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
4363 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
4364 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
4365 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
4366 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
4367
4368 if (add_into)
4369 {
4370 res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
4371 _mm256_storeu_pd(out[0] + 4 * i, res0);
4372 res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
4373 _mm256_storeu_pd(out[1] + 4 * i, res1);
4374 res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
4375 _mm256_storeu_pd(out[2] + 4 * i, res2);
4376 res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
4377 _mm256_storeu_pd(out[3] + 4 * i, res3);
4378 res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
4379 _mm256_storeu_pd(out[4] + 4 * i, res4);
4380 res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
4381 _mm256_storeu_pd(out[5] + 4 * i, res5);
4382 res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
4383 _mm256_storeu_pd(out[6] + 4 * i, res6);
4384 res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
4385 _mm256_storeu_pd(out[7] + 4 * i, res7);
4386 }
4387 else
4388 {
4389 _mm256_storeu_pd(out[0] + 4 * i, res0);
4390 _mm256_storeu_pd(out[1] + 4 * i, res1);
4391 _mm256_storeu_pd(out[2] + 4 * i, res2);
4392 _mm256_storeu_pd(out[3] + 4 * i, res3);
4393 _mm256_storeu_pd(out[4] + 4 * i, res4);
4394 _mm256_storeu_pd(out[5] + 4 * i, res5);
4395 _mm256_storeu_pd(out[6] + 4 * i, res6);
4396 _mm256_storeu_pd(out[7] + 4 * i, res7);
4397 }
4398 }
4399
4400 if (add_into)
4401 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4402 for (unsigned int v = 0; v < 8; ++v)
4403 out[v][i] += in[i][v];
4404 else
4405 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4406 for (unsigned int v = 0; v < 8; ++v)
4407 out[v][i] = in[i][v];
4408}
4409
4410
4411
4415template <>
4416class VectorizedArray<float, 16>
4417 : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
4418{
4419public:
4423 using value_type = float;
4424
4429 static constexpr bool is_implemented = true;
4430
4435 VectorizedArray() = default;
4436
4440 VectorizedArray(const float scalar)
4441 {
4442 this->operator=(scalar);
4443 }
4444
4448 template <typename U>
4449 VectorizedArray(const std::initializer_list<U> &list)
4450 : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
4451 {}
4452
4458 operator=(const float x) &
4459 {
4460 data = _mm512_set1_ps(x);
4461 return *this;
4462 }
4463
4470 operator=(const float scalar) && = delete;
4471
4476 float &
4477 operator[](const unsigned int comp)
4478 {
4479 AssertIndexRange(comp, 16);
4480 return *(reinterpret_cast<float *>(&data) + comp);
4481 }
4482
4487 const float &
4488 operator[](const unsigned int comp) const
4489 {
4490 AssertIndexRange(comp, 16);
4491 return *(reinterpret_cast<const float *>(&data) + comp);
4492 }
4493
4499 operator+=(const VectorizedArray &vec)
4500 {
4501 // if the compiler supports vector arithmetic, we can simply use +=
4502 // operator on the given data type. this allows the compiler to combine
4503 // additions with multiplication (fused multiply-add) if those
4504 // instructions are available. Otherwise, we need to use the built-in
4505 // intrinsic command for __m512
4506# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4507 data += vec.data;
4508# else
4509 data = _mm512_add_ps(data, vec.data);
4510# endif
4511 return *this;
4512 }
4513
4519 operator-=(const VectorizedArray &vec)
4520 {
4521# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4522 data -= vec.data;
4523# else
4524 data = _mm512_sub_ps(data, vec.data);
4525# endif
4526 return *this;
4527 }
4533 operator*=(const VectorizedArray &vec)
4534 {
4535# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4536 data *= vec.data;
4537# else
4538 data = _mm512_mul_ps(data, vec.data);
4539# endif
4540 return *this;
4541 }
4542
4548 operator/=(const VectorizedArray &vec)
4549 {
4550# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4551 data /= vec.data;
4552# else
4553 data = _mm512_div_ps(data, vec.data);
4554# endif
4555 return *this;
4556 }
4557
4564 void
4565 load(const float *ptr)
4566 {
4567 data = _mm512_loadu_ps(ptr);
4568 }
4569
4577 void
4578 store(float *ptr) const
4579 {
4580 _mm512_storeu_ps(ptr, data);
4581 }
4582
4588 void
4589 streaming_store(float *ptr) const
4590 {
4591 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
4592 ExcMessage("Memory not aligned"));
4593 _mm512_stream_ps(ptr, data);
4594 }
4595
4609 void
4610 gather(const float *base_ptr, const unsigned int *offsets)
4611 {
4612# ifdef DEAL_II_USE_VECTORIZATION_GATHER
4613 // unfortunately, there does not appear to be a 512 bit integer load, so
4614 // do it by some reinterpret casts here. this is allowed because the Intel
4615 // API allows aliasing between different vector types.
4616 const __m512 index_val =
4617 _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
4618 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
4619
4620 // work around a warning with gcc-12 about an uninitialized initial state
4621 // for gather by starting with a zero guess, even though all lanes will be
4622 // overwritten
4623 __m512 zero = {};
4624 __mmask16 mask = 0xFFFF;
4625
4626 data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
4627# else
4628 for (unsigned int i = 0; i < 16; ++i)
4629 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4630# endif
4631 }
4632
4646 void
4647 scatter(const unsigned int *offsets, float *base_ptr) const
4648 {
4649# ifdef DEAL_II_USE_VECTORIZATION_GATHER
4650 for (unsigned int i = 0; i < 16; ++i)
4651 for (unsigned int j = i + 1; j < 16; ++j)
4652 Assert(offsets[i] != offsets[j],
4653 ExcMessage("Result of scatter undefined if two offset elements"
4654 " point to the same position"));
4655
4656 // unfortunately, there does not appear to be a 512 bit integer load, so
4657 // do it by some reinterpret casts here. this is allowed because the Intel
4658 // API allows aliasing between different vector types.
4659 const __m512 index_val =
4660 _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
4661 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
4662 _mm512_i32scatter_ps(base_ptr, index, data, 4);
4663# else
4664 for (unsigned int i = 0; i < 16; ++i)
4665 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4666# endif
4667 }
4668
4673 float
4674 sum() const
4675 {
4676 VectorizedArray<float, 8> t1;
4677 t1.data = _mm256_add_ps(this->get_lower(), this->get_upper());
4678 return t1.sum();
4679 }
4680
4686 __m512 data;
4687
4688private:
4693 __m256
4694 get_lower() const
4695 {
4696 return _mm512_castps512_ps256(data);
4697 }
4698
4703 __m256
4704 get_upper() const
4705 {
4706 return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1));
4707 }
4708
4715 get_sqrt() const
4716 {
4717 VectorizedArray res;
4718 res.data = _mm512_sqrt_ps(data);
4719 return res;
4720 }
4721
4728 get_abs() const
4729 {
4730 // to compute the absolute value, perform a bitwise andnot with -0. This
4731 // will leave all mantissa and exponent bits unchanged but force the sign
4732 // bit to +. Since AVX-512F has no andnot for single-precision data, we
4733 // interpret the data as 32 bit integers and do the andnot on those types
4734 // (note that andnot is a bitwise operation, so the data type does not matter)
4735 __m512 mask = _mm512_set1_ps(-0.f);
4736 VectorizedArray res;
4737 res.data = reinterpret_cast<__m512>(
4738 _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
4739 reinterpret_cast<__m512i>(data)));
4740 return res;
4741 }
4742
4749 get_max(const VectorizedArray &other) const
4750 {
4751 VectorizedArray res;
4752 res.data = _mm512_max_ps(data, other.data);
4753 return res;
4754 }
4755
4762 get_min(const VectorizedArray &other) const
4763 {
4764 VectorizedArray res;
4765 res.data = _mm512_min_ps(data, other.data);
4766 return res;
4767 }
4768
4769 // Make a few functions friends.
4770 template <typename Number2, std::size_t width2>
4773 template <typename Number2, std::size_t width2>
4776 template <typename Number2, std::size_t width2>
4780 template <typename Number2, std::size_t width2>
4784};
4785
4786
4787
4791template <>
4792inline DEAL_II_ALWAYS_INLINE void
4793vectorized_load_and_transpose(const unsigned int n_entries,
4794 const float *in,
4795 const unsigned int *offsets,
4796 VectorizedArray<float, 16> *out)
4797{
4798 // Similar to the double case, we perform the work on smaller entities. In
4799 // this case, we start from 128 bit arrays and insert them into a full 512
4800 // bit index. This reduces the code size and register pressure because we do
4801 // shuffles on 4 numbers rather than 16.
4802 const unsigned int n_chunks = n_entries / 4;
4803
4804 // To avoid warnings about uninitialized variables, we need to initialize one
4805 // variable to a pre-existing value in out, which will never get used in
4806 // the end. Keep the initialization outside the loop because of a bug in
4807 // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
4808 // case t3 is initialized to zero (inside/outside of loop), see
4809 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
4810 __m512 t0, t1, t2, t3;
4811 if (n_chunks > 0)
4812 t3 = out[0].data;
4813 for (unsigned int i = 0; i < n_chunks; ++i)
4814 {
4815 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
4816 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
4817 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
4818 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
4819 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
4820 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
4821 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
4822 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
4823 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
4824 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
4825 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
4826 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
4827 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
4828 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
4829 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
4830 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
4831
4832 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4833 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4834 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4835 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4836
4837 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4838 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4839 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4840 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4841 }
4842
4843 // remainder loop of work that does not divide by 4
4844 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4845 out[i].gather(in + i, offsets);
4846}
4847
4848
4849
4853template <>
4854inline DEAL_II_ALWAYS_INLINE void
4855vectorized_load_and_transpose(const unsigned int n_entries,
4856 const std::array<float *, 16> &in,
4857 VectorizedArray<float, 16> *out)
4858{
4859 // see the comments in the vectorized_load_and_transpose above
4860
4861 const unsigned int n_chunks = n_entries / 4;
4862
4863 __m512 t0, t1, t2, t3;
4864 if (n_chunks > 0)
4865 t3 = out[0].data;
4866 for (unsigned int i = 0; i < n_chunks; ++i)
4867 {
4868 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
4869 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
4870 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
4871 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
4872 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
4873 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
4874 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
4875 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
4876 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
4877 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
4878 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
4879 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
4880 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
4881 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
4882 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
4883 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
4884
4885 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4886 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4887 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4888 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4889
4890 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4891 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4892 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4893 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4894 }
4895
4896 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4897 gather(out[i], in, i);
4898}
4899
4900
4901
4905template <>
4906inline DEAL_II_ALWAYS_INLINE void
4907vectorized_transpose_and_store(const bool add_into,
4908 const unsigned int n_entries,
4909 const VectorizedArray<float, 16> *in,
4910 const unsigned int *offsets,
4911 float *out)
4912{
4913 const unsigned int n_chunks = n_entries / 4;
4914 for (unsigned int i = 0; i < n_chunks; ++i)
4915 {
4916 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
4917 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
4918 __m512 t2 =
4919 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
4920 __m512 t3 =
4921 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
4922 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
4923 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
4924 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
4925 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
4926
4927 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
4928 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
4929 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
4930 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
4931 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
4932 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
4933 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
4934 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
4935 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
4936 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
4937 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
4938 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
4939 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
4940 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
4941 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
4942 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
4943
4944 // Cannot use the same store instructions in both paths of the 'if'
4945 // because the compiler cannot know that there is no aliasing between
4946 // pointers
4947 if (add_into)
4948 {
4949 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
4950 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4951 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
4952 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4953 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
4954 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4955 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
4956 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4957 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
4958 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4959 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
4960 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4961 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
4962 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4963 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
4964 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4965 res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
4966 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4967 res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
4968 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4969 res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
4970 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4971 res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
4972 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4973 res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
4974 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4975 res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
4976 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4977 res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
4978 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4979 res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
4980 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4981 }
4982 else
4983 {
4984 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4985 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4986 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4987 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4988 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4989 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4990 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4991 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4992 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4993 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4994 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4995 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4996 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4997 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4998 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4999 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
5000 }
5001 }
5002
5003 // remainder loop of work that does not divide by 4
5004 if (add_into)
5005 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5006 for (unsigned int v = 0; v < 16; ++v)
5007 out[offsets[v] + i] += in[i][v];
5008 else
5009 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5010 for (unsigned int v = 0; v < 16; ++v)
5011 out[offsets[v] + i] = in[i][v];
5012}
5013
5014
5015
5019template <>
5020inline DEAL_II_ALWAYS_INLINE void
5021vectorized_transpose_and_store(const bool add_into,
5022 const unsigned int n_entries,
5023 const VectorizedArray<float, 16> *in,
5024 std::array<float *, 16> &out)
5025{
5026 // see the comments in the vectorized_transpose_and_store above
5027
5028 const unsigned int n_chunks = n_entries / 4;
5029 for (unsigned int i = 0; i < n_chunks; ++i)
5030 {
5031 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
5032 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
5033 __m512 t2 =
5034 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
5035 __m512 t3 =
5036 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
5037 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
5038 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
5039 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
5040 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
5041
5042 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
5043 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
5044 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
5045 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
5046 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
5047 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
5048 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
5049 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
5050 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
5051 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
5052 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
5053 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
5054 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
5055 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
5056 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
5057 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
5058
5059 if (add_into)
5060 {
5061 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
5062 _mm_storeu_ps(out[0] + 4 * i, res0);
5063 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
5064 _mm_storeu_ps(out[1] + 4 * i, res1);
5065 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
5066 _mm_storeu_ps(out[2] + 4 * i, res2);
5067 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
5068 _mm_storeu_ps(out[3] + 4 * i, res3);
5069 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
5070 _mm_storeu_ps(out[4] + 4 * i, res4);
5071 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
5072 _mm_storeu_ps(out[5] + 4 * i, res5);
5073 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
5074 _mm_storeu_ps(out[6] + 4 * i, res6);
5075 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
5076 _mm_storeu_ps(out[7] + 4 * i, res7);
5077 res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
5078 _mm_storeu_ps(out[8] + 4 * i, res8);
5079 res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
5080 _mm_storeu_ps(out[9] + 4 * i, res9);
5081 res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
5082 _mm_storeu_ps(out[10] + 4 * i, res10);
5083 res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
5084 _mm_storeu_ps(out[11] + 4 * i, res11);
5085 res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
5086 _mm_storeu_ps(out[12] + 4 * i, res12);
5087 res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
5088 _mm_storeu_ps(out[13] + 4 * i, res13);
5089 res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
5090 _mm_storeu_ps(out[14] + 4 * i, res14);
5091 res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
5092 _mm_storeu_ps(out[15] + 4 * i, res15);
5093 }
5094 else
5095 {
5096 _mm_storeu_ps(out[0] + 4 * i, res0);
5097 _mm_storeu_ps(out[1] + 4 * i, res1);
5098 _mm_storeu_ps(out[2] + 4 * i, res2);
5099 _mm_storeu_ps(out[3] + 4 * i, res3);
5100 _mm_storeu_ps(out[4] + 4 * i, res4);
5101 _mm_storeu_ps(out[5] + 4 * i, res5);
5102 _mm_storeu_ps(out[6] + 4 * i, res6);
5103 _mm_storeu_ps(out[7] + 4 * i, res7);
5104 _mm_storeu_ps(out[8] + 4 * i, res8);
5105 _mm_storeu_ps(out[9] + 4 * i, res9);
5106 _mm_storeu_ps(out[10] + 4 * i, res10);
5107 _mm_storeu_ps(out[11] + 4 * i, res11);
5108 _mm_storeu_ps(out[12] + 4 * i, res12);
5109 _mm_storeu_ps(out[13] + 4 * i, res13);
5110 _mm_storeu_ps(out[14] + 4 * i, res14);
5111 _mm_storeu_ps(out[15] + 4 * i, res15);
5112 }
5113 }
5114
5115 if (add_into)
5116 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5117 for (unsigned int v = 0; v < 16; ++v)
5118 out[v][i] += in[i][v];
5119 else
5120 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5121 for (unsigned int v = 0; v < 16; ++v)
5122 out[v][i] = in[i][v];
5123}
5124
5125# endif
5126
5127# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
5128 defined(__VSX__)
5129
5130template <>
5131class VectorizedArray<double, 2>
5132 : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
5133{
5134public:
5138 using value_type = double;
5139
5144 static constexpr bool is_implemented = true;
5145
5150 VectorizedArray() = default;
5151
5155 VectorizedArray(const double scalar)
5156 {
5157 this->operator=(scalar);
5158 }
5159
5163 template <typename U>
5164 VectorizedArray(const std::initializer_list<U> &list)
5165 : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
5166 {}
5167
5173 operator=(const double x) &
5174 {
5175 data = vec_splats(x);
5176
5177 // Some compilers believe that vec_splats sets 'x', but that's not true.
5178 // They then warn about setting a variable and not using it. Suppress the
5179 // warning by "using" the variable:
5180 (void)x;
5181 return *this;
5182 }
5183
5190 operator=(const double scalar) && = delete;
5191
5196 double &
5197 operator[](const unsigned int comp)
5198 {
5199 AssertIndexRange(comp, 2);
5200 return *(reinterpret_cast<double *>(&data) + comp);
5201 }
5202
5207 const double &
5208 operator[](const unsigned int comp) const
5209 {
5210 AssertIndexRange(comp, 2);
5211 return *(reinterpret_cast<const double *>(&data) + comp);
5212 }
5213
5219 operator+=(const VectorizedArray &vec)
5220 {
5221 data = vec_add(data, vec.data);
5222 return *this;
5223 }
5224
5230 operator-=(const VectorizedArray &vec)
5231 {
5232 data = vec_sub(data, vec.data);
5233 return *this;
5234 }
5235
5241 operator*=(const VectorizedArray &vec)
5242 {
5243 data = vec_mul(data, vec.data);
5244 return *this;
5245 }
5246
5252 operator/=(const VectorizedArray &vec)
5253 {
5254 data = vec_div(data, vec.data);
5255 return *this;
5256 }
5257
5263 void
5264 load(const double *ptr)
5265 {
5266 data = vec_vsx_ld(0, ptr);
5267 }
5268
5274 void
5275 store(double *ptr) const
5276 {
5277 vec_vsx_st(data, 0, ptr);
5278 }
5279
5284 void
5285 streaming_store(double *ptr) const
5286 {
5287 store(ptr);
5288 }
5289
5294 void
5295 gather(const double *base_ptr, const unsigned int *offsets)
5296 {
5297 for (unsigned int i = 0; i < 2; ++i)
5298 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
5299 }
5300
5305 void
5306 scatter(const unsigned int *offsets, double *base_ptr) const
5307 {
5308 for (unsigned int i = 0; i < 2; ++i)
5309 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
5310 }
5311
5317 __vector double data;
5318
5319private:
5326 get_sqrt() const
5327 {
5328 VectorizedArray res;
5329 res.data = vec_sqrt(data);
5330 return res;
5331 }
5332
5339 get_abs() const
5340 {
5341 VectorizedArray res;
5342 res.data = vec_abs(data);
5343 return res;
5344 }
5345
5352 get_max(const VectorizedArray &other) const
5353 {
5354 VectorizedArray res;
5355 res.data = vec_max(data, other.data);
5356 return res;
5357 }
5358
5365 get_min(const VectorizedArray &other) const
5366 {
5367 VectorizedArray res;
5368 res.data = vec_min(data, other.data);
5369 return res;
5370 }
5371
5372 // Make a few functions friends.
5373 template <typename Number2, std::size_t width2>
5376 template <typename Number2, std::size_t width2>
5379 template <typename Number2, std::size_t width2>
5383 template <typename Number2, std::size_t width2>
5387};
5388
5389
5390
5391template <>
5392class VectorizedArray<float, 4>
5393 : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
5394{
5395public:
5399 using value_type = float;
5400
5405 static constexpr bool is_implemented = true;
5406
5411 VectorizedArray() = default;
5412
5416 VectorizedArray(const float scalar)
5417 {
5418 this->operator=(scalar);
5419 }
5420
5424 template <typename U>
5425 VectorizedArray(const std::initializer_list<U> &list)
5426 : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
5427 {}
5428
5434 operator=(const float x) &
5435 {
5436 data = vec_splats(x);
5437
5438 // Some compilers believe that vec_splats sets 'x', but that's not true.
5439 // They then warn about setting a variable and not using it. Suppress the
5440 // warning by "using" the variable:
5441 (void)x;
5442 return *this;
5443 }
5444
5451 operator=(const float scalar) && = delete;
5452
5457 float &
5458 operator[](const unsigned int comp)
5459 {
5460 AssertIndexRange(comp, 4);
5461 return *(reinterpret_cast<float *>(&data) + comp);
5462 }
5463
5468 const float &
5469 operator[](const unsigned int comp) const
5470 {
5471 AssertIndexRange(comp, 4);
5472 return *(reinterpret_cast<const float *>(&data) + comp);
5473 }
5474
5480 operator+=(const VectorizedArray &vec)
5481 {
5482 data = vec_add(data, vec.data);
5483 return *this;
5484 }
5485
5491 operator-=(const VectorizedArray &vec)
5492 {
5493 data = vec_sub(data, vec.data);
5494 return *this;
5495 }
5496
5502 operator*=(const VectorizedArray &vec)
5503 {
5504 data = vec_mul(data, vec.data);
5505 return *this;
5506 }
5507
5513 operator/=(const VectorizedArray &vec)
5514 {
5515 data = vec_div(data, vec.data);
5516 return *this;
5517 }
5518
5524 void
5525 load(const float *ptr)
5526 {
5527 data = vec_vsx_ld(0, ptr);
5528 }
5529
5535 void
5536 store(float *ptr) const
5537 {
5538 vec_vsx_st(data, 0, ptr);
5539 }
5540
5545 void
5546 streaming_store(float *ptr) const
5547 {
5548 store(ptr);
5549 }
5550
5555 void
5556 gather(const float *base_ptr, const unsigned int *offsets)
5557 {
5558 for (unsigned int i = 0; i < 4; ++i)
5559 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
5560 }
5561
5566 void
5567 scatter(const unsigned int *offsets, float *base_ptr) const
5568 {
5569 for (unsigned int i = 0; i < 4; ++i)
5570 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
5571 }
5572
5578 __vector float data;
5579
5580private:
5587 get_sqrt() const
5588 {
5589 VectorizedArray res;
5590 res.data = vec_sqrt(data);
5591 return res;
5592 }
5593
5600 get_abs() const
5601 {
5602 VectorizedArray res;
5603 res.data = vec_abs(data);
5604 return res;
5605 }
5606
5613 get_max(const VectorizedArray &other) const
5614 {
5615 VectorizedArray res;
5616 res.data = vec_max(data, other.data);
5617 return res;
5618 }
5619
5626 get_min(const VectorizedArray &other) const
5627 {
5628 VectorizedArray res;
5629 res.data = vec_min(data, other.data);
5630 return res;
5631 }
5632
5633 // Make a few functions friends.
5634 template <typename Number2, std::size_t width2>
5637 template <typename Number2, std::size_t width2>
5640 template <typename Number2, std::size_t width2>
5644 template <typename Number2, std::size_t width2>
5648};
5649
5650# endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) &&
5651 // defined(__VSX__)
5652
5653
5654#endif // DOXYGEN
5655
5656
5657
5668template <typename Number, std::size_t width>
5669inline DEAL_II_ALWAYS_INLINE bool
5670operator==(const VectorizedArray<Number, width> &lhs,
5671 const VectorizedArray<Number, width> &rhs)
5672{
5673 for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
5674 if (lhs[i] != rhs[i])
5675 return false;
5676
5677 return true;
5678}
5679
5680
5686template <typename Number, std::size_t width>
5690{
5692 return tmp += v;
5693}
5694
5700template <typename Number, std::size_t width>
5704{
5706 return tmp -= v;
5707}
5708
5714template <typename Number, std::size_t width>
5718{
5720 return tmp *= v;
5721}
5722
5728template <typename Number, std::size_t width>
5732{
5734 return tmp /= v;
5735}
5736
5743template <typename Number, std::size_t width>
5746{
5748 return tmp += v;
5749}
5750
5759template <std::size_t width>
5762{
5764 return tmp += v;
5765}
5766
5773template <typename Number, std::size_t width>
5776{
5777 return u + v;
5778}
5779
5788template <std::size_t width>
5791{
5792 return u + v;
5793}
5794
5801template <typename Number, std::size_t width>
5804{
5806 return tmp -= v;
5807}
5808
5817template <std::size_t width>
5820{
5821 VectorizedArray<float, width> tmp = static_cast<float>(u);
5822 return tmp -= v;
5823}
5824
5831template <typename Number, std::size_t width>
5834{
5836 return v - tmp;
5837}
5838
5847template <std::size_t width>
5850{
5851 VectorizedArray<float, width> tmp = static_cast<float>(u);
5852 return v - tmp;
5853}
5854
5861template <typename Number, std::size_t width>
5864{
5866 return tmp *= v;
5867}
5868
5877template <std::size_t width>
5880{
5881 VectorizedArray<float, width> tmp = static_cast<float>(u);
5882 return tmp *= v;
5883}
5884
5891template <typename Number, std::size_t width>
5894{
5895 return u * v;
5896}
5897
5906template <std::size_t width>
5909{
5910 return u * v;
5911}
5912
5919template <typename Number, std::size_t width>
5922{
5924 return tmp /= v;
5925}
5926
5935template <std::size_t width>
5938{
5939 VectorizedArray<float, width> tmp = static_cast<float>(u);
5940 return tmp /= v;
5941}
5942
5949template <typename Number, std::size_t width>
5952{
5954 return v / tmp;
5955}
5956
5965template <std::size_t width>
5968{
5969 VectorizedArray<float, width> tmp = static_cast<float>(u);
5970 return v / tmp;
5971}
5972
5978template <typename Number, std::size_t width>
5981{
5982 return u;
5983}
5984
5990template <typename Number, std::size_t width>
5993{
5994 // to get a negative sign, subtract the input from zero (could also
5995 // multiply by -1, but this one is slightly simpler)
5996 return VectorizedArray<Number, width>() - u;
5997}
5998
6004template <typename Number, std::size_t width>
6005inline std::ostream &
6006operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
6007{
6008 constexpr unsigned int n = VectorizedArray<Number, width>::size();
6009 for (unsigned int i = 0; i < n - 1; ++i)
6010 out << p[i] << ' ';
6011 out << p[n - 1];
6012
6013 return out;
6014}
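
// Output sketch (illustrative; p is a hypothetical variable): lanes are
// printed separated by single spaces without a trailing separator, e.g. for a
// VectorizedArray<double, 4> p with lanes {1, 2, 3, 4}:
//
//   std::cout << p; // prints "1 2 3 4"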
6015
6030enum class SIMDComparison : int
6031{
6032#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
6033 equal = _CMP_EQ_OQ,
6034 not_equal = _CMP_NEQ_OQ,
6035 less_than = _CMP_LT_OQ,
6036 less_than_or_equal = _CMP_LE_OQ,
6037 greater_than = _CMP_GT_OQ,
6038 greater_than_or_equal = _CMP_GE_OQ
6039#else
6040 equal,
6041 not_equal,
6042 less_than,
6043 less_than_or_equal,
6044 greater_than,
6045 greater_than_or_equal
6046#endif
6047};
6048
6049
6113template <SIMDComparison predicate, typename Number>
6114DEAL_II_ALWAYS_INLINE inline Number
6115compare_and_apply_mask(const Number &left,
6116 const Number &right,
6117 const Number &true_value,
6118 const Number &false_value)
6119{
6120 bool mask;
6121 switch (predicate)
6122 {
6123 case SIMDComparison::equal:
6124 mask = (left == right);
6125 break;
6126 case SIMDComparison::not_equal:
6127 mask = (left != right);
6128 break;
6129 case SIMDComparison::less_than:
6130 mask = (left < right);
6131 break;
6132 case SIMDComparison::less_than_or_equal:
6133 mask = (left <= right);
6134 break;
6135 case SIMDComparison::greater_than:
6136 mask = (left > right);
6137 break;
6138 case SIMDComparison::greater_than_or_equal:
6139 mask = (left >= right);
6140 break;
6141 }
6142
6143 return mask ? true_value : false_value;
6144}
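
// Usage sketch (illustrative; variable names are hypothetical): the same call
// also works lane-wise on VectorizedArray via the overloads below, e.g. an
// element-wise maximum can be written as
//
//   VectorizedArray<double, 8> a, b;
//   const auto c =
//     compare_and_apply_mask<SIMDComparison::greater_than>(a, b, a, b);
//   // c[v] == (a[v] > b[v]) ? a[v] : b[v] for each lane v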
6145
6146
6151template <SIMDComparison predicate, typename Number>
6152DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
6153compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
6154 const VectorizedArray<Number, 1> &right,
6155 const VectorizedArray<Number, 1> &true_value,
6156 const VectorizedArray<Number, 1> &false_value)
6157{
6158 VectorizedArray<Number, 1> result;
6159 result.data = compare_and_apply_mask<predicate, Number>(left.data,
6160 right.data,
6161 true_value.data,
6162 false_value.data);
6163 return result;
6164}
6165
#ifndef DOXYGEN
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)

// AVX-512 specializations: the comparison produces a bit mask that selects
// between the two inputs via a masked move.
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
compare_and_apply_mask(const VectorizedArray<float, 16> &left,
                       const VectorizedArray<float, 16> &right,
                       const VectorizedArray<float, 16> &true_values,
                       const VectorizedArray<float, 16> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 16> result;
  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
  return result;
}



template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
compare_and_apply_mask(const VectorizedArray<double, 8> &left,
                       const VectorizedArray<double, 8> &right,
                       const VectorizedArray<double, 8> &true_values,
                       const VectorizedArray<double, 8> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 8> result;
  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
  return result;
}

# endif

# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)

// AVX specializations: _mm256_cmp_* produces an all-ones/all-zeros mask per
// lane which is then used to blend the two inputs.
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
compare_and_apply_mask(const VectorizedArray<float, 8> &left,
                       const VectorizedArray<float, 8> &right,
                       const VectorizedArray<float, 8> &true_values,
                       const VectorizedArray<float, 8> &false_values)
{
  const auto mask =
    _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));

  VectorizedArray<float, 8> result;
  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
  return result;
}


template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
compare_and_apply_mask(const VectorizedArray<double, 4> &left,
                       const VectorizedArray<double, 4> &right,
                       const VectorizedArray<double, 4> &true_values,
                       const VectorizedArray<double, 4> &false_values)
{
  const auto mask =
    _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));

  VectorizedArray<double, 4> result;
  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
  return result;
}

# endif

# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)

// SSE2 specializations: SSE2 has no generic compare-with-immediate or blend
// instruction, so the predicate is dispatched at compile time and the blend
// is emulated as (mask & true_values) | (~mask & false_values).
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
compare_and_apply_mask(const VectorizedArray<float, 4> &left,
                       const VectorizedArray<float, 4> &right,
                       const VectorizedArray<float, 4> &true_values,
                       const VectorizedArray<float, 4> &false_values)
{
  __m128 mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = _mm_cmpeq_ps(left.data, right.data);
        break;
      case SIMDComparison::not_equal:
        mask = _mm_cmpneq_ps(left.data, right.data);
        break;
      case SIMDComparison::less_than:
        mask = _mm_cmplt_ps(left.data, right.data);
        break;
      case SIMDComparison::less_than_or_equal:
        mask = _mm_cmple_ps(left.data, right.data);
        break;
      case SIMDComparison::greater_than:
        mask = _mm_cmpgt_ps(left.data, right.data);
        break;
      case SIMDComparison::greater_than_or_equal:
        mask = _mm_cmpge_ps(left.data, right.data);
        break;
    }

  VectorizedArray<float, 4> result;
  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
                          _mm_andnot_ps(mask, false_values.data));

  return result;
}


template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
compare_and_apply_mask(const VectorizedArray<double, 2> &left,
                       const VectorizedArray<double, 2> &right,
                       const VectorizedArray<double, 2> &true_values,
                       const VectorizedArray<double, 2> &false_values)
{
  __m128d mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = _mm_cmpeq_pd(left.data, right.data);
        break;
      case SIMDComparison::not_equal:
        mask = _mm_cmpneq_pd(left.data, right.data);
        break;
      case SIMDComparison::less_than:
        mask = _mm_cmplt_pd(left.data, right.data);
        break;
      case SIMDComparison::less_than_or_equal:
        mask = _mm_cmple_pd(left.data, right.data);
        break;
      case SIMDComparison::greater_than:
        mask = _mm_cmpgt_pd(left.data, right.data);
        break;
      case SIMDComparison::greater_than_or_equal:
        mask = _mm_cmpge_pd(left.data, right.data);
        break;
    }

  VectorizedArray<double, 2> result;
  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
                          _mm_andnot_pd(mask, false_values.data));

  return result;
}

# endif

# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ARM_NEON)

// ARM NEON specializations: the comparison yields an unsigned integer mask,
// and the blend is emulated via bitwise operations on the reinterpreted
// floating-point data.
template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
compare_and_apply_mask(const VectorizedArray<float, 4> &left,
                       const VectorizedArray<float, 4> &right,
                       const VectorizedArray<float, 4> &true_values,
                       const VectorizedArray<float, 4> &false_values)
{
  uint32x4_t mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = vceqq_f32(left.data, right.data);
        break;
      case SIMDComparison::not_equal:
        mask = vmvnq_u32(vceqq_f32(left.data, right.data));
        break;
      case SIMDComparison::less_than:
        mask = vcltq_f32(left.data, right.data);
        break;
      case SIMDComparison::less_than_or_equal:
        mask = vcleq_f32(left.data, right.data);
        break;
      case SIMDComparison::greater_than:
        mask = vcgtq_f32(left.data, right.data);
        break;
      case SIMDComparison::greater_than_or_equal:
        mask = vcgeq_f32(left.data, right.data);
        break;
    }

  VectorizedArray<float, 4> result;
  result.data = vreinterpretq_f32_u32(vorrq_u32(
    vandq_u32(mask, vreinterpretq_u32_f32(true_values.data)),
    vandq_u32(vmvnq_u32(mask), vreinterpretq_u32_f32(false_values.data))));

  return result;
}


template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
compare_and_apply_mask(const VectorizedArray<double, 2> &left,
                       const VectorizedArray<double, 2> &right,
                       const VectorizedArray<double, 2> &true_values,
                       const VectorizedArray<double, 2> &false_values)
{
  uint64x2_t mask;
  switch (predicate)
    {
      case SIMDComparison::equal:
        mask = vceqq_f64(left.data, right.data);
        break;
      case SIMDComparison::not_equal:
        mask = vreinterpretq_u64_u32(
          vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(left.data, right.data))));
        break;
      case SIMDComparison::less_than:
        mask = vcltq_f64(left.data, right.data);
        break;
      case SIMDComparison::less_than_or_equal:
        mask = vcleq_f64(left.data, right.data);
        break;
      case SIMDComparison::greater_than:
        mask = vcgtq_f64(left.data, right.data);
        break;
      case SIMDComparison::greater_than_or_equal:
        mask = vcgeq_f64(left.data, right.data);
        break;
    }

  VectorizedArray<double, 2> result;
  result.data = vreinterpretq_f64_u64(vorrq_u64(
    vandq_u64(mask, vreinterpretq_u64_f64(true_values.data)),
    vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(mask))),
              vreinterpretq_u64_f64(false_values.data))));

  return result;
}

# endif
#endif // DOXYGEN

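// All of the hardware branches above implement the same lane-wise selection.
// The following scalar model is illustrative only and is not used by the
// library; the name bit_select() is made up. It shows the bit trick emulated
// by the SSE2 and NEON variants, where the mask is all-ones for lanes that
// satisfy the predicate and all-zeros otherwise:
//
// @code
// #include <cstdint>
//
// inline std::uint32_t
// bit_select(const std::uint32_t mask,
//            const std::uint32_t true_bits,
//            const std::uint32_t false_bits)
// {
//   // (mask & true_bits) | (~mask & false_bits)
//   return (mask & true_bits) | (~mask & false_bits);
// }
// @endcode
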
namespace internal
{
  // Type trait that allows writing algorithms generically for plain scalar
  // numbers and for VectorizedArray objects. This primary template treats T
  // as a scalar with a single lane.
  template <typename T>
  struct VectorizedArrayTrait
  {
    // The underlying scalar type.
    using value_type = T;

    // Number of lanes of T itself (a scalar has one).
    static constexpr std::size_t
    width()
    {
      return 1;
    }

    // The vectorized type associated with T.
    using vectorized_value_type = VectorizedArray<T>;

    // Number of scalar values packed into one vectorized_value_type.
    static constexpr std::size_t
    stride()
    {
      return vectorized_value_type::size();
    }

    // Access lane c of a scalar value (only c == 0 is valid).
    static value_type &
    get(value_type &value, unsigned int c)
    {
      AssertIndexRange(c, 1);
      (void)c;

      return value;
    }

    // Access lane c of a constant scalar value (only c == 0 is valid).
    static const value_type &
    get(const value_type &value, unsigned int c)
    {
      AssertIndexRange(c, 1);
      (void)c;

      return value;
    }

    // Access lane c of a vectorized value.
    static value_type &
    get_from_vectorized(vectorized_value_type &values, unsigned int c)
    {
      AssertIndexRange(c, stride());

      return values[c];
    }

    // Access lane c of a constant vectorized value.
    static const value_type &
    get_from_vectorized(const vectorized_value_type &values, unsigned int c)
    {
      AssertIndexRange(c, stride());

      return values[c];
    }
  };

  // Specialization for VectorizedArray: the type itself already holds
  // width_ lanes.
  template <typename T, std::size_t width_>
  struct VectorizedArrayTrait<VectorizedArray<T, width_>>
  {
    // The underlying scalar type.
    using value_type = T;

    // Number of lanes of the vectorized array.
    static constexpr std::size_t
    width()
    {
      return width_;
    }

    // The vectorized type is the type itself.
    using vectorized_value_type = VectorizedArray<T, width_>;

    // A vectorized value corresponds to a single entry of this type.
    static constexpr std::size_t
    stride()
    {
      return 1;
    }

    // Access lane c of a vectorized array.
    static value_type &
    get(vectorized_value_type &values, unsigned int c)
    {
      AssertIndexRange(c, width_);

      return values[c];
    }

    // Access lane c of a constant vectorized array.
    static const value_type &
    get(const vectorized_value_type &values, unsigned int c)
    {
      AssertIndexRange(c, width_);

      return values[c];
    }

    // Access entry c of a vectorized value (only c == 0 is valid).
    static vectorized_value_type &
    get_from_vectorized(vectorized_value_type &values, unsigned int c)
    {
      (void)c;
      AssertIndexRange(c, 1);

      return values;
    }

    // Access entry c of a constant vectorized value (only c == 0 is valid).
    static const vectorized_value_type &
    get_from_vectorized(const vectorized_value_type &values, unsigned int c)
    {
      (void)c;
      AssertIndexRange(c, 1);

      return values;
    }
  };
} // namespace internal

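// Illustrative sketch (not part of the library source): the trait above lets
// one write a lane-wise loop that works for both a plain double and a
// VectorizedArray<double>. The function name add_one_to_all_lanes() is made
// up for this sketch:
//
// @code
// #include <deal.II/base/vectorization.h>
//
// template <typename Number>
// void add_one_to_all_lanes(Number &value)
// {
//   using trait  = dealii::internal::VectorizedArrayTrait<Number>;
//   using scalar = typename trait::value_type;
//   for (unsigned int lane = 0; lane < trait::width(); ++lane)
//     trait::get(value, lane) += scalar(1);
// }
// @endcode
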

DEAL_II_NAMESPACE_CLOSE

// Implementation of functions from cmath on VectorizedArray. These overloads
// live in namespace std so that generic code can call, e.g., std::sin on
// plain scalars and on vectorized arrays alike.
namespace std
{
  // Compute the sine of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  sin(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::sin(x[i]);
    return out;
  }


  // Compute the cosine of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  cos(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::cos(x[i]);
    return out;
  }


  // Compute the tangent of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  tan(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::tan(x[i]);
    return out;
  }


  // Compute the arc cosine of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  acos(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::acos(x[i]);
    return out;
  }


  // Compute the arc sine of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  asin(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::asin(x[i]);
    return out;
  }


  // Compute the arc tangent of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  atan(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::atan(x[i]);
    return out;
  }


  // Compute the hyperbolic cosine of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  cosh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::cosh(x[i]);
    return out;
  }


  // Compute the hyperbolic sine of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  sinh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::sinh(x[i]);
    return out;
  }


  // Compute the hyperbolic tangent of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  tanh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::tanh(x[i]);
    return out;
  }


  // Compute the inverse hyperbolic cosine of each entry of a vectorized data
  // field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  acosh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::acosh(x[i]);
    return out;
  }


  // Compute the inverse hyperbolic sine of each entry of a vectorized data
  // field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  asinh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::asinh(x[i]);
    return out;
  }


  // Compute the inverse hyperbolic tangent of each entry of a vectorized data
  // field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  atanh(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::atanh(x[i]);
    return out;
  }


  // Compute the exponential of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  exp(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::exp(x[i]);
    return out;
  }


  // Compute the natural logarithm of each entry of a vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  log(const ::dealii::VectorizedArray<Number, width> &x)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::log(x[i]);
    return out;
  }


  // Compute the square root of each entry; forwards to the member function.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  sqrt(const ::dealii::VectorizedArray<Number, width> &x)
  {
    return x.get_sqrt();
  }


  // Raise each entry of a vectorized data field to the fixed power p.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  pow(const ::dealii::VectorizedArray<Number, width> &x, const Number p)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::pow(x[i], p);
    return out;
  }


  // Raise each entry of a vectorized data field to the power given by the
  // corresponding entry of a second vectorized data field.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  pow(const ::dealii::VectorizedArray<Number, width> &x,
      const ::dealii::VectorizedArray<Number, width> &p)
  {
    ::dealii::VectorizedArray<Number, width> out;
    for (unsigned int i = 0;
         i < ::dealii::VectorizedArray<Number, width>::size();
         ++i)
      out[i] = std::pow(x[i], p[i]);
    return out;
  }


  // Compute the absolute value of each entry; forwards to the member
  // function.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  abs(const ::dealii::VectorizedArray<Number, width> &x)
  {
    return x.get_abs();
  }


  // Compute the entry-wise maximum of two vectorized data fields.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  max(const ::dealii::VectorizedArray<Number, width> &x,
      const ::dealii::VectorizedArray<Number, width> &y)
  {
    return x.get_max(y);
  }


  // Compute the entry-wise minimum of two vectorized data fields.
  template <typename Number, std::size_t width>
  inline ::dealii::VectorizedArray<Number, width>
  min(const ::dealii::VectorizedArray<Number, width> &x,
      const ::dealii::VectorizedArray<Number, width> &y)
  {
    return x.get_min(y);
  }
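
  // Illustrative sketch (not part of the library source): because these
  // overloads are declared alongside the standard ones, a numerical kernel
  // can be written once for scalars and vectorized arrays alike. The function
  // name smoothed_magnitude() is made up for this sketch:
  //
  // @code
  // #include <deal.II/base/vectorization.h>
  // #include <cmath>
  //
  // template <typename Number>
  // Number smoothed_magnitude(const Number &x, const Number &y)
  // {
  //   // works for double as well as for dealii::VectorizedArray<double>
  //   return std::sqrt(x * x + y * y + Number(1e-12));
  // }
  // @endcode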


  // Specialization of std::iterator_traits for VectorizedArrayIterator, so
  // that the iterators over the lanes of a VectorizedArray can be used with
  // standard algorithms.
  template <class T>
  struct iterator_traits<dealii::VectorizedArrayIterator<T>>
  {
#ifdef DEAL_II_HAVE_CXX20
    using iterator_category = contiguous_iterator_tag;
#else
    using iterator_category = random_access_iterator_tag;
#endif
    using value_type      = typename T::value_type;
    using difference_type = std::ptrdiff_t;
  };

} // namespace std
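
// Illustrative sketch (not part of the library source): VectorizedArray
// exposes begin()/end() over its lanes, so lane-wise iteration can use a
// range-based for loop, and the iterator_traits specialization above
// additionally makes the iterators usable with standard algorithms. The
// function name sum_of_lanes() is made up for this sketch:
//
// @code
// #include <deal.II/base/vectorization.h>
//
// double sum_of_lanes(const dealii::VectorizedArray<double> &v)
// {
//   double s = 0.0;
//   for (const double lane : v)
//     s += lane;
//   return s;
// }
// @endcode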

#endif