#ifndef dealii_vector_operations_internal_h
#define dealii_vector_operations_internal_h

#include <deal.II/base/memory_space.h>
#include <deal.II/base/multithread_info.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/thread_management.h>
#include <deal.II/base/types.h>
#include <deal.II/base/vectorization.h>

#include <deal.II/lac/cuda_kernels.h>
#include <deal.II/lac/cuda_kernels.templates.h>

#include <algorithm>
#include <cstdio>
#include <cstring>

DEAL_II_NAMESPACE_OPEN

namespace internal
{
  namespace VectorOperations
  {
    using size_type = types::global_dof_index;
    template <typename T>
    bool
    is_non_negative(const T &t)
    {
      return t >= 0;
    }

    template <typename T>
    bool
    is_non_negative(const std::complex<T> &)
    {
      Assert(false, ExcMessage("Complex numbers do not have an ordering."));
      return false;
    }
    template <typename T>
    void
    print(const T &t, const char *format)
    {
      if (format != nullptr)
        std::printf(format, t);
      else
        std::printf(" %5.2f", double(t));
    }

    template <typename T>
    void
    print(const std::complex<T> &t, const char *format)
    {
      if (format != nullptr)
        std::printf(format, t.real(), t.imag());
      else
        std::printf(" %5.2f+%5.2fi", double(t.real()), double(t.imag()));
    }
    template <typename T, typename U>
    void
    copy(const T *begin, const T *end, U *dest)
    {
      std::copy(begin, end, dest);
    }

    template <typename T, typename U>
    void
    copy(const std::complex<T> *begin,
         const std::complex<T> *end,
         std::complex<U> *      dest)
    {
      std::copy(begin, end, dest);
    }

    template <typename T, typename U>
    void
    copy(const std::complex<T> *, const std::complex<T> *, U *)
    {
      Assert(false,
             ExcMessage("Can't convert a vector of complex numbers "
                        "into a vector of reals/doubles"));
    }
#ifdef DEAL_II_WITH_THREADS
    // Translate a TBB loop range into the actual index range of the vector,
    // working in chunks whose size encodes the parallel grain size.
    template <typename Functor>
    struct TBBForFunctor
    {
      TBBForFunctor(Functor &functor, const size_type start, const size_type end)
        : functor(functor)
        , start(start)
        , end(end)
      {
        const size_type    vec_size = end - start;
        const unsigned int gs =
          internal::VectorImplementation::minimum_parallel_grain_size;
        n_chunks =
          std::min<size_type>(4 * MultithreadInfo::n_threads(), vec_size / gs);
        chunk_size = vec_size / n_chunks;

        // round the chunk size up to the next multiple of 512; the pairwise
        // summation below favors chunk lengths that are powers of two
        if (chunk_size > 512)
          chunk_size = ((chunk_size + 511) / 512) * 512;
        n_chunks = (vec_size + chunk_size - 1) / chunk_size;
      }

      void
      operator()(const tbb::blocked_range<size_type> &range) const
      {
        const size_type r_begin = start + range.begin() * chunk_size;
        const size_type r_end = std::min(start + range.end() * chunk_size, end);
        functor(r_begin, r_end);
      }

      Functor &       functor;
      const size_type start;
      const size_type end;
      unsigned int    n_chunks;
      size_type       chunk_size;
    };
#endif
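    // A rough worked example of the chunking above (the numbers are only
    // illustrative; the actual grain size comes from
    // internal::VectorImplementation::minimum_parallel_grain_size): assuming
    // 10'000'000 entries, a grain size of 4096 and 8 threads, the first
    // estimate is n_chunks = min(4 * 8, 10'000'000 / 4096) = 32 and
    // chunk_size = 312'500; rounding up to a multiple of 512 gives 312'832,
    // and the final n_chunks = ceil(10'000'000 / 312'832) = 32, with only the
    // last chunk being shorter than the others.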
    template <typename Functor>
    void
    parallel_for(
      Functor &       functor,
      const size_type start,
      const size_type end,
      const std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
    {
#ifdef DEAL_II_WITH_THREADS
      const size_type vec_size = end - start;
      // only spawn threads if there is enough parallel work; otherwise the
      // overhead is larger than the gain
      if (vec_size >=
            4 * internal::VectorImplementation::minimum_parallel_grain_size &&
          MultithreadInfo::n_threads() > 1)
        {
          Assert(partitioner.get() != nullptr,
                 ExcMessage("Unexpected initialization of Vector that does "
                            "not set the TBB partitioner to a usable state."));
          std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
            partitioner->acquire_one_partitioner();

          TBBForFunctor<Functor> generic_functor(functor, start, end);
          parallel::internal::parallel_for(static_cast<size_type>(0),
                                           static_cast<size_type>(
                                             generic_functor.n_chunks),
                                           generic_functor,
                                           1,
                                           tbb_partitioner);
          partitioner->release_one_partitioner(tbb_partitioner);
        }
      else if (vec_size > 0)
        functor(start, end);
#else
      functor(start, end);
      (void)partitioner;
#endif
    }
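    // Illustrative usage (the pointer and partitioner names below are
    // assumptions; in practice they come from the calling vector class): any
    // functor exposing operator()(begin, end) can be dispatched through
    // parallel_for, e.g.
    //
    //   Vector_set<double> setter(1.0, values_ptr);
    //   parallel_for(setter, 0, local_size, thread_loop_partitioner);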
    template <typename Number>
    struct Vector_set
    {
      Vector_set(const Number value, Number *const dst)
        : value(value)
        , dst(dst)
      {
        Assert(dst != nullptr, ExcInternalError());
      }

      void
      operator()(const size_type begin, const size_type end) const
      {
        Assert(end >= begin, ExcInternalError());

        if (value == Number())
          {
#ifdef DEAL_II_WITH_CXX17
            if constexpr (std::is_trivial<Number>::value)
#else
            if (std::is_trivial<Number>::value)
#endif
              {
                std::memset(dst + begin, 0, sizeof(Number) * (end - begin));
                return;
              }
          }
        std::fill(dst + begin, dst + end, value);
      }

      const Number  value;
      Number *const dst;
    };
    template <typename Number, typename OtherNumber>
    struct Vector_copy
    {
      Vector_copy(const OtherNumber *const src, Number *const dst)
        : src(src)
        , dst(dst)
      {
        Assert(src != nullptr, ExcInternalError());
        Assert(dst != nullptr, ExcInternalError());
      }

      void
      operator()(const size_type begin, const size_type end) const
      {
        Assert(end >= begin, ExcInternalError());

#if __GNUG__ && __GNUC__ < 5
        if (__has_trivial_copy(Number) &&
            std::is_same<Number, OtherNumber>::value)
#else
#  ifdef DEAL_II_WITH_CXX17
        if constexpr (std::is_trivially_copyable<Number>() &&
                      std::is_same<Number, OtherNumber>::value)
#  else
        if (std::is_trivially_copyable<Number>() &&
            std::is_same<Number, OtherNumber>::value)
#  endif
#endif
          std::memcpy(dst + begin, src + begin, (end - begin) * sizeof(Number));
        else
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              dst[i] = src[i];
          }
      }

      const OtherNumber *const src;
      Number *const            dst;
    };
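    // Note on the two branches above: the std::memcpy fast path is only taken
    // when source and destination have the same, trivially copyable type;
    // mixed-precision copies (e.g. float -> double) fall back to the
    // element-wise loop, which performs the conversion entry by entry.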
    template <typename Number>
    struct Vectorization_multiply_factor
    {
      Vectorization_multiply_factor(Number *const val, const Number factor)
        : val(val), factor(factor)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] *= factor;
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] *= factor;
      }

      Number *const val;
      const Number  factor;
    };
    template <typename Number>
    struct Vectorization_add_av
    {
      Vectorization_add_av(Number *const       val,
                           const Number *const v_val,
                           const Number        factor)
        : val(val), v_val(v_val), factor(factor)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] += factor * v_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] += factor * v_val[i];
      }

      Number *const       val;
      const Number *const v_val;
      const Number        factor;
    };
    template <typename Number>
    struct Vectorization_sadd_xav
    {
      Vectorization_sadd_xav(Number *            val,
                             const Number *const v_val,
                             const Number        a,
                             const Number        x)
        : val(val), v_val(v_val), a(a), x(x)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] = x * val[i] + a * v_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] = x * val[i] + a * v_val[i];
      }

      Number *const       val;
      const Number *const v_val;
      const Number        a;
      const Number        x;
    };
    template <typename Number>
    struct Vectorization_subtract_v
    {
      Vectorization_subtract_v(Number *val, const Number *const v_val)
        : val(val), v_val(v_val)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] -= v_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] -= v_val[i];
      }

      Number *const       val;
      const Number *const v_val;
    };
    template <typename Number>
    struct Vectorization_add_factor
    {
      Vectorization_add_factor(Number *const val, const Number factor)
        : val(val), factor(factor)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] += factor;
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] += factor;
      }

      Number *const val;
      const Number  factor;
    };
    template <typename Number>
    struct Vectorization_add_v
    {
      Vectorization_add_v(Number *const val, const Number *const v_val)
        : val(val), v_val(v_val)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] += v_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] += v_val[i];
      }

      Number *const       val;
      const Number *const v_val;
    };
    template <typename Number>
    struct Vectorization_add_avpbw
    {
      Vectorization_add_avpbw(Number *const       val,
                              const Number *const v_val,
                              const Number *const w_val,
                              const Number        a,
                              const Number        b)
        : val(val), v_val(v_val), w_val(w_val), a(a), b(b)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] = val[i] + a * v_val[i] + b * w_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] = val[i] + a * v_val[i] + b * w_val[i];
      }

      Number *const       val;
      const Number *const v_val;
      const Number *const w_val;
      const Number        a;
      const Number        b;
    };
    template <typename Number>
    struct Vectorization_sadd_xv
    {
      Vectorization_sadd_xv(Number *const       val,
                            const Number *const v_val,
                            const Number        x)
        : val(val), v_val(v_val), x(x)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] = x * val[i] + v_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] = x * val[i] + v_val[i];
      }

      Number *const       val;
      const Number *const v_val;
      const Number        x;
    };
    template <typename Number>
    struct Vectorization_sadd_xavbw
    {
      Vectorization_sadd_xavbw(Number *            val,
                               const Number *const v_val,
                               const Number *const w_val,
                               const Number        x,
                               const Number        a,
                               const Number        b)
        : val(val), v_val(v_val), w_val(w_val), x(x), a(a), b(b)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
      }

      Number *const       val;
      const Number *const v_val;
      const Number *const w_val;
      const Number        x;
      const Number        a;
      const Number        b;
    };
    template <typename Number>
    struct Vectorization_scale
    {
      Vectorization_scale(Number *const val, const Number *const v_val)
        : val(val), v_val(v_val)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] *= v_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] *= v_val[i];
      }

      Number *const       val;
      const Number *const v_val;
    };
    template <typename Number>
    struct Vectorization_equ_au
    {
      Vectorization_equ_au(Number *const       val,
                           const Number *const u_val,
                           const Number        a)
        : val(val), u_val(u_val), a(a)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] = a * u_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] = a * u_val[i];
      }

      Number *const       val;
      const Number *const u_val;
      const Number        a;
    };
    template <typename Number>
    struct Vectorization_equ_aubv
    {
      Vectorization_equ_aubv(Number *const       val,
                             const Number *const u_val,
                             const Number *const v_val,
                             const Number        a,
                             const Number        b)
        : val(val), u_val(u_val), v_val(v_val), a(a), b(b)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] = a * u_val[i] + b * v_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] = a * u_val[i] + b * v_val[i];
      }

      Number *const       val;
      const Number *const u_val;
      const Number *const v_val;
      const Number        a;
      const Number        b;
    };
    template <typename Number>
    struct Vectorization_equ_aubvcw
    {
      Vectorization_equ_aubvcw(Number *            val,
                               const Number *const u_val,
                               const Number *const v_val,
                               const Number *const w_val,
                               const Number        a,
                               const Number        b,
                               const Number        c)
        : val(val), u_val(u_val), v_val(v_val), w_val(w_val), a(a), b(b), c(c)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
      }

      Number *const       val;
      const Number *const u_val;
      const Number *const v_val;
      const Number *const w_val;
      const Number        a;
      const Number        b;
      const Number        c;
    };
    template <typename Number>
    struct Vectorization_ratio
    {
      Vectorization_ratio(Number *val, const Number *a_val, const Number *b_val)
        : val(val), a_val(a_val), b_val(b_val)
      {}

      void
      operator()(const size_type begin, const size_type end) const
      {
        if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
          {
            DEAL_II_OPENMP_SIMD_PRAGMA
            for (size_type i = begin; i < end; ++i)
              val[i] = a_val[i] / b_val[i];
          }
        else
          for (size_type i = begin; i < end; ++i)
            val[i] = a_val[i] / b_val[i];
      }

      Number *const       val;
      const Number *const a_val;
      const Number *const b_val;
    };
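    // The element-wise functors above all duplicate their loop on purpose:
    // the first branch is annotated with DEAL_II_OPENMP_SIMD_PRAGMA and is
    // taken only when EnableOpenMPSimdFor<Number> allows it (essentially for
    // plain arithmetic types), while the second branch is a plain loop for
    // number types for which OpenMP SIMD annotations are not safe or not
    // beneficial.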
    // All sums over vector entries (l2-norm, dot product, etc.) are performed
    // with the same accumulation code, parameterized by one of the operation
    // structs below.
    template <typename Number, typename Number2>
    struct Dot
    {
      static const bool vectorizes =
        std::is_same<Number, Number2>::value &&
        (VectorizedArray<Number>::n_array_elements > 1);

      Dot(const Number *const X, const Number2 *const Y)
        : X(X), Y(Y)
      {}

      Number
      operator()(const size_type i) const
      {
        return X[i] * Number(numbers::NumberTraits<Number2>::conjugate(Y[i]));
      }

      VectorizedArray<Number>
      do_vectorized(const size_type i) const
      {
        VectorizedArray<Number> x, y;
        x.load(X + i);
        y.load(Y + i);

        // the vectorized path performs a plain element-wise product, which is
        // only valid for real-valued types
        static_assert(
          std::is_same<typename numbers::NumberTraits<Number>::real_type,
                       Number>::value,
          "This operation is not correctly implemented for "
          "complex-valued objects.");
        return x * y;
      }

      const Number *const  X;
      const Number2 *const Y;
    };
    template <typename Number, typename RealType>
    struct Norm2
    {
      static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;

      Norm2(const Number *const X)
        : X(X)
      {}

      RealType
      operator()(const size_type i) const
      {
        return numbers::NumberTraits<Number>::abs_square(X[i]);
      }

      VectorizedArray<Number>
      do_vectorized(const size_type i) const
      {
        VectorizedArray<Number> x;
        x.load(X + i);
        return x * x;
      }

      const Number *const X;
    };
    template <typename Number, typename RealType>
    struct Norm1
    {
      static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;

      Norm1(const Number *X)
        : X(X)
      {}

      RealType
      operator()(const size_type i) const
      {
        return numbers::NumberTraits<Number>::abs(X[i]);
      }

      VectorizedArray<Number>
      do_vectorized(const size_type i) const
      {
        VectorizedArray<Number> x;
        x.load(X + i);
        return std::abs(x);
      }

      const Number *X;
    };
const 858 template <
typename Number,
typename RealType>
861 static const bool vectorizes =
864 NormP(
const Number *X, RealType p)
870 operator()(
const size_type i)
const 876 do_vectorized(
const size_type i)
const 880 return std::pow(std::abs(x), p);
    template <typename Number>
    struct MeanValue
    {
      static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;

      MeanValue(const Number *X)
        : X(X)
      {}

      Number
      operator()(const size_type i) const
      {
        return X[i];
      }

      VectorizedArray<Number>
      do_vectorized(const size_type i) const
      {
        VectorizedArray<Number> x;
        x.load(X + i);
        return x;
      }

      const Number *X;
    };
const 914 template <
typename Number>
917 static const bool vectorizes =
920 AddAndDot(Number *
const X,
921 const Number *
const V,
922 const Number *
const W,
931 operator()(
const size_type i)
const 938 do_vectorized(
const size_type i)
const 957 "This operation is not correctly implemented for " 958 "complex-valued objects.");
963 const Number *
const V;
964 const Number *
const W;
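    // The reduction operations above share a common informal interface that
    // the accumulation routines below rely on:
    //   static const bool vectorizes;          // may do_vectorized() be used?
    //   ResultType operator()(size_type i);    // contribution of entry i
    //   VectorizedArray<Number> do_vectorized(size_type i);
    // A sketch of how they are consumed (the pointer names are assumptions):
    //   Dot<double, double> dot(x_ptr, y_ptr);
    //   double result;
    //   parallel_reduce(dot, 0, n, result, thread_loop_partitioner);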
    // Maximum number of 32-entry chunks that a single call to
    // accumulate_recursive() handles directly before splitting the range.
    const unsigned int vector_accumulation_recursion_threshold = 128;
    // Main working loop for all vector reductions: sum in chunks of 32
    // entries and combine the partial results by pairwise summation.
    template <typename Operation, typename ResultType>
    void
    accumulate_recursive(const Operation &op,
                         const size_type  first,
                         const size_type  last,
                         ResultType &     result)
    {
      const size_type vec_size = last - first;
      if (vec_size <= vector_accumulation_recursion_threshold * 32)
        {
          // the vector is short enough to be summed directly: first work on
          // the regular part in chunks of 32 entries
          size_type  index = first;
          ResultType outer_results[vector_accumulation_recursion_threshold];

          // set the zeroth element to zero to correctly handle vec_size == 0
          outer_results[0] = ResultType();

          size_type       n_chunks  = vec_size / 32;
          const size_type remainder = vec_size % 32;
          Assert(remainder == 0 ||
                   n_chunks < vector_accumulation_recursion_threshold,
                 ExcInternalError());

          // select the regular or the vectorized inner loop depending on the
          // operation's capabilities
          accumulate_regular(
            op,
            n_chunks,
            index,
            outer_results,
            std::integral_constant<bool, Operation::vectorizes>());

          // work on the remainder, i.e., the last up to 32 values, in
          // sub-chunks of 8 entries using a fall-through switch
          if (remainder > 0)
            {
              AssertIndexRange(n_chunks,
                               vector_accumulation_recursion_threshold + 1);
              const size_type inner_chunks    = remainder / 8;
              const size_type remainder_inner = remainder % 8;
              ResultType      r0 = ResultType(), r1 = ResultType(),
                         r2 = ResultType();
              switch (inner_chunks)
                {
                  case 3:
                    r2 = op(index++);
                    for (size_type j = 1; j < 8; ++j)
                      r2 += op(index++);
                    DEAL_II_FALLTHROUGH;
                  case 2:
                    r1 = op(index++);
                    for (size_type j = 1; j < 8; ++j)
                      r1 += op(index++);
                    r1 += r2;
                    DEAL_II_FALLTHROUGH;
                  case 1:
                    r2 = op(index++);
                    for (size_type j = 1; j < 8; ++j)
                      r2 += op(index++);
                    DEAL_II_FALLTHROUGH;
                  default:
                    for (size_type j = 0; j < remainder_inner; ++j)
                      r0 += op(index++);
                    r0 += r2;
                    r0 += r1;
                    if (n_chunks == vector_accumulation_recursion_threshold)
                      outer_results[vector_accumulation_recursion_threshold -
                                    1] += r0;
                    else
                      {
                        outer_results[n_chunks] = r0;
                        ++n_chunks;
                      }
                    break;
                }
            }

          // combine the partial results in outer_results[0, n_chunks) by
          // pairwise summation
          while (n_chunks > 1)
            {
              if (n_chunks % 2 == 1)
                outer_results[n_chunks++] = ResultType();
              for (size_type i = 0; i < n_chunks; i += 2)
                outer_results[i / 2] = outer_results[i] + outer_results[i + 1];
              n_chunks /= 2;
            }
          result = outer_results[0];
        }
      else
        {
          // split the range into four pieces and work on them recursively;
          // the first three pieces are chosen divisible by a quarter of the
          // recursion threshold times 32
          const size_type new_size =
            (vec_size / (vector_accumulation_recursion_threshold * 32)) *
            vector_accumulation_recursion_threshold * 8;
          Assert(first + 3 * new_size < last, ExcInternalError());
          ResultType r0, r1, r2, r3;
          accumulate_recursive(op, first, first + new_size, r0);
          accumulate_recursive(op, first + new_size, first + 2 * new_size, r1);
          accumulate_recursive(op, first + 2 * new_size, first + 3 * new_size, r2);
          accumulate_recursive(op, first + 3 * new_size, last, r3);
          r0 += r1;
          r2 += r3;
          result = r0 + r2;
        }
    }
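    // Why the blocked/pairwise scheme: summing n terms strictly left to right
    // accumulates a worst-case rounding error that grows like O(eps * n),
    // whereas combining partial sums pairwise as above reduces the growth to
    // O(eps * log2(n)) while still touching every element exactly once. The
    // fixed block size of 32 keeps the bookkeeping cheap for short vectors.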
    // Inner working routine for the accumulation loops: the standard,
    // non-vectorized case. Each chunk has a width of 32 entries, accumulated
    // into four independent partial sums.
    template <typename Operation, typename ResultType>
    void
    accumulate_regular(
      const Operation &op,
      const size_type &n_chunks,
      size_type &      index,
      ResultType (&outer_results)[vector_accumulation_recursion_threshold],
      std::integral_constant<bool, false>)
    {
      for (size_type i = 0; i < n_chunks; ++i)
        {
          ResultType r0 = op(index);
          ResultType r1 = op(index + 1);
          ResultType r2 = op(index + 2);
          ResultType r3 = op(index + 3);
          index += 4;
          for (size_type j = 1; j < 8; ++j, index += 4)
            {
              r0 += op(index);
              r1 += op(index + 1);
              r2 += op(index + 2);
              r3 += op(index + 3);
            }
          r0 += r1;
          r2 += r3;
          outer_results[i] = r0 + r2;
        }
    }
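    // Using four independent accumulators r0..r3 breaks the serial dependency
    // chain of a single running sum, so the CPU can keep several additions in
    // flight at once (instruction-level parallelism); the partial sums are
    // only combined once per 32-entry chunk.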
    // Vectorized variant of the inner accumulation loop: it uses the
    // operation's do_vectorized() routine, which processes one
    // VectorizedArray (several entries) at a time.
    template <typename Operation, typename Number>
    void
    accumulate_regular(
      const Operation &op,
      size_type &      n_chunks,
      size_type &      index,
      Number (&outer_results)[vector_accumulation_recursion_threshold],
      std::integral_constant<bool, true>)
    {
      // nvecs is the number of SIMD lanes of VectorizedArray<Number>
      const unsigned int nvecs = VectorizedArray<Number>::n_array_elements;
      const size_type    regular_chunks = n_chunks / nvecs;
      for (size_type i = 0; i < regular_chunks; ++i)
        {
          VectorizedArray<Number> r0 = op.do_vectorized(index);
          VectorizedArray<Number> r1 = op.do_vectorized(index + nvecs);
          VectorizedArray<Number> r2 = op.do_vectorized(index + 2 * nvecs);
          VectorizedArray<Number> r3 = op.do_vectorized(index + 3 * nvecs);
          index += nvecs * 4;
          for (size_type j = 1; j < 8; ++j, index += nvecs * 4)
            {
              r0 += op.do_vectorized(index);
              r1 += op.do_vectorized(index + nvecs);
              r2 += op.do_vectorized(index + 2 * nvecs);
              r3 += op.do_vectorized(index + 3 * nvecs);
            }
          r0 += r1;
          r2 += r3;
          r0 += r2;
          r0.store(&outer_results[i * nvecs]);
        }

      // if the chunk count is not divisible by the number of SIMD lanes, a
      // cleanup loop handles the remaining chunks, two SIMD loads at a time
      // within each chunk of 32 entries
      if (n_chunks % nvecs != 0)
        {
          VectorizedArray<Number> r0 = VectorizedArray<Number>(),
                                  r1 = VectorizedArray<Number>();
          const size_type start_irreg = regular_chunks * nvecs;
          for (size_type c = start_irreg; c < n_chunks; ++c)
            for (size_type j = 0; j < 32; j += 2 * nvecs, index += 2 * nvecs)
              {
                r0 += op.do_vectorized(index);
                r1 += op.do_vectorized(index + nvecs);
              }
          r0 += r1;
          r0.store(&outer_results[start_irreg]);
          // tell the caller which slot in outer_results[] is free next
          n_chunks = start_irreg + nvecs;
        }
    }
#ifdef DEAL_II_WITH_THREADS
    // Counterpart of TBBForFunctor for reductions: translate the TBB range
    // into chunks of the vector, run accumulate_recursive() on each chunk,
    // and combine the per-chunk results in do_sum(). The chunk layout must be
    // kept in sync with TBBForFunctor above.
    template <typename Operation, typename ResultType>
    struct TBBReduceFunctor
    {
      static const unsigned int threshold_array_allocate = 512;

      TBBReduceFunctor(const Operation &op,
                       const size_type  start,
                       const size_type  end)
        : op(op)
        , start(start)
        , end(end)
      {
        const size_type    vec_size = end - start;
        const unsigned int gs =
          internal::VectorImplementation::minimum_parallel_grain_size;
        n_chunks =
          std::min<size_type>(4 * MultithreadInfo::n_threads(), vec_size / gs);
        chunk_size = vec_size / n_chunks;

        // round the chunk size up to the next multiple of 512; pairwise
        // summation favors chunk lengths that are powers of two
        if (chunk_size > 512)
          chunk_size = ((chunk_size + 511) / 512) * 512;
        n_chunks = (vec_size + chunk_size - 1) / chunk_size;

        if (n_chunks > threshold_array_allocate)
          {
            // allocate an even number of elements: do_sum() may need to write
            // one padding element past the last chunk
            large_array.resize(2 * ((n_chunks + 1) / 2));
            array_ptr = large_array.data();
          }
        else
          array_ptr = &small_array[0];
      }

      void
      operator()(const tbb::blocked_range<size_type> &range) const
      {
        for (size_type i = range.begin(); i < range.end(); ++i)
          accumulate_recursive(op,
                               start + i * chunk_size,
                               std::min(start + (i + 1) * chunk_size, end),
                               array_ptr[i]);
      }

      ResultType
      do_sum() const
      {
        while (n_chunks > 1)
          {
            if (n_chunks % 2 == 1)
              array_ptr[n_chunks++] = ResultType();
            for (size_type i = 0; i < n_chunks; i += 2)
              array_ptr[i / 2] = array_ptr[i] + array_ptr[i + 1];
            n_chunks /= 2;
          }
        return array_ptr[0];
      }

      const Operation &op;
      const size_type  start;
      const size_type  end;

      mutable unsigned int    n_chunks;
      unsigned int            chunk_size;
      ResultType              small_array[threshold_array_allocate];
      std::vector<ResultType> large_array;
      // points to small_array or large_array, depending on the chunk count
      mutable ResultType *array_ptr;
    };
#endif
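    // Storage note: the per-chunk partial results are written to the
    // fixed-size small_array on the stack unless more than
    // threshold_array_allocate chunks are needed, in which case the heap
    // allocated large_array is used instead; the allocation is rounded up to
    // an even number of entries so that do_sum() can append a padding element
    // when the chunk count is odd.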
    // General caller for parallel reductions; works analogously to
    // parallel_for() above but additionally combines the per-chunk results.
    template <typename Operation, typename ResultType>
    void
    parallel_reduce(
      const Operation &op,
      const size_type  start,
      const size_type  end,
      ResultType &     result,
      const std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
    {
#ifdef DEAL_II_WITH_THREADS
      const size_type vec_size = end - start;
      // only spawn threads if there is enough parallel work; otherwise the
      // overhead is larger than the gain
      if (vec_size >=
            4 * internal::VectorImplementation::minimum_parallel_grain_size &&
          MultithreadInfo::n_threads() > 1)
        {
          Assert(partitioner.get() != nullptr,
                 ExcMessage("Unexpected initialization of Vector that does "
                            "not set the TBB partitioner to a usable state."));
          std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
            partitioner->acquire_one_partitioner();

          TBBReduceFunctor<Operation, ResultType> generic_functor(op,
                                                                  start,
                                                                  end);
          parallel::internal::parallel_for(static_cast<size_type>(0),
                                           static_cast<size_type>(
                                             generic_functor.n_chunks),
                                           generic_functor,
                                           1,
                                           tbb_partitioner);
          partitioner->release_one_partitioner(tbb_partitioner);
          result = generic_functor.do_sum();
        }
      else
        accumulate_recursive(op, start, end, result);
#else
      accumulate_recursive(op, start, end, result);
      (void)partitioner;
#endif
    }
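    // The chunk boundaries are computed here rather than left to TBB on
    // purpose: every call with the same vector size sums the same chunks in
    // the same pairwise order, so the result is bitwise reproducible
    // independently of how many threads happen to pick up which chunk.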
    // Generic fallback of the memory-space dispatch: only the combination
    // checked by the static_assert below is ever valid; the remaining members
    // are no-ops that are overridden by the Host and CUDA specializations.
    template <typename Number, typename Number2, typename MemorySpace>
    struct functions
    {
      static void
      copy(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
           const size_type,
           const ::dealii::MemorySpace::MemorySpaceData<Number2, MemorySpace> &,
           ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {
        static_assert(
          std::is_same<MemorySpace, ::dealii::MemorySpace::CUDA>::value &&
            std::is_same<Number, Number2>::value,
          "For the CUDA MemorySpace Number and Number2 should be the same type");
      }

      static void
      set(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
          const size_type, const Number,
          ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      add_vector(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                 const size_type,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
                 ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      subtract_vector(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                      const size_type,
                      const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
                      ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      add_factor(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                 const size_type, const Number,
                 ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      add_av(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
             const size_type, const Number,
             const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
             ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      add_avpbw(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                const size_type, const Number, const Number,
                const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
                const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
                ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      sadd_xv(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
              const size_type, const Number,
              const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
              ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      sadd_xav(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
               const size_type, const Number, const Number,
               const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
               ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      sadd_xavbw(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                 const size_type, const Number, const Number, const Number,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
                 ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      multiply_factor(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                      const size_type, const Number,
                      ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      scale(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
            const size_type,
            const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
            ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      equ_au(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
             const size_type, const Number,
             const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
             ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static void
      equ_aubv(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
               const size_type, const Number, const Number,
               const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
               const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
               ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static Number
      dot(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
          const size_type,
          const ::dealii::MemorySpace::MemorySpaceData<Number2, MemorySpace> &,
          ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {
        return Number();
      }

      template <typename real_type>
      static void
      norm_2(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
             const size_type, real_type &,
             ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static Number
      mean_value(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                 const size_type,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {
        return Number();
      }

      template <typename real_type>
      static void
      norm_1(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
             const size_type, real_type &,
             ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      template <typename real_type>
      static void
      norm_p(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
             const size_type, real_type &, real_type,
             ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {}

      static Number
      add_and_dot(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                  const size_type, const Number,
                  const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
                  const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &,
                  ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
      {
        return Number();
      }
    };
    // Specialization for the Host memory space: every operation is forwarded
    // to the functors above via parallel_for() / parallel_reduce().
    template <typename Number, typename Number2>
    struct functions<Number, Number2, ::dealii::MemorySpace::Host>
    {
      static void
      copy(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
             &thread_loop_partitioner,
           const size_type size,
           const ::dealii::MemorySpace::MemorySpaceData<Number2, ::dealii::MemorySpace::Host> &v_data,
           ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vector_copy<Number, Number2> copier(v_data.values.get(),
                                            data.values.get());
        parallel_for(copier, 0, size, thread_loop_partitioner);
      }

      static void
      set(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
            &thread_loop_partitioner,
          const size_type size,
          const Number    s,
          ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vector_set<Number> setter(s, data.values.get());
        parallel_for(setter, 0, size, thread_loop_partitioner);
      }
      static void
      add_vector(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                   &thread_loop_partitioner,
                 const size_type size,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
                 ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_add_v<Number> vector_add(data.values.get(),
                                               v_data.values.get());
        parallel_for(vector_add, 0, size, thread_loop_partitioner);
      }

      static void
      subtract_vector(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                        &thread_loop_partitioner,
                      const size_type size,
                      const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
                      ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_subtract_v<Number> vector_subtract(data.values.get(),
                                                         v_data.values.get());
        parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
      }

      static void
      add_factor(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                   &thread_loop_partitioner,
                 const size_type size,
                 const Number    a,
                 ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_add_factor<Number> vector_add(data.values.get(), a);
        parallel_for(vector_add, 0, size, thread_loop_partitioner);
      }
      static void
      add_av(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
               &thread_loop_partitioner,
             const size_type size,
             const Number    a,
             const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_add_av<Number> vector_add(data.values.get(),
                                                v_data.values.get(),
                                                a);
        parallel_for(vector_add, 0, size, thread_loop_partitioner);
      }

      static void
      add_avpbw(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                  &thread_loop_partitioner,
                const size_type size,
                const Number    a,
                const Number    b,
                const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
                const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &w_data,
                ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_add_avpbw<Number> vector_add(
          data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
        parallel_for(vector_add, 0, size, thread_loop_partitioner);
      }

      static void
      sadd_xv(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                &thread_loop_partitioner,
              const size_type size,
              const Number    x,
              const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
              ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_sadd_xv<Number> vector_sadd(data.values.get(),
                                                  v_data.values.get(),
                                                  x);
        parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
      }

      static void
      sadd_xav(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                 &thread_loop_partitioner,
               const size_type size,
               const Number    x,
               const Number    a,
               const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
               ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_sadd_xav<Number> vector_sadd(data.values.get(),
                                                   v_data.values.get(),
                                                   a,
                                                   x);
        parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
      }
      static void
      sadd_xavbw(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                   &thread_loop_partitioner,
                 const size_type size,
                 const Number    x,
                 const Number    a,
                 const Number    b,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &w_data,
                 ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_sadd_xavbw<Number> vector_sadd(
          data.values.get(), v_data.values.get(), w_data.values.get(), x, a, b);
        parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
      }

      static void
      multiply_factor(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                        &thread_loop_partitioner,
                      const size_type size,
                      const Number    factor,
                      ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_multiply_factor<Number> vector_multiply(data.values.get(),
                                                              factor);
        parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
      }

      static void
      scale(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
              &thread_loop_partitioner,
            const size_type size,
            const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
            ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_scale<Number> vector_scale(data.values.get(),
                                                 v_data.values.get());
        parallel_for(vector_scale, 0, size, thread_loop_partitioner);
      }
      static void
      equ_au(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
               &thread_loop_partitioner,
             const size_type size,
             const Number    a,
             const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_equ_au<Number> vector_equ(data.values.get(),
                                                v_data.values.get(),
                                                a);
        parallel_for(vector_equ, 0, size, thread_loop_partitioner);
      }

      static void
      equ_aubv(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                 &thread_loop_partitioner,
               const size_type size,
               const Number    a,
               const Number    b,
               const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
               const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &w_data,
               ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Vectorization_equ_aubv<Number> vector_equ(
          data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
        parallel_for(vector_equ, 0, size, thread_loop_partitioner);
      }

      static Number
      dot(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
            &thread_loop_partitioner,
          const size_type size,
          const ::dealii::MemorySpace::MemorySpaceData<Number2, ::dealii::MemorySpace::Host> &v_data,
          ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Number sum;
        ::dealii::internal::VectorOperations::Dot<Number, Number2> dot(
          data.values.get(), v_data.values.get());
        ::dealii::internal::VectorOperations::parallel_reduce(
          dot, 0, size, sum, thread_loop_partitioner);
        AssertIsFinite(sum);
        return sum;
      }
      template <typename real_type>
      static void
      norm_2(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
               &thread_loop_partitioner,
             const size_type size,
             real_type &     sum,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Norm2<Number, real_type> norm2(data.values.get());
        parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
      }

      static Number
      mean_value(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                   &thread_loop_partitioner,
                 const size_type size,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Number            sum;
        MeanValue<Number> mean(data.values.get());
        parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);
        return sum;
      }

      template <typename real_type>
      static void
      norm_1(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
               &thread_loop_partitioner,
             const size_type size,
             real_type &     sum,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Norm1<Number, real_type> norm1(data.values.get());
        parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
      }

      template <typename real_type>
      static void
      norm_p(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
               &thread_loop_partitioner,
             const size_type size,
             real_type &     sum,
             const real_type p,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        NormP<Number, real_type> normp(data.values.get(), p);
        parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
      }
      static Number
      add_and_dot(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
                    &thread_loop_partitioner,
                  const size_type size,
                  const Number    a,
                  const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &v_data,
                  const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &w_data,
                  ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
      {
        Number            sum;
        AddAndDot<Number> adder(data.values.get(),
                                v_data.values.get(),
                                w_data.values.get(),
                                a);
        parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);
        return sum;
      }
    };
#ifdef DEAL_II_COMPILER_CUDA_AWARE
    // Specialization for the CUDA memory space: every operation launches one
    // of the kernels from LinearAlgebra::CUDAWrappers::kernel.
    template <typename Number>
    struct functions<Number, Number, ::dealii::MemorySpace::CUDA>
    {
      static const int block_size =
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::block_size;
      static const int chunk_size =
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::chunk_size;

      static void
      copy(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
           const size_type size,
           const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
           ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
                                                 v_data.values_dev.get(),
                                                 size * sizeof(Number),
                                                 cudaMemcpyDeviceToDevice);
        AssertCuda(cuda_error_code);
      }
      static void
      set(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
          const size_type size,
          const Number    s,
          ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::set<Number>
          <<<n_blocks, block_size>>>(data.values_dev.get(), s, size);

        // check that the kernel was launched and executed correctly
        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }
      static void
      add_vector(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                 const size_type size,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
                 ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
          <<<n_blocks, block_size>>>(data.values_dev.get(),
                                     1.,
                                     v_data.values_dev.get(),
                                     size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }

      static void
      subtract_vector(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                      const size_type size,
                      const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
                      ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
          <<<n_blocks, block_size>>>(data.values_dev.get(),
                                     -1.,
                                     v_data.values_dev.get(),
                                     size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }

      static void
      add_factor(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                 const size_type size,
                 const Number    a,
                 ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::vec_add<Number>
          <<<n_blocks, block_size>>>(data.values_dev.get(), a, size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }
      static void
      add_av(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
             const size_type size,
             const Number    a,
             const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
          <<<n_blocks, block_size>>>(data.values_dev.get(),
                                     a,
                                     v_data.values_dev.get(),
                                     size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }

      static void
      add_avpbw(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                const size_type size,
                const Number    a,
                const Number    b,
                const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
                const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &w_data,
                ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aVbW<Number>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
                                                    a,
                                                    v_data.values_dev.get(),
                                                    b,
                                                    w_data.values_dev.get(),
                                                    size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }
      static void
      sadd_xv(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
              const size_type size,
              const Number    x,
              const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
              ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(
            x, data.values_dev.get(), 1., v_data.values_dev.get(), size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }

      static void
      sadd_xav(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
               const size_type size,
               const Number    x,
               const Number    a,
               const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
               ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(
            x, data.values_dev.get(), a, v_data.values_dev.get(), size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }

      static void
      sadd_xavbw(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                 const size_type size,
                 const Number    x,
                 const Number    a,
                 const Number    b,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &w_data,
                 ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(x,
                                                    data.values_dev.get(),
                                                    a,
                                                    v_data.values_dev.get(),
                                                    b,
                                                    w_data.values_dev.get(),
                                                    size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }
      static void
      multiply_factor(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                      const size_type size,
                      const Number    factor,
                      ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::vec_scale<Number>
          <<<n_blocks, block_size>>>(data.values_dev.get(), factor, size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }

      static void
      scale(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
            const size_type size,
            const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
            ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::scale<Number>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
                                                    v_data.values_dev.get(),
                                                    size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }

      static void
      equ_au(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
             const size_type size,
             const Number    a,
             const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
                                                    a,
                                                    v_data.values_dev.get(),
                                                    size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }

      static void
      equ_aubv(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
               const size_type size,
               const Number    a,
               const Number    b,
               const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
               const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &w_data,
               ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
                                                    a,
                                                    v_data.values_dev.get(),
                                                    b,
                                                    w_data.values_dev.get(),
                                                    size);

        AssertCuda(cudaGetLastError());
        AssertCuda(cudaDeviceSynchronize());
      }
      static Number
      dot(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
          const size_type size,
          const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
          ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        Number *    result_device;
        cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
        AssertCuda(error_code);
        error_code = cudaMemset(result_device, 0, sizeof(Number));
        AssertCuda(error_code);

        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::double_vector_reduction<
          Number,
          ::dealii::LinearAlgebra::CUDAWrappers::kernel::DotProduct<Number>>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                    data.values_dev.get(),
                                                    v_data.values_dev.get(),
                                                    static_cast<unsigned int>(
                                                      size));

        // copy the result back to the host and release the device memory
        Number result;
        error_code = cudaMemcpy(&result,
                                result_device,
                                sizeof(Number),
                                cudaMemcpyDeviceToHost);
        AssertCuda(error_code);
        error_code = cudaFree(result_device);
        AssertCuda(error_code);

        AssertIsFinite(result);
        return result;
      }
      template <typename real_type>
      static void
      norm_2(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner>
               &thread_loop_partitioner,
             const size_type size,
             real_type &     sum,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        sum = dot(thread_loop_partitioner, size, data, data);
      }
      static Number
      mean_value(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                 const size_type size,
                 const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        Number *    result_device;
        cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
        AssertCuda(error_code);
        error_code = cudaMemset(result_device, 0, sizeof(Number));
        AssertCuda(error_code);

        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::reduction<
          Number,
          ::dealii::LinearAlgebra::CUDAWrappers::kernel::ElemSum<Number>>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                    data.values_dev.get(),
                                                    size);

        // copy the result back to the host and release the device memory
        Number result;
        error_code = cudaMemcpy(&result,
                                result_device,
                                sizeof(Number),
                                cudaMemcpyDeviceToHost);
        AssertCuda(error_code);
        error_code = cudaFree(result_device);
        AssertCuda(error_code);

        return result;
      }
      template <typename real_type>
      static void
      norm_1(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
             const size_type size,
             real_type &     sum,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        Number *    result_device;
        cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
        AssertCuda(error_code);
        error_code = cudaMemset(result_device, 0, sizeof(Number));
        AssertCuda(error_code);

        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::reduction<
          Number,
          ::dealii::LinearAlgebra::CUDAWrappers::kernel::L1Norm<Number>>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                    data.values_dev.get(),
                                                    size);

        // copy the result back to the host and release the device memory
        error_code = cudaMemcpy(&sum,
                                result_device,
                                sizeof(Number),
                                cudaMemcpyDeviceToHost);
        AssertCuda(error_code);
        error_code = cudaFree(result_device);
        AssertCuda(error_code);
      }

      template <typename real_type>
      static void
      norm_p(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
             const size_type,
             real_type &,
             real_type,
             ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &)
      {
        Assert(false, ExcNotImplemented());
      }
      static Number
      add_and_dot(const std::shared_ptr<::dealii::parallel::internal::TBBPartitioner> &,
                  const size_type size,
                  const Number    a,
                  const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &v_data,
                  const ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &w_data,
                  ::dealii::MemorySpace::MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
      {
        Number *    res_d;
        cudaError_t error_code = cudaMalloc(&res_d, sizeof(Number));
        AssertCuda(error_code);
        error_code = cudaMemset(res_d, 0, sizeof(Number));
        AssertCuda(error_code);

        const int n_blocks = 1 + size / (chunk_size * block_size);
        ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_and_dot<Number>
          <<<dim3(n_blocks, 1), dim3(block_size)>>>(res_d,
                                                    data.values_dev.get(),
                                                    v_data.values_dev.get(),
                                                    w_data.values_dev.get(),
                                                    a,
                                                    size);

        // copy the result back to the host and release the device memory
        Number res;
        error_code = cudaMemcpy(&res, res_d, sizeof(Number), cudaMemcpyDeviceToHost);
        AssertCuda(error_code);
        error_code = cudaFree(res_d);
        AssertCuda(error_code);

        return res;
      }
    };
#endif
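    // Reduction pattern used by the CUDA specializations of dot(),
    // mean_value(), norm_1() and add_and_dot() above: a single Number is
    // allocated and zeroed on the device, the kernel accumulates the partial
    // results into it, and the scalar is copied back with cudaMemcpy and then
    // freed. Each such call therefore implies one device allocation and one
    // device-to-host transfer of sizeof(Number) bytes.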
  } // namespace VectorOperations
} // namespace internal

DEAL_II_NAMESPACE_CLOSE

#endif