Reference documentation for deal.II version 9.1.1
vector_operations_internal.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2016 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vector_operations_internal_h
18 #define dealii_vector_operations_internal_h
19 
20 #include <deal.II/base/memory_space.h>
21 #include <deal.II/base/multithread_info.h>
22 #include <deal.II/base/parallel.h>
23 #include <deal.II/base/thread_management.h>
24 #include <deal.II/base/types.h>
25 #include <deal.II/base/vectorization.h>
26 
27 #include <deal.II/lac/cuda_kernels.h>
28 #include <deal.II/lac/cuda_kernels.templates.h>
29 
30 #include <cstdio>
31 #include <cstring>
32 
33 DEAL_II_NAMESPACE_OPEN
34 
35 namespace internal
36 {
37  namespace VectorOperations
38  {
39  using size_type = types::global_dof_index;
40 
41  template <typename T>
42  bool
43  is_non_negative(const T &t)
44  {
45  return t >= 0;
46  }
47 
48 
49  template <typename T>
50  bool
51  is_non_negative(const std::complex<T> &)
52  {
53  Assert(false, ExcMessage("Complex numbers do not have an ordering."));
54 
55  return false;
56  }
57 
58 
59  template <typename T>
60  void
61  print(const T &t, const char *format)
62  {
63  if (format != nullptr)
64  std::printf(format, t);
65  else
66  std::printf(" %5.2f", double(t));
67  }
68 
69 
70 
71  template <typename T>
72  void
73  print(const std::complex<T> &t, const char *format)
74  {
75  if (format != nullptr)
76  std::printf(format, t.real(), t.imag());
77  else
78  std::printf(" %5.2f+%5.2fi", double(t.real()), double(t.imag()));
79  }
80 
81  // call std::copy, except for in
82  // the case where we want to copy
83  // from std::complex to a
84  // non-complex type
85  template <typename T, typename U>
86  void
87  copy(const T *begin, const T *end, U *dest)
88  {
89  std::copy(begin, end, dest);
90  }
91 
92  template <typename T, typename U>
93  void
94  copy(const std::complex<T> *begin,
95  const std::complex<T> *end,
96  std::complex<U> * dest)
97  {
98  std::copy(begin, end, dest);
99  }
100 
101  template <typename T, typename U>
102  void
103  copy(const std::complex<T> *, const std::complex<T> *, U *)
104  {
105  Assert(false,
106  ExcMessage("Can't convert a vector of complex numbers "
107  "into a vector of reals/doubles"));
108  }
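  // Illustrative sketch (illustration only, not part of the library source):
  // overload resolution picks the matching variant above, e.g.
  //
  //   std::complex<double> src[4];
  //   std::complex<float>  dst[4];
  //   copy(src, src + 4, dst);      // ok: complex -> complex, uses std::copy
  //
  //   double real_dst[4];
  //   copy(src, src + 4, real_dst); // hits the Assert: complex -> real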
109 
110 
111 
112 #ifdef DEAL_II_WITH_THREADS
113 
121  template <typename Functor>
 122  struct TBBForFunctor
 123  {
124  TBBForFunctor(Functor & functor,
125  const size_type start,
126  const size_type end)
127  : functor(functor)
128  , start(start)
129  , end(end)
130  {
131  const size_type vec_size = end - start;
132  // set chunk size for sub-tasks
133  const unsigned int gs =
134  internal::VectorImplementation::minimum_parallel_grain_size;
135  n_chunks =
136  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
137  vec_size / gs);
138  chunk_size = vec_size / n_chunks;
139 
140  // round to next multiple of 512 (or minimum grain size if that happens
141  // to be smaller). this is advantageous because our accumulation
142  // algorithms favor lengths of a power of 2 due to pairwise summation ->
143  // at most one 'oddly' sized chunk
144  if (chunk_size > 512)
145  chunk_size = ((chunk_size + 511) / 512) * 512;
146  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
147  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
148  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
149  }
150 
151  void
152  operator()(const tbb::blocked_range<size_type> &range) const
153  {
154  const size_type r_begin = start + range.begin() * chunk_size;
155  const size_type r_end = std::min(start + range.end() * chunk_size, end);
156  functor(r_begin, r_end);
157  }
158 
159  Functor & functor;
160  const size_type start;
161  const size_type end;
162  unsigned int n_chunks;
163  size_type chunk_size;
164  };
165 #endif
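  // Worked example of the chunking above (illustration only; assumes
  // minimum_parallel_grain_size == 4096 and 4 threads):
  //   vec_size   = 100000
  //   n_chunks   = std::min(4 * 4, 100000 / 4096) = std::min(16, 24) = 16
  //   chunk_size = 100000 / 16 = 6250, rounded up to 6656 (= 13 * 512)
  //   n_chunks   = (100000 + 6656 - 1) / 6656 = 16
  // i.e. 15 chunks of 6656 entries plus one final chunk of 160 entries, so
  // at most one chunk has an 'odd' length.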
166 
167  template <typename Functor>
168  void
169  parallel_for(
170  Functor & functor,
171  const size_type start,
172  const size_type end,
173  const std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
174  {
175 #ifdef DEAL_II_WITH_THREADS
176  const size_type vec_size = end - start;
177  // only go to the parallel function in case there are at least 4 parallel
178  // items, otherwise the overhead is too large
179  if (vec_size >=
180  4 * internal::VectorImplementation::minimum_parallel_grain_size &&
 181  MultithreadInfo::n_threads() > 1)
 182  {
183  Assert(partitioner.get() != nullptr,
 184  ExcInternalError(
 185  "Unexpected initialization of Vector that does "
186  "not set the TBB partitioner to a usable state."));
187  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
188  partitioner->acquire_one_partitioner();
189 
190  TBBForFunctor<Functor> generic_functor(functor, start, end);
191  // We use a minimum grain size of 1 here since the grains at this
192  // stage of dividing the work refer to the number of vector chunks
193  // that are processed by (possibly different) threads in the
194  // parallelized for loop (i.e., they do not refer to individual
195  // vector entries). The number of chunks here is calculated inside
196  // TBBForFunctor. See also GitHub issue #2496 for further discussion
197  // of this strategy.
198  parallel::internal::parallel_for(static_cast<size_type>(0),
199  static_cast<size_type>(
200  generic_functor.n_chunks),
201  generic_functor,
202  1,
203  tbb_partitioner);
204  partitioner->release_one_partitioner(tbb_partitioner);
205  }
206  else if (vec_size > 0)
207  functor(start, end);
208 #else
209  functor(start, end);
210  (void)partitioner;
211 #endif
212  }
213 
214 
 215  // Define the functors necessary to use SIMD with TBB. We also include the
 216  // simple copy and set operations.
217 
218  template <typename Number>
219  struct Vector_set
220  {
221  Vector_set(const Number value, Number *const dst)
222  : value(value)
223  , dst(dst)
224  {
225  Assert(dst != nullptr, ExcInternalError());
226  }
227 
228  void
229  operator()(const size_type begin, const size_type end) const
230  {
231  Assert(end >= begin, ExcInternalError());
232 
233  if (value == Number())
234  {
235 #ifdef DEAL_II_WITH_CXX17
236  if constexpr (std::is_trivial<Number>::value)
237 #else
238  if (std::is_trivial<Number>::value)
239 #endif
240  {
241  std::memset(dst + begin, 0, sizeof(Number) * (end - begin));
242  return;
243  }
244  }
245  std::fill(dst + begin, dst + end, value);
246  }
247 
248  const Number value;
249  Number *const dst;
250  };
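  // Minimal usage sketch (illustration only; 'values' and 'partitioner' are
  // placeholders): the functor is handed to parallel_for() above, which
  // splits [0, size) into chunks and calls operator()(begin, end) on each:
  //
  //   Vector_set<double> setter(0., values);
  //   parallel_for(setter, 0, size, partitioner);
  //
  // For value == Number() and a trivial Number the operation degenerates to
  // std::memset on each chunk.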
251 
252  template <typename Number, typename OtherNumber>
253  struct Vector_copy
254  {
255  Vector_copy(const OtherNumber *const src, Number *const dst)
256  : src(src)
257  , dst(dst)
258  {
259  Assert(src != nullptr, ExcInternalError());
260  Assert(dst != nullptr, ExcInternalError());
261  }
262 
263  void
264  operator()(const size_type begin, const size_type end) const
265  {
266  Assert(end >= begin, ExcInternalError());
267 
268 #if __GNUG__ && __GNUC__ < 5
269  if (__has_trivial_copy(Number) &&
270  std::is_same<Number, OtherNumber>::value)
271 #else
272 # ifdef DEAL_II_WITH_CXX17
273  if constexpr (std::is_trivially_copyable<Number>() &&
274  std::is_same<Number, OtherNumber>::value)
275 # else
276  if (std::is_trivially_copyable<Number>() &&
277  std::is_same<Number, OtherNumber>::value)
278 # endif
279 #endif
280  std::memcpy(dst + begin, src + begin, (end - begin) * sizeof(Number));
281  else
282  {
283  DEAL_II_OPENMP_SIMD_PRAGMA
284  for (size_type i = begin; i < end; ++i)
285  dst[i] = src[i];
286  }
287  }
288 
289  const OtherNumber *const src;
290  Number *const dst;
291  };
292 
293  template <typename Number>
294  struct Vectorization_multiply_factor
295  {
296  Vectorization_multiply_factor(Number *const val, const Number factor)
297  : val(val)
298  , factor(factor)
299  {}
300 
301  void
302  operator()(const size_type begin, const size_type end) const
303  {
 304  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 305  {
306  DEAL_II_OPENMP_SIMD_PRAGMA
307  for (size_type i = begin; i < end; ++i)
308  val[i] *= factor;
309  }
310  else
311  {
312  for (size_type i = begin; i < end; ++i)
313  val[i] *= factor;
314  }
315  }
316 
317  Number *const val;
318  const Number factor;
319  };
320 
321  template <typename Number>
322  struct Vectorization_add_av
323  {
324  Vectorization_add_av(Number *const val,
325  const Number *const v_val,
326  const Number factor)
327  : val(val)
328  , v_val(v_val)
329  , factor(factor)
330  {}
331 
332  void
333  operator()(const size_type begin, const size_type end) const
334  {
 335  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 336  {
337  DEAL_II_OPENMP_SIMD_PRAGMA
338  for (size_type i = begin; i < end; ++i)
339  val[i] += factor * v_val[i];
340  }
341  else
342  {
343  for (size_type i = begin; i < end; ++i)
344  val[i] += factor * v_val[i];
345  }
346  }
347 
348  Number *const val;
349  const Number *const v_val;
350  const Number factor;
351  };
352 
353  template <typename Number>
354  struct Vectorization_sadd_xav
355  {
356  Vectorization_sadd_xav(Number * val,
357  const Number *const v_val,
358  const Number a,
359  const Number x)
360  : val(val)
361  , v_val(v_val)
362  , a(a)
363  , x(x)
364  {}
365 
366  void
367  operator()(const size_type begin, const size_type end) const
368  {
 369  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 370  {
371  DEAL_II_OPENMP_SIMD_PRAGMA
372  for (size_type i = begin; i < end; ++i)
373  val[i] = x * val[i] + a * v_val[i];
374  }
375  else
376  {
377  for (size_type i = begin; i < end; ++i)
378  val[i] = x * val[i] + a * v_val[i];
379  }
380  }
381 
382  Number *const val;
383  const Number *const v_val;
384  const Number a;
385  const Number x;
386  };
387 
388  template <typename Number>
389  struct Vectorization_subtract_v
390  {
391  Vectorization_subtract_v(Number *val, const Number *const v_val)
392  : val(val)
393  , v_val(v_val)
394  {}
395 
396  void
397  operator()(const size_type begin, const size_type end) const
398  {
 399  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 400  {
401  DEAL_II_OPENMP_SIMD_PRAGMA
402  for (size_type i = begin; i < end; ++i)
403  val[i] -= v_val[i];
404  }
405  else
406  {
407  for (size_type i = begin; i < end; ++i)
408  val[i] -= v_val[i];
409  }
410  }
411 
412  Number *const val;
413  const Number *const v_val;
414  };
415 
416  template <typename Number>
417  struct Vectorization_add_factor
418  {
419  Vectorization_add_factor(Number *const val, const Number factor)
420  : val(val)
421  , factor(factor)
422  {}
423 
424  void
425  operator()(const size_type begin, const size_type end) const
426  {
 427  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 428  {
429  DEAL_II_OPENMP_SIMD_PRAGMA
430  for (size_type i = begin; i < end; ++i)
431  val[i] += factor;
432  }
433  else
434  {
435  for (size_type i = begin; i < end; ++i)
436  val[i] += factor;
437  }
438  }
439 
440  Number *const val;
441  const Number factor;
442  };
443 
444  template <typename Number>
445  struct Vectorization_add_v
446  {
447  Vectorization_add_v(Number *const val, const Number *const v_val)
448  : val(val)
449  , v_val(v_val)
450  {}
451 
452  void
453  operator()(const size_type begin, const size_type end) const
454  {
 455  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 456  {
457  DEAL_II_OPENMP_SIMD_PRAGMA
458  for (size_type i = begin; i < end; ++i)
459  val[i] += v_val[i];
460  }
461  else
462  {
463  for (size_type i = begin; i < end; ++i)
464  val[i] += v_val[i];
465  }
466  }
467 
468  Number *const val;
469  const Number *const v_val;
470  };
471 
472  template <typename Number>
473  struct Vectorization_add_avpbw
474  {
475  Vectorization_add_avpbw(Number *const val,
476  const Number *const v_val,
477  const Number *const w_val,
478  const Number a,
479  const Number b)
480  : val(val)
481  , v_val(v_val)
482  , w_val(w_val)
483  , a(a)
484  , b(b)
485  {}
486 
487  void
488  operator()(const size_type begin, const size_type end) const
489  {
 490  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 491  {
492  DEAL_II_OPENMP_SIMD_PRAGMA
493  for (size_type i = begin; i < end; ++i)
494  val[i] = val[i] + a * v_val[i] + b * w_val[i];
495  }
496  else
497  {
498  for (size_type i = begin; i < end; ++i)
499  val[i] = val[i] + a * v_val[i] + b * w_val[i];
500  }
501  }
502 
503  Number *const val;
504  const Number *const v_val;
505  const Number *const w_val;
506  const Number a;
507  const Number b;
508  };
509 
510  template <typename Number>
511  struct Vectorization_sadd_xv
512  {
513  Vectorization_sadd_xv(Number *const val,
514  const Number *const v_val,
515  const Number x)
516  : val(val)
517  , v_val(v_val)
518  , x(x)
519  {}
520 
521  void
522  operator()(const size_type begin, const size_type end) const
523  {
 524  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 525  {
526  DEAL_II_OPENMP_SIMD_PRAGMA
527  for (size_type i = begin; i < end; ++i)
528  val[i] = x * val[i] + v_val[i];
529  }
530  else
531  {
532  for (size_type i = begin; i < end; ++i)
533  val[i] = x * val[i] + v_val[i];
534  }
535  }
536 
537  Number *const val;
538  const Number *const v_val;
539  const Number x;
540  };
541 
542  template <typename Number>
543  struct Vectorization_sadd_xavbw
544  {
545  Vectorization_sadd_xavbw(Number * val,
546  const Number *v_val,
547  const Number *w_val,
548  Number x,
549  Number a,
550  Number b)
551  : val(val)
552  , v_val(v_val)
553  , w_val(w_val)
554  , x(x)
555  , a(a)
556  , b(b)
557  {}
558 
559  void
560  operator()(const size_type begin, const size_type end) const
561  {
 562  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 563  {
564  DEAL_II_OPENMP_SIMD_PRAGMA
565  for (size_type i = begin; i < end; ++i)
566  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
567  }
568  else
569  {
570  for (size_type i = begin; i < end; ++i)
571  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
572  }
573  }
574 
575  Number *const val;
576  const Number *const v_val;
577  const Number *const w_val;
578  const Number x;
579  const Number a;
580  const Number b;
581  };
582 
583  template <typename Number>
584  struct Vectorization_scale
585  {
586  Vectorization_scale(Number *const val, const Number *const v_val)
587  : val(val)
588  , v_val(v_val)
589  {}
590 
591  void
592  operator()(const size_type begin, const size_type end) const
593  {
 594  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 595  {
596  DEAL_II_OPENMP_SIMD_PRAGMA
597  for (size_type i = begin; i < end; ++i)
598  val[i] *= v_val[i];
599  }
600  else
601  {
602  for (size_type i = begin; i < end; ++i)
603  val[i] *= v_val[i];
604  }
605  }
606 
607  Number *const val;
608  const Number *const v_val;
609  };
610 
611  template <typename Number>
612  struct Vectorization_equ_au
613  {
614  Vectorization_equ_au(Number *const val,
615  const Number *const u_val,
616  const Number a)
617  : val(val)
618  , u_val(u_val)
619  , a(a)
620  {}
621 
622  void
623  operator()(const size_type begin, const size_type end) const
624  {
 625  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 626  {
627  DEAL_II_OPENMP_SIMD_PRAGMA
628  for (size_type i = begin; i < end; ++i)
629  val[i] = a * u_val[i];
630  }
631  else
632  {
633  for (size_type i = begin; i < end; ++i)
634  val[i] = a * u_val[i];
635  }
636  }
637 
638  Number *const val;
639  const Number *const u_val;
640  const Number a;
641  };
642 
643  template <typename Number>
644  struct Vectorization_equ_aubv
645  {
646  Vectorization_equ_aubv(Number *const val,
647  const Number *const u_val,
648  const Number *const v_val,
649  const Number a,
650  const Number b)
651  : val(val)
652  , u_val(u_val)
653  , v_val(v_val)
654  , a(a)
655  , b(b)
656  {}
657 
658  void
659  operator()(const size_type begin, const size_type end) const
660  {
 661  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 662  {
663  DEAL_II_OPENMP_SIMD_PRAGMA
664  for (size_type i = begin; i < end; ++i)
665  val[i] = a * u_val[i] + b * v_val[i];
666  }
667  else
668  {
669  for (size_type i = begin; i < end; ++i)
670  val[i] = a * u_val[i] + b * v_val[i];
671  }
672  }
673 
674  Number *const val;
675  const Number *const u_val;
676  const Number *const v_val;
677  const Number a;
678  const Number b;
679  };
680 
681  template <typename Number>
682  struct Vectorization_equ_aubvcw
683  {
684  Vectorization_equ_aubvcw(Number * val,
685  const Number *u_val,
686  const Number *v_val,
687  const Number *w_val,
688  const Number a,
689  const Number b,
690  const Number c)
691  : val(val)
692  , u_val(u_val)
693  , v_val(v_val)
694  , w_val(w_val)
695  , a(a)
696  , b(b)
697  , c(c)
698  {}
699 
700  void
701  operator()(const size_type begin, const size_type end) const
702  {
 703  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 704  {
705  DEAL_II_OPENMP_SIMD_PRAGMA
706  for (size_type i = begin; i < end; ++i)
707  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
708  }
709  else
710  {
711  for (size_type i = begin; i < end; ++i)
712  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
713  }
714  }
715 
716  Number *const val;
717  const Number *const u_val;
718  const Number *const v_val;
719  const Number *const w_val;
720  const Number a;
721  const Number b;
722  const Number c;
723  };
724 
725  template <typename Number>
726  struct Vectorization_ratio
727  {
728  Vectorization_ratio(Number *val, const Number *a_val, const Number *b_val)
729  : val(val)
730  , a_val(a_val)
731  , b_val(b_val)
732  {}
733 
734  void
735  operator()(const size_type begin, const size_type end) const
736  {
 737  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
 738  {
739  DEAL_II_OPENMP_SIMD_PRAGMA
740  for (size_type i = begin; i < end; ++i)
741  val[i] = a_val[i] / b_val[i];
742  }
743  else
744  {
745  for (size_type i = begin; i < end; ++i)
746  val[i] = a_val[i] / b_val[i];
747  }
748  }
749 
750  Number *const val;
751  const Number *const a_val;
752  const Number *const b_val;
753  };
754 
755 
756 
757  // All sums over all the vector entries (l2-norm, inner product, etc.) are
758  // performed with the same code, using a templated operation defined
759  // here. There are always two versions defined, a standard one that covers
760  // most cases and a vectorized one which is only for equal types and float
761  // and double.
762  template <typename Number, typename Number2>
763  struct Dot
764  {
765  static const bool vectorizes =
766  std::is_same<Number, Number2>::value &&
 767  (VectorizedArray<Number>::n_array_elements > 1);
 768 
769  Dot(const Number *const X, const Number2 *const Y)
770  : X(X)
771  , Y(Y)
772  {}
773 
774  Number
775  operator()(const size_type i) const
776  {
777  return X[i] * Number(numbers::NumberTraits<Number2>::conjugate(Y[i]));
778  }
779 
 780  VectorizedArray<Number>
 781  do_vectorized(const size_type i) const
 782  {
 783  VectorizedArray<Number> x, y;
 784  x.load(X + i);
785  y.load(Y + i);
786 
787  // the following operation in VectorizedArray does an element-wise
788  // scalar product without taking into account complex values and
789  // the need to take the complex-conjugate of one argument. this
790  // may be a bug, but because all VectorizedArray classes only
791  // work on real scalars, it doesn't really matter very much.
792  // in any case, assert that we really don't get here for
793  // complex-valued objects
794  static_assert(numbers::NumberTraits<Number>::is_complex == false,
795  "This operation is not correctly implemented for "
796  "complex-valued objects.");
797  return x * y;
798  }
799 
800  const Number *const X;
801  const Number2 *const Y;
802  };
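  // Sketch of how the two entry points of Dot are used by the accumulation
  // routines further down (illustration only):
  //
  //   Dot<double, double> dot(x, y);
  //   double                  s  = dot(i);               // x[i] * y[i]
  //   VectorizedArray<double> sv = dot.do_vectorized(i); // entries i ...
  //                                          // i + n_array_elements - 1
  //
  // The vectorized path is chosen only when 'vectorizes' is true, i.e. for
  // identical number types and a SIMD width larger than one.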
803 
804  template <typename Number, typename RealType>
805  struct Norm2
806  {
807  static const bool vectorizes =
 808  VectorizedArray<Number>::n_array_elements > 1;
 809 
810  Norm2(const Number *const X)
811  : X(X)
812  {}
813 
814  RealType
815  operator()(const size_type i) const
816  {
 817  return numbers::NumberTraits<Number>::abs_square(X[i]);
 818  }
819 
 820  VectorizedArray<Number>
 821  do_vectorized(const size_type i) const
 822  {
 823  VectorizedArray<Number> x;
 824  x.load(X + i);
825  return x * x;
826  }
827 
828  const Number *const X;
829  };
830 
831  template <typename Number, typename RealType>
832  struct Norm1
833  {
834  static const bool vectorizes =
 835  VectorizedArray<Number>::n_array_elements > 1;
 836 
837  Norm1(const Number *X)
838  : X(X)
839  {}
840 
841  RealType
842  operator()(const size_type i) const
843  {
 844  return numbers::NumberTraits<Number>::abs(X[i]);
 845  }
846 
 847  VectorizedArray<Number>
 848  do_vectorized(const size_type i) const
 849  {
 850  VectorizedArray<Number> x;
 851  x.load(X + i);
852  return std::abs(x);
853  }
854 
855  const Number *X;
856  };
857 
858  template <typename Number, typename RealType>
859  struct NormP
860  {
861  static const bool vectorizes =
 862  VectorizedArray<Number>::n_array_elements > 1;
 863 
864  NormP(const Number *X, RealType p)
865  : X(X)
866  , p(p)
867  {}
868 
869  RealType
870  operator()(const size_type i) const
871  {
872  return std::pow(numbers::NumberTraits<Number>::abs(X[i]), p);
873  }
874 
 875  VectorizedArray<Number>
 876  do_vectorized(const size_type i) const
 877  {
 878  VectorizedArray<Number> x;
 879  x.load(X + i);
880  return std::pow(std::abs(x), p);
881  }
882 
883  const Number * X;
884  const RealType p;
885  };
886 
887  template <typename Number>
888  struct MeanValue
889  {
890  static const bool vectorizes =
 891  VectorizedArray<Number>::n_array_elements > 1;
 892 
893  MeanValue(const Number *X)
894  : X(X)
895  {}
896 
897  Number
898  operator()(const size_type i) const
899  {
900  return X[i];
901  }
902 
 903  VectorizedArray<Number>
 904  do_vectorized(const size_type i) const
 905  {
 906  VectorizedArray<Number> x;
 907  x.load(X + i);
908  return x;
909  }
910 
911  const Number *X;
912  };
913 
914  template <typename Number>
915  struct AddAndDot
916  {
917  static const bool vectorizes =
 918  VectorizedArray<Number>::n_array_elements > 1;
 919 
920  AddAndDot(Number *const X,
921  const Number *const V,
922  const Number *const W,
923  const Number a)
924  : X(X)
925  , V(V)
926  , W(W)
927  , a(a)
928  {}
929 
930  Number
931  operator()(const size_type i) const
932  {
933  X[i] += a * V[i];
934  return X[i] * Number(numbers::NumberTraits<Number>::conjugate(W[i]));
935  }
936 
 937  VectorizedArray<Number>
 938  do_vectorized(const size_type i) const
 939  {
 940  VectorizedArray<Number> x, v, w;
 941  x.load(X + i);
942  v.load(V + i);
943  x += a * v;
944  x.store(X + i);
945  // may only load from W after storing in X because the pointers might
946  // point to the same memory
947  w.load(W + i);
948 
949  // the following operation in VectorizedArray does an element-wise
950  // scalar product without taking into account complex values and
951  // the need to take the complex-conjugate of one argument. this
952  // may be a bug, but because all VectorizedArray classes only
953  // work on real scalars, it doesn't really matter very much.
954  // in any case, assert that we really don't get here for
955  // complex-valued objects
956  static_assert(numbers::NumberTraits<Number>::is_complex == false,
957  "This operation is not correctly implemented for "
958  "complex-valued objects.");
959  return x * w;
960  }
961 
962  Number *const X;
963  const Number *const V;
964  const Number *const W;
965  const Number a;
966  };
967 
968 
969 
970  // this is the main working loop for all vector sums using the templated
971  // operation above. it accumulates the sums using a block-wise summation
972  // algorithm with post-update. this blocked algorithm has been proposed in
973  // a similar form by Castaldo, Whaley and Chronopoulos (SIAM
974  // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible
975  // block size, 2. Sometimes it is referred to as pairwise summation. The
976  // worst case error made by this algorithm is on the order O(eps *
977  // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even
978  // though the Kahan summation is even more accurate with an error O(eps)
979  // by carrying along remainders not captured by the main sum, that involves
980  // additional costs which are not worthwhile. See the Wikipedia article on
981  // the Kahan summation algorithm.
982 
983  // The algorithm implemented here has the additional benefit that it is
984  // easily parallelized without changing the order of how the elements are
985  // added (floating point addition is not associative). For the same vector
986  // size and minimum_parallel_grainsize, the blocks are always the
987  // same and added pairwise.
988 
989  // The depth of recursion is controlled by the 'magic' parameter
990  // vector_accumulation_recursion_threshold: If the length is below
991  // vector_accumulation_recursion_threshold * 32 (32 is the part of code we
992  // unroll), a straight loop instead of recursion will be used. At the
993  // innermost level, eight values are added consecutively in order to better
994  // balance multiplications and additions.
995 
996  // Loops are unrolled as follows: the range [first,last) is broken into
997  // @p n_chunks each of size 32 plus the @p remainder.
998  // accumulate_regular() does the work on 32*n_chunks elements employing SIMD
999  // if possible and stores the result of the operation for each chunk in @p outer_results.
1000 
1001  // The code returns the result as the last argument in order to make
1002  // spawning tasks simpler and use automatic template deduction.
1003 
1004 
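  // For illustration, the same idea as a stand-alone pairwise sum (this is
  // a simplified sketch, not the code used below):
  //
  //   template <typename T>
  //   T pairwise_sum(const T *a, std::size_t n)
  //   {
  //     if (n <= 8) // small base case: plain loop
  //       {
  //         T s = T();
  //         for (std::size_t i = 0; i < n; ++i)
  //           s += a[i];
  //         return s;
  //       }
  //     const std::size_t m = n / 2; // split and add the two halves
  //     return pairwise_sum(a, m) + pairwise_sum(a + m, n - m);
  //   }
  //
  // Every entry passes through O(log2(n)) additions rather than O(n), which
  // is where the O(eps * log2(vec_size)) worst-case error bound comes from.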
1010  const unsigned int vector_accumulation_recursion_threshold = 128;
1011 
1012  template <typename Operation, typename ResultType>
1013  void
1014  accumulate_recursive(const Operation &op,
1015  const size_type first,
1016  const size_type last,
1017  ResultType & result)
1018  {
1019  const size_type vec_size = last - first;
1020  if (vec_size <= vector_accumulation_recursion_threshold * 32)
1021  {
1022  // the vector is short enough so we perform the summation. first
1023  // work on the regular part. The innermost 32 values are expanded in
1024  // order to obtain known loop bounds for most of the work.
1025  size_type index = first;
1026  ResultType outer_results[vector_accumulation_recursion_threshold];
1027 
1028  // set the zeroth element to zero to correctly handle the case where
1029  // vec_size == 0
1030  outer_results[0] = ResultType();
1031 
1032  // the variable serves two purposes: (i) number of chunks (each 32
1033  // indices) for the given size; all results are stored in
1034  // outer_results[0,n_chunks) (ii) in the SIMD case n_chunks is also a
1035  // next free index in outer_results[] to which we can write after
1036  // accumulate_regular() is executed.
1037  size_type n_chunks = vec_size / 32;
1038  const size_type remainder = vec_size % 32;
1039  Assert(remainder == 0 ||
1040  n_chunks < vector_accumulation_recursion_threshold,
1041  ExcInternalError());
1042 
1043  // Select between the regular version and vectorized version based
1044  // on the number types we are given. To choose the vectorized
1045  // version often enough, we need to have all tasks but the last one
1046  // to be divisible by the vectorization length
1047  accumulate_regular(
1048  op,
1049  n_chunks,
1050  index,
1051  outer_results,
1052  std::integral_constant<bool, Operation::vectorizes>());
1053 
1054  // now work on the remainder, i.e., the last up to 32 values. Use
1055  // switch statement with fall-through to work on these values.
1056  if (remainder > 0)
1057  {
1058  // if we got here, it means that (vec_size <=
1059  // vector_accumulation_recursion_threshold * 32), which is to say
1060  // that the domain can be split into n_chunks <=
1061  // vector_accumulation_recursion_threshold:
1062  AssertIndexRange(n_chunks,
1063  vector_accumulation_recursion_threshold + 1);
1064  // split the remainder into chunks of 8, there could be up to 3
1065  // such chunks since remainder < 32.
1066  // Work on those chunks without any SIMD, that is we call
1067  // op(index).
1068  const size_type inner_chunks = remainder / 8;
1069  Assert(inner_chunks <= 3, ExcInternalError());
1070  const size_type remainder_inner = remainder % 8;
1071  ResultType r0 = ResultType(), r1 = ResultType(),
1072  r2 = ResultType();
1073  switch (inner_chunks)
1074  {
1075  case 3:
1076  r2 = op(index++);
1077  for (size_type j = 1; j < 8; ++j)
1078  r2 += op(index++);
1079  DEAL_II_FALLTHROUGH;
1080  case 2:
1081  r1 = op(index++);
1082  for (size_type j = 1; j < 8; ++j)
1083  r1 += op(index++);
1084  r1 += r2;
1085  DEAL_II_FALLTHROUGH;
1086  case 1:
1087  r2 = op(index++);
1088  for (size_type j = 1; j < 8; ++j)
1089  r2 += op(index++);
1090  DEAL_II_FALLTHROUGH;
1091  default:
1092  for (size_type j = 0; j < remainder_inner; ++j)
1093  r0 += op(index++);
1094  r0 += r2;
1095  r0 += r1;
1096  if (n_chunks == vector_accumulation_recursion_threshold)
1097  outer_results[vector_accumulation_recursion_threshold -
1098  1] += r0;
1099  else
1100  {
1101  outer_results[n_chunks] = r0;
1102  n_chunks++;
1103  }
1104  break;
1105  }
1106  }
1107  // make sure we worked through all indices
1108  AssertDimension(index, last);
1109 
1110  // now sum the results from the chunks stored in
1111  // outer_results[0,n_chunks) recursively
1112  while (n_chunks > 1)
1113  {
1114  if (n_chunks % 2 == 1)
1115  outer_results[n_chunks++] = ResultType();
1116  for (size_type i = 0; i < n_chunks; i += 2)
1117  outer_results[i / 2] = outer_results[i] + outer_results[i + 1];
1118  n_chunks /= 2;
1119  }
1120  result = outer_results[0];
1121  }
1122  else
1123  {
1124  // split vector into four pieces and work on the pieces
1125  // recursively. Make pieces (except last) divisible by one fourth the
1126  // recursion threshold.
1127  const size_type new_size =
1128  (vec_size / (vector_accumulation_recursion_threshold * 32)) *
1129  vector_accumulation_recursion_threshold * 8;
1130  Assert(first + 3 * new_size < last, ExcInternalError());
1131  ResultType r0, r1, r2, r3;
1132  accumulate_recursive(op, first, first + new_size, r0);
1133  accumulate_recursive(op, first + new_size, first + 2 * new_size, r1);
1134  accumulate_recursive(op,
1135  first + 2 * new_size,
1136  first + 3 * new_size,
1137  r2);
1138  accumulate_recursive(op, first + 3 * new_size, last, r3);
1139  r0 += r1;
1140  r2 += r3;
1141  result = r0 + r2;
1142  }
1143  }
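  // Worked example of the recursive split above (illustration only): for
  // vec_size = 10000 > 128 * 32 = 4096 one obtains
  //   new_size = (10000 / 4096) * 128 * 8 = 2 * 1024 = 2048,
  // so the four recursive calls cover [first, first+2048),
  // [first+2048, first+4096), [first+4096, first+6144) and
  // [first+6144, last); only the last piece has an irregular length.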
1144 
1145 
1146  // this is the inner working routine for the accumulation loops
1147  // below. This is the standard case where the loop bounds are known. We
1148  // pulled this function out of the regular accumulate routine because we
1149  // might do this thing vectorized (see specialized function below)
1150  template <typename Operation, typename ResultType>
1151  void
1152  accumulate_regular(
1153  const Operation &op,
1154  const size_type &n_chunks,
1155  size_type & index,
1156  ResultType (&outer_results)[vector_accumulation_recursion_threshold],
1157  std::integral_constant<bool, false>)
1158  {
1159  // note that each chunk is chosen to have a width of 32, thereby the index
1160  // is incremented by 4*8 for each @p i.
1161  for (size_type i = 0; i < n_chunks; ++i)
1162  {
1163  ResultType r0 = op(index);
1164  ResultType r1 = op(index + 1);
1165  ResultType r2 = op(index + 2);
1166  ResultType r3 = op(index + 3);
1167  index += 4;
1168  for (size_type j = 1; j < 8; ++j, index += 4)
1169  {
1170  r0 += op(index);
1171  r1 += op(index + 1);
1172  r2 += op(index + 2);
1173  r3 += op(index + 3);
1174  }
1175  r0 += r1;
1176  r2 += r3;
1177  outer_results[i] = r0 + r2;
1178  }
1179  }
1180 
1181 
1182 
1183  // this is the inner working routine for the accumulation loops
1184  // below. This is the specialized case where the loop bounds are known and
1185  // where we can vectorize. In that case, we request the 'do_vectorized'
1186  // routine of the operation instead of the regular one which does several
1187  // operations at once.
1188  template <typename Operation, typename Number>
1189  void
1190  accumulate_regular(
1191  const Operation &op,
1192  size_type & n_chunks,
1193  size_type & index,
1194  Number (&outer_results)[vector_accumulation_recursion_threshold],
1195  std::integral_constant<bool, true>)
1196  {
 1197  // we start from @p index and work out @p n_chunks each of size 32.
 1198  // in order to employ SIMD and work on @p nvecs at a time, we split this
1199  // loop yet again:
1200  // First we work on (n_chunks/nvecs) chunks, where each chunk processes
1201  // nvecs*(4*8) elements.
1202 
1203  const unsigned int nvecs = VectorizedArray<Number>::n_array_elements;
1204  const size_type regular_chunks = n_chunks / nvecs;
1205  for (size_type i = 0; i < regular_chunks; ++i)
1206  {
1207  VectorizedArray<Number> r0 = op.do_vectorized(index);
1208  VectorizedArray<Number> r1 = op.do_vectorized(index + nvecs);
1209  VectorizedArray<Number> r2 = op.do_vectorized(index + 2 * nvecs);
1210  VectorizedArray<Number> r3 = op.do_vectorized(index + 3 * nvecs);
1211  index += nvecs * 4;
1212  for (size_type j = 1; j < 8; ++j, index += nvecs * 4)
1213  {
1214  r0 += op.do_vectorized(index);
1215  r1 += op.do_vectorized(index + nvecs);
1216  r2 += op.do_vectorized(index + 2 * nvecs);
1217  r3 += op.do_vectorized(index + 3 * nvecs);
1218  }
1219  r0 += r1;
1220  r2 += r3;
1221  r0 += r2;
1222  r0.store(
1223  &outer_results[i * VectorizedArray<Number>::n_array_elements]);
1224  }
1225 
 1226  // If we are treating a case where the vector length is not divisible by
 1227  // the vectorization length, we need a cleanup loop.
1228  // The remaining chunks are processed one by one starting from
1229  // regular_chunks * nvecs; We do as much as possible with 2 SIMD
1230  // operations within each chunk. Here we assume that nvecs < 32/2 = 16 as
1231  // well as 16%nvecs==0.
 1232  AssertIndexRange(VectorizedArray<Number>::n_array_elements, 17);
 1233  Assert(16 % nvecs == 0, ExcInternalError());
1234  if (n_chunks % VectorizedArray<Number>::n_array_elements != 0)
1235  {
 1236  VectorizedArray<Number> r0 = VectorizedArray<Number>(),
 1237  r1 = VectorizedArray<Number>();
1238  const size_type start_irreg = regular_chunks * nvecs;
1239  for (size_type c = start_irreg; c < n_chunks; ++c)
1240  for (size_type j = 0; j < 32; j += 2 * nvecs, index += 2 * nvecs)
1241  {
1242  r0 += op.do_vectorized(index);
1243  r1 += op.do_vectorized(index + nvecs);
1244  }
1245  r0 += r1;
1246  r0.store(&outer_results[start_irreg]);
1247  // update n_chunks to denote unused element in outer_results[] from
1248  // which we can keep writing.
1249  n_chunks = start_irreg + VectorizedArray<Number>::n_array_elements;
1250  }
1251  }
1252 
1253 
1254 
1255 #ifdef DEAL_II_WITH_THREADS
1256 
1284  template <typename Operation, typename ResultType>
 1285  struct TBBReduceFunctor
 1286  {
1287  static const unsigned int threshold_array_allocate = 512;
1288 
1289  TBBReduceFunctor(const Operation &op,
1290  const size_type start,
1291  const size_type end)
1292  : op(op)
1293  , start(start)
1294  , end(end)
1295  {
1296  const size_type vec_size = end - start;
1297  // set chunk size for sub-tasks
1298  const unsigned int gs =
1299  internal::VectorImplementation::minimum_parallel_grain_size;
1300  n_chunks =
1301  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
1302  vec_size / gs);
1303  chunk_size = vec_size / n_chunks;
1304 
1305  // round to next multiple of 512 (or leave it at the minimum grain size
1306  // if that happens to be smaller). this is advantageous because our
1307  // algorithm favors lengths of a power of 2 due to pairwise summation ->
1308  // at most one 'oddly' sized chunk
1309  if (chunk_size > 512)
1310  chunk_size = ((chunk_size + 511) / 512) * 512;
1311  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
1312  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
1313  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
1314 
1315  if (n_chunks > threshold_array_allocate)
1316  {
1317  // make sure we allocate an even number of elements,
1318  // access to the new last element is needed in do_sum()
1319  large_array.resize(2 * ((n_chunks + 1) / 2));
1320  array_ptr = large_array.data();
1321  }
1322  else
1323  array_ptr = &small_array[0];
1324  }
1325 
1330  void
1331  operator()(const tbb::blocked_range<size_type> &range) const
1332  {
1333  for (size_type i = range.begin(); i < range.end(); ++i)
1334  accumulate_recursive(op,
1335  start + i * chunk_size,
1336  std::min(start + (i + 1) * chunk_size, end),
1337  array_ptr[i]);
1338  }
1339 
1340  ResultType
1341  do_sum() const
1342  {
1343  while (n_chunks > 1)
1344  {
1345  if (n_chunks % 2 == 1)
1346  array_ptr[n_chunks++] = ResultType();
1347  for (size_type i = 0; i < n_chunks; i += 2)
1348  array_ptr[i / 2] = array_ptr[i] + array_ptr[i + 1];
1349  n_chunks /= 2;
1350  }
1351  return array_ptr[0];
1352  }
1353 
1354  const Operation &op;
1355  const size_type start;
1356  const size_type end;
1357 
1358  mutable unsigned int n_chunks;
1359  unsigned int chunk_size;
1360  ResultType small_array[threshold_array_allocate];
1361  std::vector<ResultType> large_array;
1362  // this variable either points to small_array or large_array depending on
1363  // the number of threads we want to feed
1364  mutable ResultType *array_ptr;
1365  };
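  // Illustration of do_sum(): the per-chunk results are reduced pairwise,
  // padding an odd count with a zero element (hence the even-sized
  // allocation above). For 5 chunks the count evolves as
  // 5 -> 6 -> 3 -> 4 -> 2 -> 1, and array_ptr[0] ends up holding the sum.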
1366 #endif
1367 
1368 
1369 
1374  template <typename Operation, typename ResultType>
1375  void
1376  parallel_reduce(
1377  const Operation & op,
1378  const size_type start,
1379  const size_type end,
1380  ResultType & result,
1381  const std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
1382  {
1383 #ifdef DEAL_II_WITH_THREADS
1384  const size_type vec_size = end - start;
1385  // only go to the parallel function in case there are at least 4 parallel
1386  // items, otherwise the overhead is too large
1387  if (vec_size >=
1388  4 * internal::VectorImplementation::minimum_parallel_grain_size &&
 1389  MultithreadInfo::n_threads() > 1)
 1390  {
1391  Assert(partitioner.get() != nullptr,
 1392  ExcInternalError(
 1393  "Unexpected initialization of Vector that does "
1394  "not set the TBB partitioner to a usable state."));
1395  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
1396  partitioner->acquire_one_partitioner();
1397 
1398  TBBReduceFunctor<Operation, ResultType> generic_functor(op,
1399  start,
1400  end);
1401  // We use a minimum grain size of 1 here since the grains at this
1402  // stage of dividing the work refer to the number of vector chunks
1403  // that are processed by (possibly different) threads in the
1404  // parallelized for loop (i.e., they do not refer to individual
1405  // vector entries). The number of chunks here is calculated inside
 1406  // TBBReduceFunctor. See also GitHub issue #2496 for further discussion
1407  // of this strategy.
1408  parallel::internal::parallel_for(static_cast<size_type>(0),
1409  static_cast<size_type>(
1410  generic_functor.n_chunks),
1411  generic_functor,
1412  1,
1413  tbb_partitioner);
1414  partitioner->release_one_partitioner(tbb_partitioner);
1415  result = generic_functor.do_sum();
1416  }
1417  else
1418  accumulate_recursive(op, start, end, result);
1419 #else
1420  accumulate_recursive(op, start, end, result);
1421  (void)partitioner;
1422 #endif
1423  }
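  // Usage sketch (mirrors functions<...>::dot() further down; variable
  // names are placeholders):
  //
  //   Number sum;
  //   Dot<Number, Number2> dot(values, v_values);
  //   parallel_reduce(dot, 0, size, sum, thread_loop_partitioner);
  //
  // The reduction functor supplies the per-entry operation; parallel_reduce
  // takes care of chunking, SIMD and the pairwise summation.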
1424 
1425 
1426  template <typename Number, typename Number2, typename MemorySpace>
1427  struct functions
1428  {
1429  static void
1430  copy(
1431  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1432  /*thread_loop_partitioner*/,
1433  const size_type /*size*/,
1434  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1435  & /*v_data*/,
1437  {
1438  static_assert(
1439  std::is_same<MemorySpace, ::MemorySpace::CUDA>::value &&
1440  std::is_same<Number, Number2>::value,
1441  "For the CUDA MemorySpace Number and Number2 should be the same type");
1442  }
1443 
1444  static void
1445  set(
1446  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1447  /*thread_loop_partitioner*/,
1448  const size_type /*size*/,
1449  const Number /*s*/,
1451  {}
1452 
1453  static void
1454  add_vector(
1455  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1456  /*thread_loop_partitioner*/,
1457  const size_type /*size*/,
1458  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1459  & /*v_data*/,
1461  {}
1462 
1463  static void
1464  subtract_vector(
1465  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1466  /*thread_loop_partitioner*/,
1467  const size_type /*size*/,
1468  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1469  & /*v_data*/,
1471  {}
1472 
1473  static void
1474  add_factor(
1475  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1476  /*thread_loop_partitioner*/,
1477  const size_type /*size*/,
1478  Number /*a*/,
1480  {}
1481 
1482  static void
1483  add_av(
1484  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1485  /*thread_loop_partitioner*/,
1486  const size_type /*size*/,
1487  const Number /*a*/,
1488  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1489  & /*v_data*/,
1491  {}
1492 
1493  static void
1494  add_avpbw(
1495  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1496  /*thread_loop_partitioner*/,
1497  const size_type /*size*/,
1498  const Number /*a*/,
1499  const Number /*b*/,
1500  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1501  & /*v_data*/,
1502  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1503  & /*w_data*/,
1505  {}
1506 
1507  static void
1508  sadd_xv(
1509  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1510  /*thread_loop_partitioner*/,
1511  const size_type /*size*/,
1512  const Number /*x*/,
1513  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1514  & /*v_data*/,
1516  {}
1517 
1518  static void
1519  sadd_xav(
1520  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1521  /*thread_loop_partitioner*/,
1522  const size_type /*size*/,
1523  const Number /*x*/,
1524  const Number /*a*/,
1525  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1526  & /*v_data*/,
1528  {}
1529 
1530  static void
1531  sadd_xavbw(
1532  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1533  /*thread_loop_partitioner*/,
1534  const size_type /*size*/,
1535  const Number /*x*/,
1536  const Number /*a*/,
1537  const Number /*b*/,
1538  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1539  & /*v_data*/,
1540  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1541  & /*w_data*/,
1543  {}
1544 
1545  static void
1546  multiply_factor(
1547  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1548  /*thread_loop_partitioner*/,
1549  const size_type /*size*/,
1550  const Number /*factor*/,
1552  {}
1553 
1554  static void
1555  scale(
1556  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1557  /*thread_loop_partitioner*/,
1558  const size_type /*size*/,
1559  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1560  & /*v_data*/,
1562  {}
1563 
1564  static void
1565  equ_au(
1566  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1567  /*thread_loop_partitioner*/,
1568  const size_type /*size*/,
1569  const Number /*a*/,
1570  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1571  & /*v_data*/,
1573  {}
1574 
1575  static void
1576  equ_aubv(
1577  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1578  /*thread_loop_partitioner*/,
1579  const size_type /*size*/,
1580  const Number /*a*/,
1581  const Number /*b*/,
1582  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1583  & /*v_data*/,
1584  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1585  & /*w_data*/,
1587  {}
1588 
1589  static Number
1590  dot(
1591  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1592  /*thread_loop_partitioner*/,
1593  const size_type /*size*/,
1594  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1595  & /*v_data*/,
1597  {
1598  return Number();
1599  }
1600 
1601  template <typename real_type>
1602  static void
1603  norm_2(
1604  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1605  /*thread_loop_partitioner*/,
1606  const size_type /*size*/,
1607  real_type & /*sum*/,
1608  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1609  & /*v_data*/,
1611  {}
1612 
1613  static Number
1614  mean_value(
1615  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1616  /*thread_loop_partitioner*/,
1617  const size_type /*size*/,
1618  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1619  & /*data*/)
1620  {
1621  return Number();
1622  }
1623 
1624  template <typename real_type>
1625  static void
1626  norm_1(
1627  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1628  /*thread_loop_partitioner*/,
1629  const size_type /*size*/,
1630  real_type & /*sum*/,
1631  Number * /*values*/,
1632  Number * /*values_dev*/)
1633  {}
1634 
1635  template <typename real_type>
1636  static void
1637  norm_p(
1638  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1639  /*thread_loop_partitioner*/,
1640  const size_type /*size*/,
1641  real_type & /*sum*/,
1642  real_type /*p*/,
1644  {}
1645 
1646  static Number
1647  add_and_dot(
1648  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1649  /*thread_loop_partitioner*/,
1650  const size_type /*size*/,
1651  const Number /*a*/,
1652  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1653  & /*v_data*/,
1654  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1655  & /*w_data*/,
1657  {
1658  return Number();
1659  }
1660  };
1661 
1662 
1663 
1664  template <typename Number, typename Number2>
1665  struct functions<Number, Number2, ::MemorySpace::Host>
1666  {
1667  static void
1668  copy(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1669  & thread_loop_partitioner,
1670  const size_type size,
1671  const ::MemorySpace::
1672  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1675  &data)
1676  {
1677  Vector_copy<Number, Number2> copier(v_data.values.get(),
1678  data.values.get());
1679  parallel_for(copier, 0, size, thread_loop_partitioner);
1680  }
1681 
1682  static void
1683  set(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1684  & thread_loop_partitioner,
1685  const size_type size,
1686  const Number s,
1689  &data)
1690  {
1691  Vector_set<Number> setter(s, data.values.get());
1692  parallel_for(setter, 0, size, thread_loop_partitioner);
1693  }
1694 
1695  static void
1696  add_vector(
1697  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1698  & thread_loop_partitioner,
1699  const size_type size,
1700  const ::MemorySpace::
1701  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1704  &data)
1705  {
1706  Vectorization_add_v<Number> vector_add(data.values.get(),
1707  v_data.values.get());
1708  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1709  }
1710 
1711  static void
1712  subtract_vector(
1713  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1714  & thread_loop_partitioner,
1715  const size_type size,
1716  const ::MemorySpace::
1717  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1720  &data)
1721  {
1722  Vectorization_subtract_v<Number> vector_subtract(data.values.get(),
1723  v_data.values.get());
1724  parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
1725  }
1726 
1727  static void
1728  add_factor(
1729  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1730  & thread_loop_partitioner,
1731  const size_type size,
1732  Number a,
1735  &data)
1736  {
1737  Vectorization_add_factor<Number> vector_add(data.values.get(), a);
1738  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1739  }
1740 
1741  static void
1742  add_av(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1743  & thread_loop_partitioner,
1744  const size_type size,
1745  const Number a,
1746  const ::MemorySpace::
1747  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1750  &data)
1751  {
1752  Vectorization_add_av<Number> vector_add(data.values.get(),
1753  v_data.values.get(),
1754  a);
1755  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1756  }
1757 
1758  static void
1759  add_avpbw(
1760  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1761  & thread_loop_partitioner,
1762  const size_type size,
1763  const Number a,
1764  const Number b,
1765  const ::MemorySpace::
1766  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1767  const ::MemorySpace::
1768  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1771  &data)
1772  {
1773  Vectorization_add_avpbw<Number> vector_add(
1774  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1775  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1776  }
1777 
1778  static void
1779  sadd_xv(
1780  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1781  & thread_loop_partitioner,
1782  const size_type size,
1783  const Number x,
1784  const ::MemorySpace::
1785  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1788  &data)
1789  {
1790  Vectorization_sadd_xv<Number> vector_sadd(data.values.get(),
1791  v_data.values.get(),
1792  x);
1793  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1794  }
1795 
1796  static void
1797  sadd_xav(
1798  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1799  & thread_loop_partitioner,
1800  const size_type size,
1801  const Number x,
1802  const Number a,
1803  const ::MemorySpace::
1804  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1807  &data)
1808  {
1809  Vectorization_sadd_xav<Number> vector_sadd(data.values.get(),
1810  v_data.values.get(),
1811  a,
1812  x);
1813  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1814  }
1815 
1816  static void
1817  sadd_xavbw(
1818  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1819  & thread_loop_partitioner,
1820  const size_type size,
1821  const Number x,
1822  const Number a,
1823  const Number b,
1824  const ::MemorySpace::
1825  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1826  const ::MemorySpace::
1827  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1830  &data)
1831  {
1832  Vectorization_sadd_xavbw<Number> vector_sadd(
1833  data.values.get(), v_data.values.get(), w_data.values.get(), x, a, b);
1834  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1835  }
1836 
1837  static void
1838  multiply_factor(
1839  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1840  & thread_loop_partitioner,
1841  const size_type size,
1842  const Number factor,
1845  &data)
1846  {
1847  Vectorization_multiply_factor<Number> vector_multiply(data.values.get(),
1848  factor);
1849  parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
1850  }
1851 
1852  static void
1853  scale(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1854  & thread_loop_partitioner,
1855  const size_type size,
1856  const ::MemorySpace::
1857  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1860  &data)
1861  {
1862  Vectorization_scale<Number> vector_scale(data.values.get(),
1863  v_data.values.get());
1864  parallel_for(vector_scale, 0, size, thread_loop_partitioner);
1865  }
1866 
1867  static void
1868  equ_au(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1869  & thread_loop_partitioner,
1870  const size_type size,
1871  const Number a,
1872  const ::MemorySpace::
1873  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1876  &data)
1877  {
1878  Vectorization_equ_au<Number> vector_equ(data.values.get(),
1879  v_data.values.get(),
1880  a);
1881  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1882  }
1883 
1884  static void
1885  equ_aubv(
1886  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1887  & thread_loop_partitioner,
1888  const size_type size,
1889  const Number a,
1890  const Number b,
1891  const ::MemorySpace::
1892  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1893  const ::MemorySpace::
1894  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1897  &data)
1898  {
1899  Vectorization_equ_aubv<Number> vector_equ(
1900  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1901  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1902  }
1903 
1904  static Number
1905  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1906  & thread_loop_partitioner,
1907  const size_type size,
1908  const ::MemorySpace::
1909  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1912  &data)
1913  {
1914  Number sum;
1915  ::internal::VectorOperations::Dot<Number, Number2> dot(
1916  data.values.get(), v_data.values.get());
1917  ::internal::VectorOperations::parallel_reduce(
1918  dot, 0, size, sum, thread_loop_partitioner);
1919  AssertIsFinite(sum);
1920 
1921  return sum;
1922  }
1923 
1924  template <typename real_type>
1925  static void
1926  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1927  & thread_loop_partitioner,
1928  const size_type size,
1929  real_type & sum,
1932  &data)
1933  {
1934  Norm2<Number, real_type> norm2(data.values.get());
1935  parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
1936  }
1937 
1938  static Number
1939  mean_value(
1940  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1941  & thread_loop_partitioner,
1942  const size_type size,
1943  const ::MemorySpace::
1944  MemorySpaceData<Number, ::MemorySpace::Host> &data)
1945  {
1946  Number sum;
1947  MeanValue<Number> mean(data.values.get());
1948  parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);
1949 
1950  return sum;
1951  }
1952 
1953  template <typename real_type>
1954  static void
1955  norm_1(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1956  & thread_loop_partitioner,
1957  const size_type size,
1958  real_type & sum,
1961  &data)
1962  {
1963  Norm1<Number, real_type> norm1(data.values.get());
1964  parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
1965  }
1966 
1967  template <typename real_type>
1968  static void
1969  norm_p(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1970  & thread_loop_partitioner,
1971  const size_type size,
1972  real_type & sum,
1973  const real_type p,
1976  &data)
1977  {
1978  NormP<Number, real_type> normp(data.values.get(), p);
1979  parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
1980  }
1981 
1982  static Number
1983  add_and_dot(
1984  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1985  & thread_loop_partitioner,
1986  const size_type size,
1987  const Number a,
1988  const ::MemorySpace::
1989  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1990  const ::MemorySpace::
1991  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1994  &data)
1995  {
1996  Number sum;
1997  AddAndDot<Number> adder(data.values.get(),
1998  v_data.values.get(),
1999  w_data.values.get(),
2000  a);
2001  parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);
2002 
2003  return sum;
2004  }
2005  };
2006 
2007 
2008 
2009 #ifdef DEAL_II_COMPILER_CUDA_AWARE
2010  template <typename Number>
2011  struct functions<Number, Number, ::MemorySpace::CUDA>
2012  {
2013  static const int block_size =
2014  ::LinearAlgebra::CUDAWrappers::kernel::block_size;
2015  static const int chunk_size =
2016  ::LinearAlgebra::CUDAWrappers::kernel::chunk_size;
2017 
2018  static void
2019  copy(
2020  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2021  const size_type size,
2022  const ::MemorySpace::
2023  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2026  &data)
2027  {
2028  cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
2029  v_data.values_dev.get(),
2030  size * sizeof(Number),
2031  cudaMemcpyDeviceToDevice);
2032  AssertCuda(cuda_error_code);
2033  }
2034 
2035  static void
2036  set(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2037  const size_type size,
2038  const Number s,
2041  &data)
2042  {
2043  const int n_blocks = 1 + size / (chunk_size * block_size);
2044  ::LinearAlgebra::CUDAWrappers::kernel::set<Number>
2045  <<<n_blocks, block_size>>>(data.values_dev.get(), s, size);
2046 
2047 # ifdef DEBUG
2048  // Check that the kernel was launched correctly
2049  AssertCuda(cudaGetLastError());
2050  // Check that there was no problem during the execution of the kernel
2051  AssertCuda(cudaDeviceSynchronize());
2052 # endif
2053  }
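  // The launch configuration used throughout this specialization follows the
  // same pattern: every thread handles chunk_size entries, so
  //   n_blocks = 1 + size / (chunk_size * block_size)
  // blocks of block_size threads cover all 'size' entries. For illustration,
  // assuming block_size == 512 and chunk_size == 8, size = 1000000 gives
  // n_blocks = 1 + 1000000 / 4096 = 245.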
2054 
2055  static void
2056  add_vector(
2057  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2058  const size_type size,
2059  const ::MemorySpace::
2060  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2063  &data)
2064  {
2065  const int n_blocks = 1 + size / (chunk_size * block_size);
2066  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2067  <<<n_blocks, block_size>>>(data.values_dev.get(),
2068  1.,
2069  v_data.values_dev.get(),
2070  size);
2071 
2072 # ifdef DEBUG
2073  // Check that the kernel was launched correctly
2074  AssertCuda(cudaGetLastError());
2075  // Check that there was no problem during the execution of the kernel
2076  AssertCuda(cudaDeviceSynchronize());
2077 # endif
2078  }
2079 
2080  static void
2081  subtract_vector(
2082  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2083  const size_type size,
2084  const ::MemorySpace::
2085  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2088  &data)
2089  {
2090  const int n_blocks = 1 + size / (chunk_size * block_size);
2091  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2092  <<<n_blocks, block_size>>>(data.values_dev.get(),
2093  -1.,
2094  v_data.values_dev.get(),
2095  size);
2096 
2097 # ifdef DEBUG
2098  // Check that the kernel was launched correctly
2099  AssertCuda(cudaGetLastError());
2100  // Check that there was no problem during the execution of the kernel
2101  AssertCuda(cudaDeviceSynchronize());
2102 # endif
2103  }
2104 
2105  static void
2106  add_factor(
2107  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2108  const size_type size,
2109  Number a,
2110  ::MemorySpace::
2111  MemorySpaceData<Number, ::MemorySpace::CUDA>
2112  &data)
2113  {
2114  const int n_blocks = 1 + size / (chunk_size * block_size);
2115  ::LinearAlgebra::CUDAWrappers::kernel::vec_add<Number>
2116  <<<n_blocks, block_size>>>(data.values_dev.get(), a, size);
2117 
2118 # ifdef DEBUG
2119  // Check that the kernel was launched correctly
2120  AssertCuda(cudaGetLastError());
2121  // Check that there was no problem during the execution of the kernel
2122  AssertCuda(cudaDeviceSynchronize());
2123 # endif
2124  }
2125 
2126  static void
2127  add_av(
2128  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2129  const size_type size,
2130  const Number a,
2131  const ::MemorySpace::
2132  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2133  ::MemorySpace::
2134  MemorySpaceData<Number, ::MemorySpace::CUDA>
2135  &data)
2136  {
2137  const int n_blocks = 1 + size / (chunk_size * block_size);
2138  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2139  <<<n_blocks, block_size>>>(data.values_dev.get(),
2140  a,
2141  v_data.values_dev.get(),
2142  size);
2143 
2144 # ifdef DEBUG
2145  // Check that the kernel was launched correctly
2146  AssertCuda(cudaGetLastError());
2147  // Check that there was no problem during the execution of the kernel
2148  AssertCuda(cudaDeviceSynchronize());
2149 # endif
2150  }
2151 
2152  static void
2153  add_avpbw(
2154  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2155  const size_type size,
2156  const Number a,
2157  const Number b,
2158  const ::MemorySpace::
2159  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2160  const ::MemorySpace::
2161  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2162  ::MemorySpace::
2163  MemorySpaceData<Number, ::MemorySpace::CUDA>
2164  &data)
2165  {
2166  const int n_blocks = 1 + size / (chunk_size * block_size);
2167  ::LinearAlgebra::CUDAWrappers::kernel::add_aVbW<Number>
2168  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2169  a,
2170  v_data.values_dev.get(),
2171  b,
2172  w_data.values_dev.get(),
2173  size);
2174 
2175 # ifdef DEBUG
2176  // Check that the kernel was launched correctly
2177  AssertCuda(cudaGetLastError());
2178  // Check that there was no problem during the execution of the kernel
2179  AssertCuda(cudaDeviceSynchronize());
2180 # endif
2181  }
2182 
2183  static void
2184  sadd_xv(
2185  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2186  const size_type size,
2187  const Number x,
2188  const ::MemorySpace::
2189  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2190  ::MemorySpace::
2191  MemorySpaceData<Number, ::MemorySpace::CUDA>
2192  &data)
2193  {
2194  const int n_blocks = 1 + size / (chunk_size * block_size);
2195  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2196  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2197  x, data.values_dev.get(), 1., v_data.values_dev.get(), size);
2198 
2199 # ifdef DEBUG
2200  // Check that the kernel was launched correctly
2201  AssertCuda(cudaGetLastError());
2202  // Check that there was no problem during the execution of the kernel
2203  AssertCuda(cudaDeviceSynchronize());
2204 # endif
2205  }
2206 
2207  static void
2208  sadd_xav(
2209  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2210  const size_type size,
2211  const Number x,
2212  const Number a,
2213  const ::MemorySpace::
2214  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2215  ::MemorySpace::
2216  MemorySpaceData<Number, ::MemorySpace::CUDA>
2217  &data)
2218  {
2219  const int n_blocks = 1 + size / (chunk_size * block_size);
2220  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2221  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2222  x, data.values_dev.get(), a, v_data.values_dev.get(), size);
2223 
2224 # ifdef DEBUG
2225  // Check that the kernel was launched correctly
2226  AssertCuda(cudaGetLastError());
2227  // Check that there was no problem during the execution of the kernel
2228  AssertCuda(cudaDeviceSynchronize());
2229 # endif
2230  }
2231 
2232  static void
2233  sadd_xavbw(
2234  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2235  const size_type size,
2236  const Number x,
2237  const Number a,
2238  const Number b,
2239  const ::MemorySpace::
2240  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2241  const ::MemorySpace::
2242  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2243  ::MemorySpace::
2244  MemorySpaceData<Number, ::MemorySpace::CUDA>
2245  &data)
2246  {
2247  const int n_blocks = 1 + size / (chunk_size * block_size);
2248  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2249  <<<dim3(n_blocks, 1), dim3(block_size)>>>(x,
2250  data.values_dev.get(),
2251  a,
2252  v_data.values_dev.get(),
2253  b,
2254  w_data.values_dev.get(),
2255  size);
2256 
2257 # ifdef DEBUG
2258  // Check that the kernel was launched correctly
2259  AssertCuda(cudaGetLastError());
2260  // Check that there was no problem during the execution of the kernel
2261  AssertCuda(cudaDeviceSynchronize());
2262 # endif
2263  }
2264 
2265  static void
2266  multiply_factor(
2267  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2268  const size_type size,
2269  const Number factor,
2270  ::MemorySpace::
2271  MemorySpaceData<Number, ::MemorySpace::CUDA>
2272  &data)
2273  {
2274  const int n_blocks = 1 + size / (chunk_size * block_size);
2275  ::LinearAlgebra::CUDAWrappers::kernel::vec_scale<Number>
2276  <<<n_blocks, block_size>>>(data.values_dev.get(), factor, size);
2277 
2278 # ifdef DEBUG
2279  // Check that the kernel was launched correctly
2280  AssertCuda(cudaGetLastError());
2281  // Check that there was no problem during the execution of the kernel
2282  AssertCuda(cudaDeviceSynchronize());
2283 # endif
2284  }
2285 
2286  static void
2287  scale(
2288  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2289  const size_type size,
2290  const ::MemorySpace::
2291  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2292  ::MemorySpace::
2293  MemorySpaceData<Number, ::MemorySpace::CUDA>
2294  &data)
2295  {
2296  const int n_blocks = 1 + size / (chunk_size * block_size);
2297  ::LinearAlgebra::CUDAWrappers::kernel::scale<Number>
2298  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2299  v_data.values_dev.get(),
2300  size);
2301 
2302 # ifdef DEBUG
2303  // Check that the kernel was launched correctly
2304  AssertCuda(cudaGetLastError());
2305  // Check that there was no problem during the execution of the kernel
2306  AssertCuda(cudaDeviceSynchronize());
2307 # endif
2308  }
2309 
2310  static void
2311  equ_au(
2312  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2313  const size_type size,
2314  const Number a,
2315  const ::MemorySpace::
2316  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2317  ::MemorySpace::
2318  MemorySpaceData<Number, ::MemorySpace::CUDA>
2319  &data)
2320  {
2321  const int n_blocks = 1 + size / (chunk_size * block_size);
2322  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2323  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2324  a,
2325  v_data.values_dev.get(),
2326  size);
2327 
2328 # ifdef DEBUG
2329  // Check that the kernel was launched correctly
2330  AssertCuda(cudaGetLastError());
2331  // Check that there was no problem during the execution of the kernel
2332  AssertCuda(cudaDeviceSynchronize());
2333 # endif
2334  }
2335 
2336  static void
2337  equ_aubv(
2338  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2339  const size_type size,
2340  const Number a,
2341  const Number b,
2342  const ::MemorySpace::
2343  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2344  const ::MemorySpace::
2345  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2346  ::MemorySpace::
2347  MemorySpaceData<Number, ::MemorySpace::CUDA>
2348  &data)
2349  {
2350  const int n_blocks = 1 + size / (chunk_size * block_size);
2351  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2352  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2353  a,
2354  v_data.values_dev.get(),
2355  b,
2356  w_data.values_dev.get(),
2357  size);
2358 
2359 # ifdef DEBUG
2360  // Check that the kernel was launched correctly
2361  AssertCuda(cudaGetLastError());
2362  // Check that there was no problem during the execution of the kernel
2363  AssertCuda(cudaDeviceSynchronize());
2364 # endif
2365  }
2366 
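      // The reductions below (dot, mean_value, norm_1) all follow the same
      // pattern: allocate a single Number on the device, zero it, let the
      // reduction kernel accumulate the partial results into it, and copy
      // the value back to the host before freeing the device memory.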
2367  static Number
2368  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2369  const size_type size,
2370  const ::MemorySpace::
2371  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2372  ::MemorySpace::
2373  MemorySpaceData<Number, ::MemorySpace::CUDA>
2374  &data)
2375  {
2376  Number * result_device;
2377  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2378  AssertCuda(error_code);
2379  error_code = cudaMemset(result_device, 0, sizeof(Number));
2380  AssertCuda(error_code);
2381 
2382  const int n_blocks = 1 + size / (chunk_size * block_size);
2383  ::LinearAlgebra::CUDAWrappers::kernel::double_vector_reduction<
2384  Number,
2385  ::LinearAlgebra::CUDAWrappers::kernel::DotProduct<Number>>
2386  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2387  data.values_dev.get(),
2388  v_data.values_dev.get(),
2389  static_cast<unsigned int>(
2390  size));
2391 
2392 # ifdef DEBUG
2393  // Check that the kernel was launched correctly
2394  AssertCuda(cudaGetLastError());
2395  // Check that there was no problem during the execution of the kernel
2396  AssertCuda(cudaDeviceSynchronize());
2397 # endif
2398 
2399  // Copy the result back to the host
2400  Number result;
2401  error_code = cudaMemcpy(&result,
2402  result_device,
2403  sizeof(Number),
2404  cudaMemcpyDeviceToHost);
2405  AssertCuda(error_code);
2406  // Free the memory on the device
2407  error_code = cudaFree(result_device);
2408  AssertCuda(error_code);
2409 
2410  AssertIsFinite(result);
2411 
2412  return result;
2413  }
2414 
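      // norm_2 only accumulates the sum of squares by reusing dot() on the
      // vector with itself; taking the square root is left to the caller.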
2415  template <typename real_type>
2416  static void
2417  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2418  & thread_loop_partitioner,
2419  const size_type size,
2420  real_type & sum,
2421  ::MemorySpace::
2422  MemorySpaceData<Number, ::MemorySpace::CUDA>
2423  &data)
2424  {
2425  sum = dot(thread_loop_partitioner, size, data, data);
2426  }
2427 
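      // mean_value only returns the plain sum of the entries; dividing by
      // the (global) vector size happens in the caller.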
2428  static Number
2429  mean_value(
2430  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2431  const size_type size,
2432  const ::MemorySpace::
2433  MemorySpaceData<Number, ::MemorySpace::CUDA> &data)
2434  {
2435  Number * result_device;
2436  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2437  AssertCuda(error_code);
2438  error_code = cudaMemset(result_device, 0, sizeof(Number));
2439 
2440  const int n_blocks = 1 + size / (chunk_size * block_size);
2441  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2442  Number,
2443  ::LinearAlgebra::CUDAWrappers::kernel::ElemSum<Number>>
2444  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2445  data.values_dev.get(),
2446  size);
2447 
2448  // Copy the result back to the host
2449  Number result;
2450  error_code = cudaMemcpy(&result,
2451  result_device,
2452  sizeof(Number),
2453  cudaMemcpyDeviceToHost);
2454  AssertCuda(error_code);
2455  // Free the memory on the device
2456  error_code = cudaFree(result_device);
2457  AssertCuda(error_code);
2458 
2459  return result;
2460  }
2461 
2462  template <typename real_type>
2463  static void
2464  norm_1(
2465  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2466  const size_type size,
2467  real_type & sum,
2468  ::MemorySpace::
2469  MemorySpaceData<Number, ::MemorySpace::CUDA>
2470  &data)
2471  {
2472  Number * result_device;
2473  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2474  AssertCuda(error_code);
2475  error_code = cudaMemset(result_device, 0, sizeof(Number));
2476 
2477  const int n_blocks = 1 + size / (chunk_size * block_size);
2478  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2479  Number,
2480  ::LinearAlgebra::CUDAWrappers::kernel::L1Norm<Number>>
2481  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2482  data.values_dev.get(),
2483  size);
2484 
2485  // Copy the result back to the host
2486  error_code = cudaMemcpy(&sum,
2487  result_device,
2488  sizeof(Number),
2489  cudaMemcpyDeviceToHost);
2490  AssertCuda(error_code);
2491  // Free the memory on the device
2492  error_code = cudaFree(result_device);
2493  AssertCuda(error_code);
2494  }
2495 
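      // No general lp-norm kernel is available for the CUDA memory space,
      // so norm_p simply aborts with ExcNotImplemented().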
2496  template <typename real_type>
2497  static void
2498  norm_p(
2499  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2500  const size_type,
2501  real_type &,
2502  real_type,
2503  const ::MemorySpace::MemorySpaceData<Number,
2504  ::MemorySpace::CUDA> &)
2505  {
2506  Assert(false, ExcNotImplemented());
2507  }
2508 
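      // Fused update and inner product: the add_and_dot kernel computes
      // data += a * v and accumulates the inner product of the updated
      // data with w into a single device-side result, which is then
      // copied back to the host.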
2509  static Number
2510  add_and_dot(
2511  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2512  const size_type size,
2513  const Number a,
2514  const ::MemorySpace::
2515  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2516  const ::MemorySpace::
2517  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2518  ::MemorySpace::
2519  MemorySpaceData<Number, ::MemorySpace::CUDA>
2520  &data)
2521  {
2522  Number * res_d;
2523  cudaError_t error_code = cudaMalloc(&res_d, sizeof(Number));
2524  AssertCuda(error_code);
2525  error_code = cudaMemset(res_d, 0, sizeof(Number));
2526  AssertCuda(error_code);
2527 
2528  const int n_blocks = 1 + size / (chunk_size * block_size);
2529  ::LinearAlgebra::CUDAWrappers::kernel::add_and_dot<Number>
2530  <<<dim3(n_blocks, 1), dim3(block_size)>>>(res_d,
2531  data.values_dev.get(),
2532  v_data.values_dev.get(),
2533  w_data.values_dev.get(),
2534  a,
2535  size);
2536 
2537  Number res;
2538  error_code =
2539  cudaMemcpy(&res, res_d, sizeof(Number), cudaMemcpyDeviceToHost);
2540  AssertCuda(error_code);
2541  error_code = cudaFree(res_d);
2542 
2543  return res;
2544  }
2545  };
2546 #endif
2547  } // namespace VectorOperations
2548 } // namespace internal
2549 
2550 DEAL_II_NAMESPACE_CLOSE
2551 
2552 #endif