Reference documentation for deal.II version 9.0.0
vector_operations_internal.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2016 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vector_operations_internal_h
18 #define dealii_vector_operations_internal_h
19 
20 #include <deal.II/base/multithread_info.h>
21 #include <deal.II/base/parallel.h>
22 #include <deal.II/base/thread_management.h>
23 #include <deal.II/base/vectorization.h>
24 
25 #include <cstring>
26 #include <cstdio>
27 
28 DEAL_II_NAMESPACE_OPEN
29 
30 namespace internal
31 {
32  namespace VectorOperations
33  {
34  typedef types::global_dof_index size_type;
35 
36  template <typename T>
37  bool is_non_negative (const T &t)
38  {
39  return t >= 0;
40  }
41 
42 
43  template <typename T>
44  bool is_non_negative (const std::complex<T> &)
45  {
46  Assert (false,
47  ExcMessage ("Complex numbers do not have an ordering."));
48 
49  return false;
50  }
51 
52 
53  template <typename T>
54  void print (const T &t,
55  const char *format)
56  {
57  if (format != nullptr)
58  std::printf (format, t);
59  else
60  std::printf (" %5.2f", double(t));
61  }
62 
63 
64 
65  template <typename T>
66  void print (const std::complex<T> &t,
67  const char *format)
68  {
69  if (format != nullptr)
70  std::printf (format, t.real(), t.imag());
71  else
72  std::printf (" %5.2f+%5.2fi",
73  double(t.real()), double(t.imag()));
74  }
75 
76  // call std::copy, except in
77  // the case where we want to copy
78  // from std::complex to a
79  // non-complex type
80  template <typename T, typename U>
81  void copy (const T *begin,
82  const T *end,
83  U *dest)
84  {
85  std::copy (begin, end, dest);
86  }
87 
88  template <typename T, typename U>
89  void copy (const std::complex<T> *begin,
90  const std::complex<T> *end,
91  std::complex<U> *dest)
92  {
93  std::copy (begin, end, dest);
94  }
95 
96  template <typename T, typename U>
97  void copy (const std::complex<T> *,
98  const std::complex<T> *,
99  U *)
100  {
101  Assert (false, ExcMessage ("Can't convert a vector of complex numbers "
102  "into a vector of reals/doubles"));
103  }
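// As an illustration of the three overloads above (an illustrative sketch,
// not part of the original header): real-to-real and complex-to-complex
// copies forward to std::copy, whereas a complex source with a real-valued
// destination selects the third overload and trips its Assert. The arrays
// below are assumed caller-side variables:
//
//   std::complex<double> src[4];
//   std::complex<double> dst_ok[4];
//   double               dst_bad[4];
//   dealii::internal::VectorOperations::copy (src, src+4, dst_ok);   // fine: std::copy
//   dealii::internal::VectorOperations::copy (src, src+4, dst_bad);  // fails the Assert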
104 
105 
106 
107 #ifdef DEAL_II_WITH_THREADS
108 
116  template <typename Functor>
117  struct TBBForFunctor
118  {
119  TBBForFunctor(Functor &functor,
120  const size_type start,
121  const size_type end)
122  :
123  functor(functor),
124  start(start),
125  end(end)
126  {
127  const size_type vec_size = end-start;
128  // set chunk size for sub-tasks
129  const unsigned int gs = internal::VectorImplementation::minimum_parallel_grain_size;
130  n_chunks = std::min(static_cast<size_type>(4*MultithreadInfo::n_threads()),
131  vec_size / gs);
132  chunk_size = vec_size / n_chunks;
133 
134  // round to next multiple of 512 (or minimum grain size if that happens
135  // to be smaller). this is advantageous because our accumulation
136  // algorithms favor lengths of a power of 2 due to pairwise summation ->
137  // at most one 'oddly' sized chunk
138  if (chunk_size > 512)
139  chunk_size = ((chunk_size + 511)/512)*512;
140  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
141  AssertIndexRange((n_chunks-1)*chunk_size, vec_size);
142  AssertIndexRange(vec_size, n_chunks*chunk_size+1);
143  }
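// Worked example of the chunking logic above (illustrative only; it assumes
// MultithreadInfo::n_threads()==4 and a minimum_parallel_grain_size of 4096,
// neither of which is fixed by this constructor): for vec_size==100000 the
// first guess is n_chunks = min(4*4, 100000/4096) = 16 and chunk_size = 6250,
// which is rounded up to 13*512 = 6656; the final n_chunks =
// (100000+6655)/6656 = 16, i.e. 15 full chunks of 6656 entries plus one short
// chunk, so at most one chunk has a length that is not a multiple of 512.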
144 
145  void operator() (const tbb::blocked_range<size_type> &range) const
146  {
147  const size_type r_begin = start + range.begin()*chunk_size;
148  const size_type r_end = std::min(start + range.end()*chunk_size, end);
149  functor(r_begin, r_end);
150  }
151 
152  Functor &functor;
153  const size_type start;
154  const size_type end;
155  unsigned int n_chunks;
156  size_type chunk_size;
157  };
158 #endif
159 
160  template <typename Functor>
161  void parallel_for(Functor &functor,
162  size_type start,
163  size_type end,
164  std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
165  {
166 #ifdef DEAL_II_WITH_THREADS
167  size_type vec_size = end-start;
168  // only go to the parallel function in case there are at least 4 parallel
169  // items, otherwise the overhead is too large
170  if (vec_size >= 4*internal::VectorImplementation::minimum_parallel_grain_size &&
171  MultithreadInfo::n_threads() > 1)
172  {
173  Assert(partitioner.get() != nullptr,
174  ExcInternalError("Unexpected initialization of Vector that does "
175  "not set the TBB partitioner to a usable state."));
176  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
177  partitioner->acquire_one_partitioner();
178 
179  TBBForFunctor<Functor> generic_functor(functor, start, end);
180  tbb::parallel_for (tbb::blocked_range<size_type> (0,
181  generic_functor.n_chunks,
182  1),
183  generic_functor,
184  *tbb_partitioner);
185  partitioner->release_one_partitioner(tbb_partitioner);
186  }
187  else if (vec_size > 0)
188  functor(start,end);
189 #else
190  functor(start,end);
191  (void)partitioner;
192 #endif
193  }
194 
195 
196  // Define the functors necessary to use SIMD with TBB. We also include the
197  // simple copy and set operations
198 
199  template <typename Number>
200  struct Vector_set
201  {
202  Vector_set(Number value, Number *dst)
203  :
204  value(value),
205  dst(dst)
206  {
207  Assert(dst != nullptr, ExcInternalError());
208  }
209 
210  void operator() (const size_type begin, const size_type end) const
211  {
212  Assert (end>=begin, ExcInternalError());
213 
214  if (value == Number())
215  {
216 #ifdef DEAL_II_WITH_CXX17
217  if constexpr (std::is_trivial<Number>::value)
218 #else
219  if (std::is_trivial<Number>::value)
220 #endif
221  {
222  std::memset(dst+begin, 0, sizeof(Number)*(end-begin));
223  return;
224  }
225  }
226  std::fill (dst+begin, dst+end, value);
227  }
228 
229  const Number value;
230  Number *const dst;
231  };
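// A minimal usage sketch combining Vector_set with parallel_for() above
// (illustrative only, not code that appears in this header). It mirrors how
// the deal.II Vector class assigns a scalar to all entries; 'data', 'n' and
// 'thread_loop_partitioner' are assumed caller-side objects:
//
//   std::shared_ptr<parallel::internal::TBBPartitioner> thread_loop_partitioner =
//     std::make_shared<parallel::internal::TBBPartitioner>();
//   Vector_set<double> setter (1.0, data);
//   parallel_for (setter, 0, n, thread_loop_partitioner);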
232 
233  template <typename Number, typename OtherNumber>
234  struct Vector_copy
235  {
236  Vector_copy(const OtherNumber *src, Number *dst)
237  :
238  src(src),
239  dst(dst)
240  {
241  Assert(src!=nullptr, ExcInternalError());
242  Assert(dst!=nullptr, ExcInternalError());
243  }
244 
245  void operator() (const size_type begin, const size_type end) const
246  {
247  Assert (end>=begin, ExcInternalError());
248 
249 #if __GNUG__ && __GNUC__ < 5
250  if ( __has_trivial_copy(Number) && std::is_same<Number,OtherNumber>::value)
251 #else
252 # ifdef DEAL_II_WITH_CXX17
253  if constexpr (std::is_trivially_copyable<Number>() && std::is_same<Number,OtherNumber>::value)
254 # else
255  if (std::is_trivially_copyable<Number>() && std::is_same<Number,OtherNumber>::value)
256 # endif
257 #endif
258  std::memcpy(dst+begin, src+begin, (end-begin)*sizeof(Number));
259  else
260  {
261  DEAL_II_OPENMP_SIMD_PRAGMA
262  for (size_type i=begin; i<end; ++i)
263  dst[i] = src[i];
264  }
265  }
266 
267  const OtherNumber *const src;
268  Number *const dst;
269  };
270 
271  template <typename Number>
272  struct Vectorization_multiply_factor
273  {
274  Vectorization_multiply_factor(Number *val, Number factor)
275  :
276  val(val),
277  factor(factor)
278  {}
279 
280  void operator() (const size_type begin, const size_type end) const
281  {
282  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
283  {
284  DEAL_II_OPENMP_SIMD_PRAGMA
285  for (size_type i=begin; i<end; ++i)
286  val[i] *= factor;
287  }
288  else
289  {
290  for (size_type i=begin; i<end; ++i)
291  val[i] *= factor;
292  }
293  }
294 
295  Number *val;
296  Number factor;
297  };
298 
299  template <typename Number>
300  struct Vectorization_add_av
301  {
302  Vectorization_add_av(Number *val, Number *v_val, Number factor)
303  :
304  val(val),
305  v_val(v_val),
306  factor(factor)
307  {}
308 
309  void operator() (const size_type begin, const size_type end) const
310  {
311  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
312  {
313  DEAL_II_OPENMP_SIMD_PRAGMA
314  for (size_type i=begin; i<end; ++i)
315  val[i] += factor*v_val[i];
316  }
317  else
318  {
319  for (size_type i=begin; i<end; ++i)
320  val[i] += factor*v_val[i];
321  }
322  }
323 
324  Number *val;
325  Number *v_val;
326  Number factor;
327  };
328 
329  template <typename Number>
330  struct Vectorization_sadd_xav
331  {
332  Vectorization_sadd_xav(Number *val, Number *v_val, Number a, Number x)
333  :
334  val(val),
335  v_val(v_val),
336  a(a),
337  x(x)
338  {}
339 
340  void operator() (const size_type begin, const size_type end) const
341  {
342  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
343  {
344  DEAL_II_OPENMP_SIMD_PRAGMA
345  for (size_type i=begin; i<end; ++i)
346  val[i] = x*val[i] + a*v_val[i];
347  }
348  else
349  {
350  for (size_type i=begin; i<end; ++i)
351  val[i] = x*val[i] + a*v_val[i];
352  }
353  }
354 
355  Number *val;
356  Number *v_val;
357  Number a;
358  Number x;
359  };
360 
361  template <typename Number>
362  struct Vectorization_subtract_v
363  {
364  Vectorization_subtract_v(Number *val, Number *v_val)
365  :
366  val(val),
367  v_val(v_val)
368  {}
369 
370  void operator() (const size_type begin, const size_type end) const
371  {
372  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
373  {
374  DEAL_II_OPENMP_SIMD_PRAGMA
375  for (size_type i=begin; i<end; ++i)
376  val[i] -= v_val[i];
377  }
378  else
379  {
380  for (size_type i=begin; i<end; ++i)
381  val[i] -= v_val[i];
382  }
383  }
384 
385  Number *val;
386  Number *v_val;
387  };
388 
389  template <typename Number>
390  struct Vectorization_add_factor
391  {
392  Vectorization_add_factor(Number *val, Number factor)
393  :
394  val(val),
395  factor(factor)
396  {}
397 
398  void operator() (const size_type begin, const size_type end) const
399  {
400  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
401  {
402  DEAL_II_OPENMP_SIMD_PRAGMA
403  for (size_type i=begin; i<end; ++i)
404  val[i] += factor;
405  }
406  else
407  {
408  for (size_type i=begin; i<end; ++i)
409  val[i] += factor;
410  }
411  }
412 
413  Number *val;
414  Number factor;
415  };
416 
417  template <typename Number>
418  struct Vectorization_add_v
419  {
420  Vectorization_add_v(Number *val, Number *v_val)
421  :
422  val(val),
423  v_val(v_val)
424  {}
425 
426  void operator() (const size_type begin, const size_type end) const
427  {
428  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
429  {
430  DEAL_II_OPENMP_SIMD_PRAGMA
431  for (size_type i=begin; i<end; ++i)
432  val[i] += v_val[i];
433  }
434  else
435  {
436  for (size_type i=begin; i<end; ++i)
437  val[i] += v_val[i];
438  }
439  }
440 
441  Number *val;
442  Number *v_val;
443  };
444 
445  template <typename Number>
446  struct Vectorization_add_avpbw
447  {
448  Vectorization_add_avpbw(Number *val, Number *v_val, Number *w_val, Number a, Number b)
449  :
450  val(val),
451  v_val(v_val),
452  w_val(w_val),
453  a(a),
454  b(b)
455  {}
456 
457  void operator() (const size_type begin, const size_type end) const
458  {
459  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
460  {
461  DEAL_II_OPENMP_SIMD_PRAGMA
462  for (size_type i=begin; i<end; ++i)
463  val[i] = val[i] + a*v_val[i] + b*w_val[i];
464  }
465  else
466  {
467  for (size_type i=begin; i<end; ++i)
468  val[i] = val[i] + a*v_val[i] + b*w_val[i];
469  }
470  }
471 
472  Number *val;
473  Number *v_val;
474  Number *w_val;
475  Number a;
476  Number b;
477  };
478 
479  template <typename Number>
480  struct Vectorization_sadd_xv
481  {
482  Vectorization_sadd_xv(Number *val, Number *v_val, Number x)
483  :
484  val(val),
485  v_val(v_val),
486  x(x)
487  {}
488 
489  void operator() (const size_type begin, const size_type end) const
490  {
491  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
492  {
493  DEAL_II_OPENMP_SIMD_PRAGMA
494  for (size_type i=begin; i<end; ++i)
495  val[i] = x*val[i] + v_val[i];
496  }
497  else
498  {
499  for (size_type i=begin; i<end; ++i)
500  val[i] = x*val[i] + v_val[i];
501  }
502  }
503 
504  Number *val;
505  Number *v_val;
506  Number x;
507  };
508 
509  template <typename Number>
510  struct Vectorization_sadd_xavbw
511  {
512  Vectorization_sadd_xavbw(Number *val, Number *v_val, Number *w_val,
513  Number x, Number a, Number b)
514  :
515  val(val),
516  v_val(v_val),
517  w_val(w_val),
518  x(x),
519  a(a),
520  b(b)
521  {}
522 
523  void operator() (const size_type begin, const size_type end) const
524  {
525  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
526  {
527  DEAL_II_OPENMP_SIMD_PRAGMA
528  for (size_type i=begin; i<end; ++i)
529  val[i] = x*val[i] + a*v_val[i] + b*w_val[i];
530  }
531  else
532  {
533  for (size_type i=begin; i<end; ++i)
534  val[i] = x*val[i] + a*v_val[i] + b*w_val[i];
535  }
536  }
537 
538  Number *val;
539  Number *v_val;
540  Number *w_val;
541  Number x;
542  Number a;
543  Number b;
544  };
545 
546  template <typename Number>
547  struct Vectorization_scale
548  {
549  Vectorization_scale(Number *val, Number *v_val)
550  :
551  val(val),
552  v_val(v_val)
553  {}
554 
555  void operator() (const size_type begin, const size_type end) const
556  {
557  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
558  {
559  DEAL_II_OPENMP_SIMD_PRAGMA
560  for (size_type i=begin; i<end; ++i)
561  val[i] *= v_val[i];
562  }
563  else
564  {
565  for (size_type i=begin; i<end; ++i)
566  val[i] *= v_val[i];
567  }
568  }
569 
570  Number *val;
571  Number *v_val;
572  };
573 
574  template <typename Number>
575  struct Vectorization_equ_au
576  {
577  Vectorization_equ_au(Number *val, Number *u_val, Number a)
578  :
579  val(val),
580  u_val(u_val),
581  a(a)
582  {}
583 
584  void operator() (const size_type begin, const size_type end) const
585  {
586  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
587  {
588  DEAL_II_OPENMP_SIMD_PRAGMA
589  for (size_type i=begin; i<end; ++i)
590  val[i] = a*u_val[i];
591  }
592  else
593  {
594  for (size_type i=begin; i<end; ++i)
595  val[i] = a*u_val[i];
596  }
597  }
598 
599  Number *val;
600  Number *u_val;
601  Number a;
602  };
603 
604  template <typename Number>
605  struct Vectorization_equ_aubv
606  {
607  Vectorization_equ_aubv(Number *val, Number *u_val, Number *v_val,
608  Number a, Number b)
609  :
610  val(val),
611  u_val(u_val),
612  v_val(v_val),
613  a(a),
614  b(b)
615  {}
616 
617  void operator() (const size_type begin, const size_type end) const
618  {
619  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
620  {
621  DEAL_II_OPENMP_SIMD_PRAGMA
622  for (size_type i=begin; i<end; ++i)
623  val[i] = a*u_val[i] + b*v_val[i];
624  }
625  else
626  {
627  for (size_type i=begin; i<end; ++i)
628  val[i] = a*u_val[i] + b*v_val[i];
629  }
630  }
631 
632  Number *val;
633  Number *u_val;
634  Number *v_val;
635  Number a;
636  Number b;
637  };
638 
639  template <typename Number>
640  struct Vectorization_equ_aubvcw
641  {
642  Vectorization_equ_aubvcw(Number *val, Number *u_val, Number *v_val,
643  Number *w_val, Number a, Number b, Number c)
644  :
645  val(val),
646  u_val(u_val),
647  v_val(v_val),
648  w_val(w_val),
649  a(a),
650  b(b),
651  c(c)
652  {}
653 
654  void operator() (const size_type begin, const size_type end) const
655  {
656  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
657  {
658  DEAL_II_OPENMP_SIMD_PRAGMA
659  for (size_type i=begin; i<end; ++i)
660  val[i] = a*u_val[i] + b*v_val[i] + c*w_val[i];
661  }
662  else
663  {
664  for (size_type i=begin; i<end; ++i)
665  val[i] = a*u_val[i] + b*v_val[i] + c*w_val[i];
666  }
667  }
668 
669  Number *val;
670  Number *u_val;
671  Number *v_val;
672  Number *w_val;
673  Number a;
674  Number b;
675  Number c;
676  };
677 
678  template <typename Number>
679  struct Vectorization_ratio
680  {
681  Vectorization_ratio(Number *val, Number *a_val, Number *b_val)
682  :
683  val(val),
684  a_val(a_val),
685  b_val(b_val)
686  {}
687 
688  void operator() (const size_type begin, const size_type end) const
689  {
690  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
691  {
692  DEAL_II_OPENMP_SIMD_PRAGMA
693  for (size_type i=begin; i<end; ++i)
694  val[i] = a_val[i]/b_val[i];
695  }
696  else
697  {
698  for (size_type i=begin; i<end; ++i)
699  val[i] = a_val[i]/b_val[i];
700  }
701  }
702 
703  Number *val;
704  Number *a_val;
705  Number *b_val;
706  };
707 
708 
709 
710  // All sums over all the vector entries (l2-norm, inner product, etc.) are
711  // performed with the same code, using a templated operation defined
712  // here. There are always two versions defined, a standard one that covers
713  // most cases and a vectorized one which is only for equal types and float
714  // and double.
715  template <typename Number, typename Number2>
716  struct Dot
717  {
718  static const bool vectorizes = std::is_same<Number,Number2>::value &&
719  (VectorizedArray<Number>::n_array_elements > 1);
720 
721  Dot(const Number *X, const Number2 *Y)
722  :
723  X(X),
724  Y(Y)
725  {}
726 
727  Number
728  operator() (const size_type i) const
729  {
730  return X[i] * Number(numbers::NumberTraits<Number2>::conjugate(Y[i]));
731  }
732 
733  VectorizedArray<Number>
734  do_vectorized(const size_type i) const
735  {
736  VectorizedArray<Number> x, y;
737  x.load(X+i);
738  y.load(Y+i);
739 
740  // the following operation in VectorizedArray does an element-wise
741  // scalar product without taking into account complex values and
742  // the need to take the complex-conjugate of one argument. this
743  // may be a bug, but because all VectorizedArray classes only
744  // work on real scalars, it doesn't really matter very much.
745  // in any case, assert that we really don't get here for
746  // complex-valued objects
747  static_assert (numbers::NumberTraits<Number>::is_complex == false,
748  "This operation is not correctly implemented for "
749  "complex-valued objects.");
750  return x * y;
751  }
752 
753  const Number *X;
754  const Number2 *Y;
755  };
756 
757  template <typename Number, typename RealType>
758  struct Norm2
759  {
760  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
761 
762  Norm2(const Number *X)
763  :
764  X(X)
765  {}
766 
767  RealType
768  operator() (const size_type i) const
769  {
770  return numbers::NumberTraits<Number>::abs_square(X[i]);
771  }
772 
773  VectorizedArray<Number>
774  do_vectorized(const size_type i) const
775  {
776  VectorizedArray<Number> x;
777  x.load(X+i);
778  return x * x;
779  }
780 
781  const Number *X;
782  };
783 
784  template <typename Number, typename RealType>
785  struct Norm1
786  {
787  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
788 
789  Norm1(const Number *X)
790  :
791  X(X)
792  {}
793 
794  RealType
795  operator() (const size_type i) const
796  {
797  return numbers::NumberTraits<Number>::abs(X[i]);
798  }
799 
800  VectorizedArray<Number>
801  do_vectorized(const size_type i) const
802  {
803  VectorizedArray<Number> x;
804  x.load(X+i);
805  return std::abs(x);
806  }
807 
808  const Number *X;
809  };
810 
811  template <typename Number, typename RealType>
812  struct NormP
813  {
814  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
815 
816  NormP(const Number *X, RealType p)
817  :
818  X(X),
819  p(p)
820  {}
821 
822  RealType
823  operator() (const size_type i) const
824  {
825  return std::pow(numbers::NumberTraits<Number>::abs(X[i]), p);
826  }
827 
828  VectorizedArray<Number>
829  do_vectorized(const size_type i) const
830  {
831  VectorizedArray<Number> x;
832  x.load(X+i);
833  return std::pow(std::abs(x),p);
834  }
835 
836  const Number *X;
837  RealType p;
838  };
839 
840  template <typename Number>
841  struct MeanValue
842  {
843  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
844 
845  MeanValue(const Number *X)
846  :
847  X(X)
848  {}
849 
850  Number
851  operator() (const size_type i) const
852  {
853  return X[i];
854  }
855 
856  VectorizedArray<Number>
857  do_vectorized(const size_type i) const
858  {
859  VectorizedArray<Number> x;
860  x.load(X+i);
861  return x;
862  }
863 
864  const Number *X;
865  };
866 
867  template <typename Number>
868  struct AddAndDot
869  {
870  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
871 
872  AddAndDot(Number *X, const Number *V, const Number *W, Number a)
873  :
874  X(X),
875  V(V),
876  W(W),
877  a(a)
878  {}
879 
880  Number
881  operator() (const size_type i) const
882  {
883  X[i] += a * V[i];
884  return X[i] * Number(numbers::NumberTraits<Number>::conjugate(W[i]));
885  }
886 
887  VectorizedArray<Number>
888  do_vectorized(const size_type i) const
889  {
890  VectorizedArray<Number> x, w, v;
891  x.load(X+i);
892  v.load(V+i);
893  x += a * v;
894  x.store(X+i);
895  // may only load from W after storing in X because the pointers might
896  // point to the same memory
897  w.load(W+i);
898 
899  // the following operation in VectorizedArray does an element-wise
900  // scalar product without taking into account complex values and
901  // the need to take the complex-conjugate of one argument. this
902  // may be a bug, but because all VectorizedArray classes only
903  // work on real scalars, it doesn't really matter very much.
904  // in any case, assert that we really don't get here for
905  // complex-valued objects
906  static_assert (numbers::NumberTraits<Number>::is_complex == false,
907  "This operation is not correctly implemented for "
908  "complex-valued objects.");
909  return x * w;
910  }
911 
912  Number *X;
913  const Number *V, *W;
914  Number a;
915  };
916 
917 
918 
919  // this is the main working loop for all vector sums using the templated
920  // operation above. it accumulates the sums using a block-wise summation
921  // algorithm with post-update. this blocked algorithm has been proposed in
922  // a similar form by Castaldo, Whaley and Chronopoulos (SIAM
923  // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible
924  // block size, 2. Sometimes it is referred to as pairwise summation. The
925  // worst case error made by this algorithm is on the order O(eps *
926  // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even
927  // though the Kahan summation is even more accurate with an error O(eps)
928  // by carrying along remainders not captured by the main sum, that involves
929  // additional costs which are not worthwhile. See the Wikipedia article on
930  // the Kahan summation algorithm.
931 
932  // The algorithm implemented here has the additional benefit that it is
933  // easily parallelized without changing the order of how the elements are
934  // added (floating point addition is not associative). For the same vector
935  // size and minimum_parallel_grainsize, the blocks are always the
936  // same and added pairwise.
937 
938  // The depth of recursion is controlled by the 'magic' parameter
939  // vector_accumulation_recursion_threshold: If the length is below
940  // vector_accumulation_recursion_threshold * 32 (32 is the part of code we
941  // unroll), a straight loop instead of recursion will be used. At the
942  // innermost level, eight values are added consecutively in order to better
943  // balance multiplications and additions.
944 
945  // Loops are unrolled as follows: the range [first,last) is broken into
946  // @p n_chunks each of size 32 plus the @p remainder.
947  // accumulate_regular() does the work on 32*n_chunks elements employing SIMD
948  // if possible and stores the result of the operation for each chunk in @p outer_results.
949 
950  // The code returns the result as the last argument in order to make
951  // spawning tasks simpler and use automatic template deduction.
952 
953 
958  const unsigned int vector_accumulation_recursion_threshold = 128;
959 
960  template <typename Operation, typename ResultType>
961  void accumulate_recursive (const Operation &op,
962  const size_type first,
963  const size_type last,
964  ResultType &result)
965  {
966  const size_type vec_size = last - first;
967  if (vec_size <= vector_accumulation_recursion_threshold * 32)
968  {
969  // the vector is short enough so we perform the summation. first
970  // work on the regular part. The innermost 32 values are expanded in
971  // order to obtain known loop bounds for most of the work.
972  size_type index = first;
973  ResultType outer_results [vector_accumulation_recursion_threshold];
974 
975  // set the zeroth element to zero to correctly handle the case where
976  // vec_size == 0
977  outer_results[0] = ResultType();
978 
979  // the variable serves two purposes: (i) number of chunks (each 32 indices)
980  // for the given size; all results are stored in outer_results[0,n_chunks)
981  // (ii) in the SIMD case n_chunks is also a next free index in outer_results[]
982  // to which we can write after accumulate_regular() is executed.
983  size_type n_chunks = vec_size / 32;
984  const size_type remainder = vec_size % 32;
985  Assert (remainder == 0 || n_chunks < vector_accumulation_recursion_threshold,
986  ExcInternalError());
987 
988  // Select between the regular version and vectorized version based
989  // on the number types we are given. To choose the vectorized
990  // version often enough, we need to have all tasks but the last one
991  // to be divisible by the vectorization length
992  accumulate_regular(op, n_chunks, index, outer_results,
993  std::integral_constant<bool, Operation::vectorizes>());
994 
995  // now work on the remainder, i.e., the last up to 32 values. Use
996  // switch statement with fall-through to work on these values.
997  if (remainder > 0)
998  {
999  // if we got here, it means that (vec_size <= vector_accumulation_recursion_threshold * 32),
1000  // which is to say that the domain can be split into n_chunks <= vector_accumulation_recursion_threshold:
1001  AssertIndexRange(n_chunks, vector_accumulation_recursion_threshold+1);
1002  // split the remainder into chunks of 8, there could be up to 3
1003  // such chunks since remainder < 32.
1004  // Work on those chunks without any SIMD, that is we call op(index).
1005  const size_type inner_chunks = remainder / 8;
1006  Assert (inner_chunks <= 3, ExcInternalError());
1007  const size_type remainder_inner = remainder % 8;
1008  ResultType r0 = ResultType(), r1 = ResultType(),
1009  r2 = ResultType();
1010  switch (inner_chunks)
1011  {
1012  case 3:
1013  r2 = op(index++);
1014  for (size_type j=1; j<8; ++j)
1015  r2 += op(index++);
1016  DEAL_II_FALLTHROUGH;
1017  case 2:
1018  r1 = op(index++);
1019  for (size_type j=1; j<8; ++j)
1020  r1 += op(index++);
1021  r1 += r2;
1022  DEAL_II_FALLTHROUGH;
1023  case 1:
1024  r2 = op(index++);
1025  for (size_type j=1; j<8; ++j)
1026  r2 += op(index++);
1027  DEAL_II_FALLTHROUGH;
1028  default:
1029  for (size_type j=0; j<remainder_inner; ++j)
1030  r0 += op(index++);
1031  r0 += r2;
1032  r0 += r1;
1033  if (n_chunks == vector_accumulation_recursion_threshold)
1034  outer_results[vector_accumulation_recursion_threshold-1] += r0;
1035  else
1036  {
1037  outer_results[n_chunks] = r0;
1038  n_chunks++;
1039  }
1040  break;
1041  }
1042  }
1043  // make sure we worked through all indices
1044  AssertDimension(index, last);
1045 
1046  // now sum the results from the chunks stored in outer_results[0,n_chunks)
1047  // recursively
1048  while (n_chunks > 1)
1049  {
1050  if (n_chunks % 2 == 1)
1051  outer_results[n_chunks++] = ResultType();
1052  for (size_type i=0; i<n_chunks; i+=2)
1053  outer_results[i/2] = outer_results[i] + outer_results[i+1];
1054  n_chunks /= 2;
1055  }
1056  result = outer_results[0];
1057  }
1058  else
1059  {
1060  // split vector into four pieces and work on the pieces
1061  // recursively. Make pieces (except last) divisible by one fourth the
1062  // recursion threshold.
1063  const size_type new_size =
1064  (vec_size / (vector_accumulation_recursion_threshold * 32)) *
1065  vector_accumulation_recursion_threshold * 8;
1066  Assert (first+3*new_size < last,
1067  ExcInternalError());
1068  ResultType r0, r1, r2, r3;
1069  accumulate_recursive (op, first, first+new_size, r0);
1070  accumulate_recursive (op, first+new_size, first+2*new_size, r1);
1071  accumulate_recursive (op, first+2*new_size, first+3*new_size, r2);
1072  accumulate_recursive (op, first+3*new_size, last, r3);
1073  r0 += r1;
1074  r2 += r3;
1075  result = r0 + r2;
1076  }
1077  }
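// Usage sketch for accumulate_recursive() (illustrative only, not part of
// this header): a serial inner product of two caller-side arrays 'x' and 'y'
// of length 'n' combines the Dot operation above with this routine:
//
//   double inner_product = 0.;
//   Dot<double,double> dot (x, y);
//   accumulate_recursive (dot, 0, n, inner_product);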
1078 
1079 
1080  // this is the inner working routine for the accumulation loops
1081  // below. This is the standard case where the loop bounds are known. We
1082  // pulled this function out of the regular accumulate routine because we
1083  // might do this thing vectorized (see specialized function below)
1084  template <typename Operation, typename ResultType>
1085  void
1086  accumulate_regular(const Operation &op,
1087  size_type &n_chunks,
1088  size_type &index,
1089  ResultType (&outer_results)[vector_accumulation_recursion_threshold],
1090  std::integral_constant<bool, false>)
1091  {
1092  // note that each chunk is chosen to have a width of 32, thereby the index
1093  // is incremented by 4*8 for each @p i.
1094  for (size_type i=0; i<n_chunks; ++i)
1095  {
1096  ResultType r0 = op(index);
1097  ResultType r1 = op(index+1);
1098  ResultType r2 = op(index+2);
1099  ResultType r3 = op(index+3);
1100  index += 4;
1101  for (size_type j=1; j<8; ++j, index += 4)
1102  {
1103  r0 += op(index);
1104  r1 += op(index+1);
1105  r2 += op(index+2);
1106  r3 += op(index+3);
1107  }
1108  r0 += r1;
1109  r2 += r3;
1110  outer_results[i] = r0 + r2;
1111  }
1112  }
1113 
1114 
1115 
1116  // this is the inner working routine for the accumulation loops
1117  // below. This is the specialized case where the loop bounds are known and
1118  // where we can vectorize. In that case, we request the 'do_vectorized'
1119  // routine of the operation instead of the regular one which does several
1120  // operations at once.
1121  template <typename Operation, typename Number>
1122  void
1123  accumulate_regular(const Operation &op,
1124  size_type &n_chunks,
1125  size_type &index,
1126  Number (&outer_results)[vector_accumulation_recursion_threshold],
1127  std::integral_constant<bool, true>)
1128  {
1129  // we start from @p index and work on @p n_chunks chunks, each of size 32.
1130  // In order to employ SIMD and work on @p nvecs chunks at a time, we split this
1131  // loop yet again:
1132  // First we work on (n_chunks/nvecs) chunks, where each chunk processes
1133  // nvecs*(4*8) elements.
1134 
1135  const unsigned int nvecs = VectorizedArray<Number>::n_array_elements;
1136  const size_type regular_chunks = n_chunks/nvecs;
1137  for (size_type i=0; i<regular_chunks; ++i)
1138  {
1139  VectorizedArray<Number> r0 = op.do_vectorized(index);
1140  VectorizedArray<Number> r1 = op.do_vectorized(index+nvecs);
1141  VectorizedArray<Number> r2 = op.do_vectorized(index+2*nvecs);
1142  VectorizedArray<Number> r3 = op.do_vectorized(index+3*nvecs);
1143  index += nvecs*4;
1144  for (size_type j=1; j<8; ++j, index += nvecs*4)
1145  {
1146  r0 += op.do_vectorized(index);
1147  r1 += op.do_vectorized(index+nvecs);
1148  r2 += op.do_vectorized(index+2*nvecs);
1149  r3 += op.do_vectorized(index+3*nvecs);
1150  }
1151  r0 += r1;
1152  r2 += r3;
1153  r0 += r2;
1154  r0.store(&outer_results[i*VectorizedArray<Number>::n_array_elements]);
1155  }
1156 
1157  // If we are treating a case where the vector length is not divisible by
1158  // the vectorization length, need a cleanup loop
1159  // The remaining chunks are processed one by one starting from regular_chunks * nvecs;
1160  // We do as much as possible with 2 SIMD operations within each chunk.
1161  // Here we assume that nvecs < 32/2 = 16 as well as 16%nvecs==0.
1162  AssertIndexRange(VectorizedArray<Number>::n_array_elements,
1163  17);
1164  Assert (16 % nvecs == 0,
1165  ExcInternalError());
1166  if (n_chunks % VectorizedArray<Number>::n_array_elements != 0)
1167  {
1168  VectorizedArray<Number> r0 = VectorizedArray<Number>(),
1169  r1 = VectorizedArray<Number>();
1170  const size_type start_irreg = regular_chunks * nvecs;
1171  for (size_type c=start_irreg; c<n_chunks; ++c)
1172  for (size_type j=0; j<32; j+=2*nvecs, index+=2*nvecs)
1173  {
1174  r0 += op.do_vectorized(index);
1175  r1 += op.do_vectorized(index+nvecs);
1176  }
1177  r0 += r1;
1178  r0.store(&outer_results[start_irreg]);
1179  // update n_chunks to denote unused element in outer_results[] from
1180  // which we can keep writing.
1181  n_chunks = start_irreg + VectorizedArray<Number>::n_array_elements;
1182  }
1183  }
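// A worked example of the bookkeeping above (illustrative; the concrete
// numbers are assumptions, not fixed by the code): with nvecs==4 and
// n_chunks==10 on entry, the main loop runs regular_chunks==2 times and fills
// outer_results[0..7] (four SIMD lanes per store). Since 10%4 != 0, the
// cleanup loop then processes chunks 8 and 9 with two SIMD accumulators,
// stores their four lanes to outer_results[8..11], and sets n_chunks=12 so
// that the caller reduces exactly those entries.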
1184 
1185 
1186 
1187 #ifdef DEAL_II_WITH_THREADS
1188 
1216  template <typename Operation, typename ResultType>
1217  struct TBBReduceFunctor
1218  {
1219  static const unsigned int threshold_array_allocate = 512;
1220 
1221  TBBReduceFunctor(const Operation &op,
1222  const size_type start,
1223  const size_type end)
1224  :
1225  op(op),
1226  start(start),
1227  end(end)
1228  {
1229  const size_type vec_size = end-start;
1230  // set chunk size for sub-tasks
1231  const unsigned int gs = internal::VectorImplementation::minimum_parallel_grain_size;
1232  n_chunks = std::min(static_cast<size_type>(4*MultithreadInfo::n_threads()),
1233  vec_size / gs);
1234  chunk_size = vec_size / n_chunks;
1235 
1236  // round to next multiple of 512 (or leave it at the minimum grain size
1237  // if that happens to be smaller). this is advantageous because our
1238  // algorithm favors lengths of a power of 2 due to pairwise summation ->
1239  // at most one 'oddly' sized chunk
1240  if (chunk_size > 512)
1241  chunk_size = ((chunk_size + 511)/512)*512;
1242  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
1243  AssertIndexRange((n_chunks-1)*chunk_size, vec_size);
1244  AssertIndexRange(vec_size, n_chunks*chunk_size+1);
1245 
1246  if (n_chunks > threshold_array_allocate)
1247  {
1248  // make sure we allocate an even number of elements,
1249  // access to the new last element is needed in do_sum()
1250  large_array.resize(2*((n_chunks+1)/2));
1251  array_ptr = large_array.data();
1252  }
1253  else
1254  array_ptr = &small_array[0];
1255  }
1256 
1261  void operator() (const tbb::blocked_range<size_type> &range) const
1262  {
1263  for (size_type i = range.begin(); i < range.end(); ++i)
1264  accumulate_recursive(op, start+i*chunk_size, std::min(start+(i+1)*chunk_size, end),
1265  array_ptr[i]);
1266  }
1267 
1268  ResultType do_sum() const
1269  {
1270  while (n_chunks > 1)
1271  {
1272  if (n_chunks % 2 == 1)
1273  array_ptr[n_chunks++] = ResultType();
1274  for (size_type i=0; i<n_chunks; i+=2)
1275  array_ptr[i/2] = array_ptr[i] + array_ptr[i+1];
1276  n_chunks /= 2;
1277  }
1278  return array_ptr[0];
1279  }
1280 
1281  const Operation &op;
1282  const size_type start;
1283  const size_type end;
1284 
1285  mutable unsigned int n_chunks;
1286  unsigned int chunk_size;
1287  ResultType small_array [threshold_array_allocate];
1288  std::vector<ResultType> large_array;
1289  // this variable either points to small_array or large_array depending on
1290  // the number of threads we want to feed
1291  mutable ResultType *array_ptr;
1292  };
1293 #endif
1294 
1295 
1296 
1301  template <typename Operation, typename ResultType>
1302  void parallel_reduce (const Operation &op,
1303  const size_type start,
1304  const size_type end,
1305  ResultType &result,
1306  std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
1307  {
1308 #ifdef DEAL_II_WITH_THREADS
1309  size_type vec_size = end-start;
1310  // only go to the parallel function in case there are at least 4 parallel
1311  // items, otherwise the overhead is too large
1312  if (vec_size >= 4*internal::VectorImplementation::minimum_parallel_grain_size &&
1313  MultithreadInfo::n_threads() > 1)
1314  {
1315  Assert(partitioner.get() != nullptr,
1316  ExcInternalError("Unexpected initialization of Vector that does "
1317  "not set the TBB partitioner to a usable state."));
1318  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
1319  partitioner->acquire_one_partitioner();
1320 
1321  TBBReduceFunctor<Operation,ResultType> generic_functor(op, start, end);
1322  tbb::parallel_for (tbb::blocked_range<size_type> (0,
1323  generic_functor.n_chunks,
1324  1),
1325  generic_functor,
1326  *tbb_partitioner);
1327  partitioner->release_one_partitioner(tbb_partitioner);
1328  result = generic_functor.do_sum();
1329  }
1330  else
1331  accumulate_recursive(op,start,end,result);
1332 #else
1333  accumulate_recursive(op,start,end,result);
1334  (void)partitioner;
1335 #endif
1336  }
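// Usage sketch for parallel_reduce() (illustrative only, not part of this
// header): the squared l2-norm of a caller-side array 'data' of length 'n'
// combines the Norm2 operation above with this driver; 'thread_loop_partitioner'
// is again an assumed caller-side std::shared_ptr<parallel::internal::TBBPartitioner>:
//
//   double norm_sqr = 0.;
//   Norm2<double,double> norm2 (data);
//   parallel_reduce (norm2, 0, n, norm_sqr, thread_loop_partitioner);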
1337  }
1338 }
1339 
1340 DEAL_II_NAMESPACE_CLOSE
1341 
1342 #endif