Reference documentation for deal.II version 8.5.1
vector_operations_internal.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2016 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii__vector_operations_internal_h
18 #define dealii__vector_operations_internal_h
19 
20 #include <deal.II/base/multithread_info.h>
21 #include <deal.II/base/parallel.h>
22 #include <deal.II/base/thread_management.h>
23 #include <deal.II/base/vectorization.h>
24 
25 #include <cstring>
26 #include <cstdio>
27 
28 DEAL_II_NAMESPACE_OPEN
29 
30 namespace internal
31 {
32  namespace VectorOperations
33  {
34  typedef types::global_dof_index size_type;
35
36  template <typename T>
37  bool is_non_negative (const T &t)
38  {
39  return t >= 0;
40  }
41 
42 
43  template <typename T>
44  bool is_non_negative (const std::complex<T> &)
45  {
46  Assert (false,
47  ExcMessage ("Complex numbers do not have an ordering."));
48 
49  return false;
50  }
51 
52 
53  template <typename T>
54  void print (const T &t,
55  const char *format)
56  {
57  if (format != 0)
58  std::printf (format, t);
59  else
60  std::printf (" %5.2f", double(t));
61  }
62 
63 
64 
65  template <typename T>
66  void print (const std::complex<T> &t,
67  const char *format)
68  {
69  if (format != 0)
70  std::printf (format, t.real(), t.imag());
71  else
72  std::printf (" %5.2f+%5.2fi",
73  double(t.real()), double(t.imag()));
74  }
75 
76  // call std::copy, except in
77  // the case where we want to copy
78  // from std::complex to a
79  // non-complex type
80  template <typename T, typename U>
81  void copy (const T *begin,
82  const T *end,
83  U *dest)
84  {
85  std::copy (begin, end, dest);
86  }
87 
88  template <typename T, typename U>
89  void copy (const std::complex<T> *begin,
90  const std::complex<T> *end,
91  std::complex<U> *dest)
92  {
93  std::copy (begin, end, dest);
94  }
95 
96  template <typename T, typename U>
97  void copy (const std::complex<T> *,
98  const std::complex<T> *,
99  U *)
100  {
101  Assert (false, ExcMessage ("Can't convert a vector of complex numbers "
102  "into a vector of reals/doubles"));
103  }
104 
105 
106 
107 #ifdef DEAL_II_WITH_THREADS
108 
116  template <typename Functor>
117  struct TBBForFunctor
118  {
119  TBBForFunctor(Functor &functor,
120  const size_type start,
121  const size_type end)
122  :
123  functor(functor),
124  start(start),
125  end(end)
126  {
127  const size_type vec_size = end-start;
128  // set chunk size for sub-tasks
129  const unsigned int gs = internal::Vector::minimum_parallel_grain_size;
130  n_chunks = std::min(static_cast<size_type>(4*MultithreadInfo::n_threads()),
131  vec_size / gs);
132  chunk_size = vec_size / n_chunks;
133 
134  // round to next multiple of 512 (or minimum grain size if that happens
135  // to be smaller). this is advantageous because our accumulation
136  // algorithms favor lengths of a power of 2 due to pairwise summation ->
137  // at most one 'oddly' sized chunk
138  if (chunk_size > 512)
139  chunk_size = ((chunk_size + 511)/512)*512;
140  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
141  AssertIndexRange((n_chunks-1)*chunk_size, vec_size);
142  AssertIndexRange(vec_size, n_chunks*chunk_size+1);
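// Worked example, not part of the library source (numbers assume 8 threads and
// a minimum_parallel_grain_size of 4096): for vec_size == 100000 the code above
// first sets n_chunks = min(32, 100000/4096) = 24 and chunk_size = 4166, rounds
// chunk_size up to 4608 == 9*512, and finally arrives at n_chunks = 22, i.e.
// 21 full chunks plus one shorter chunk of 3232 elements.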
143  };
144 
145  void operator() (const tbb::blocked_range<size_type> &range) const
146  {
147  const size_type r_begin = start + range.begin()*chunk_size;
148  const size_type r_end = std::min(start + range.end()*chunk_size, end);
149  functor(r_begin, r_end);
150  }
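// Example, not part of the library source: with start == 0 and
// chunk_size == 4608, a tbb range covering [2,4) makes this operator work on
// the elements [9216, 18432) of the vector (clipped at 'end').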
151 
152  Functor &functor;
153  const size_type start;
154  const size_type end;
155  unsigned int n_chunks;
156  size_type chunk_size;
157  };
158 #endif
159 
160  template <typename Functor>
161  void parallel_for(Functor &functor,
162  size_type start,
163  size_type end,
164  std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
165  {
166 #ifdef DEAL_II_WITH_THREADS
167  size_type vec_size = end-start;
168  // only go to the parallel function in case there are at least 4 parallel
169  // items, otherwise the overhead is too large
170  if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size &&
171  MultithreadInfo::n_threads() > 1)
172  {
173  Assert(partitioner.get() != NULL,
174  ExcInternalError("Unexpected initialization of Vector that does "
175  "not set the TBB partitioner to a usable state."));
176  std_cxx11::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
177  partitioner->acquire_one_partitioner();
178 
179  TBBForFunctor<Functor> generic_functor(functor, start, end);
180  tbb::parallel_for (tbb::blocked_range<size_type> (0,
181  generic_functor.n_chunks,
182  1),
183  generic_functor,
184  *tbb_partitioner);
185  partitioner->release_one_partitioner(tbb_partitioner);
186  }
187  else if (vec_size > 0)
188  functor(start,end);
189 #else
190  functor(start,end);
191  (void)partitioner;
192 #endif
193  }
194 
195 
196  // Define the functors necessary to use SIMD with TBB. We also include the
197  // simple copy and set operations.
198 
199  template <typename Number>
200  struct Vector_set
201  {
202  Vector_set(Number value, Number *dst)
203  :
204  value(value),
205  dst(dst)
206  {}
207 
208  void operator() (const size_type begin, const size_type end) const
209  {
210  if (value == Number())
211  std::memset (dst+begin,0,(end-begin)*sizeof(Number));
212  else
213  std::fill (dst+begin, dst+end, value);
214  }
215 
216  Number value;
217  Number *dst;
218  };
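// Illustrative usage sketch, not part of the library source: this is roughly
// how a vector class would drive Vector_set through parallel_for() below;
// 'values', 'size()' and 'thread_loop_partitioner' are assumed members of
// that class.
//
//   Vector_set<Number> setter(s, values);
//   parallel_for(setter, 0, size(), thread_loop_partitioner);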
219 
220  template <typename Number, typename OtherNumber>
221  struct Vector_copy
222  {
223  Vector_copy(const OtherNumber *src, Number *dst)
224  :
225  src(src),
226  dst(dst)
227  {}
228 
229  void operator() (const size_type begin, const size_type end) const
230  {
231  if (types_are_equal<Number,OtherNumber>::value)
232  std::memcpy(dst+begin, src+begin, (end-begin)*sizeof(Number));
233  else
234  {
235  DEAL_II_OPENMP_SIMD_PRAGMA
236  for (size_type i=begin; i<end; ++i)
237  dst[i] = src[i];
238  }
239  }
240 
241  const OtherNumber *src;
242  Number *dst;
243  };
244 
245  template <typename Number>
246  struct Vectorization_multiply_factor
247  {
248  Vectorization_multiply_factor(Number *val, Number factor)
249  :
250  val(val),
251  factor(factor)
252  {}
253 
254  void operator() (const size_type begin, const size_type end) const
255  {
256  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
257  {
258  DEAL_II_OPENMP_SIMD_PRAGMA
259  for (size_type i=begin; i<end; ++i)
260  val[i] *= factor;
261  }
262  else
263  {
264  for (size_type i=begin; i<end; ++i)
265  val[i] *= factor;
266  }
267  }
268 
269  Number *val;
270  Number factor;
271  };
272 
273  template <typename Number>
274  struct Vectorization_add_av
275  {
276  Vectorization_add_av(Number *val, Number *v_val, Number factor)
277  :
278  val(val),
279  v_val(v_val),
280  factor(factor)
281  {}
282 
283  void operator() (const size_type begin, const size_type end) const
284  {
285  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
286  {
287  DEAL_II_OPENMP_SIMD_PRAGMA
288  for (size_type i=begin; i<end; ++i)
289  val[i] += factor*v_val[i];
290  }
291  else
292  {
293  for (size_type i=begin; i<end; ++i)
294  val[i] += factor*v_val[i];
295  }
296  }
297 
298  Number *val;
299  Number *v_val;
300  Number factor;
301  };
302 
303  template <typename Number>
304  struct Vectorization_sadd_xav
305  {
306  Vectorization_sadd_xav(Number *val, Number *v_val, Number a, Number x)
307  :
308  val(val),
309  v_val(v_val),
310  a(a),
311  x(x)
312  {}
313 
314  void operator() (const size_type begin, const size_type end) const
315  {
316  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
317  {
318  DEAL_II_OPENMP_SIMD_PRAGMA
319  for (size_type i=begin; i<end; ++i)
320  val[i] = x*val[i] + a*v_val[i];
321  }
322  else
323  {
324  for (size_type i=begin; i<end; ++i)
325  val[i] = x*val[i] + a*v_val[i];
326  }
327  }
328 
329  Number *val;
330  Number *v_val;
331  Number a;
332  Number x;
333  };
334 
335  template <typename Number>
336  struct Vectorization_subtract_v
337  {
338  Vectorization_subtract_v(Number *val, Number *v_val)
339  :
340  val(val),
341  v_val(v_val)
342  {}
343 
344  void operator() (const size_type begin, const size_type end) const
345  {
346  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
347  {
348  DEAL_II_OPENMP_SIMD_PRAGMA
349  for (size_type i=begin; i<end; ++i)
350  val[i] -= v_val[i];
351  }
352  else
353  {
354  for (size_type i=begin; i<end; ++i)
355  val[i] -= v_val[i];
356  }
357  }
358 
359  Number *val;
360  Number *v_val;
361  };
362 
363  template <typename Number>
364  struct Vectorization_add_factor
365  {
366  Vectorization_add_factor(Number *val, Number factor)
367  :
368  val(val),
369  factor(factor)
370  {}
371 
372  void operator() (const size_type begin, const size_type end) const
373  {
374  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
375  {
376  DEAL_II_OPENMP_SIMD_PRAGMA
377  for (size_type i=begin; i<end; ++i)
378  val[i] += factor;
379  }
380  else
381  {
382  for (size_type i=begin; i<end; ++i)
383  val[i] += factor;
384  }
385  }
386 
387  Number *val;
388  Number factor;
389  };
390 
391  template <typename Number>
392  struct Vectorization_add_v
393  {
394  Vectorization_add_v(Number *val, Number *v_val)
395  :
396  val(val),
397  v_val(v_val)
398  {}
399 
400  void operator() (const size_type begin, const size_type end) const
401  {
402  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
403  {
404  DEAL_II_OPENMP_SIMD_PRAGMA
405  for (size_type i=begin; i<end; ++i)
406  val[i] += v_val[i];
407  }
408  else
409  {
410  for (size_type i=begin; i<end; ++i)
411  val[i] += v_val[i];
412  }
413  }
414 
415  Number *val;
416  Number *v_val;
417  };
418 
419  template <typename Number>
420  struct Vectorization_add_avpbw
421  {
422  Vectorization_add_avpbw(Number *val, Number *v_val, Number *w_val, Number a, Number b)
423  :
424  val(val),
425  v_val(v_val),
426  w_val(w_val),
427  a(a),
428  b(b)
429  {}
430 
431  void operator() (const size_type begin, const size_type end) const
432  {
433  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
434  {
435  DEAL_II_OPENMP_SIMD_PRAGMA
436  for (size_type i=begin; i<end; ++i)
437  val[i] = val[i] + a*v_val[i] + b*w_val[i];
438  }
439  else
440  {
441  for (size_type i=begin; i<end; ++i)
442  val[i] = val[i] + a*v_val[i] + b*w_val[i];
443  }
444  }
445 
446  Number *val;
447  Number *v_val;
448  Number *w_val;
449  Number a;
450  Number b;
451  };
452 
453  template <typename Number>
454  struct Vectorization_sadd_xv
455  {
456  Vectorization_sadd_xv(Number *val, Number *v_val, Number x)
457  :
458  val(val),
459  v_val(v_val),
460  x(x)
461  {}
462 
463  void operator() (const size_type begin, const size_type end) const
464  {
465  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
466  {
467  DEAL_II_OPENMP_SIMD_PRAGMA
468  for (size_type i=begin; i<end; ++i)
469  val[i] = x*val[i] + v_val[i];
470  }
471  else
472  {
473  for (size_type i=begin; i<end; ++i)
474  val[i] = x*val[i] + v_val[i];
475  }
476  }
477 
478  Number *val;
479  Number *v_val;
480  Number x;
481  };
482 
483  template <typename Number>
484  struct Vectorization_sadd_xavbw
485  {
486  Vectorization_sadd_xavbw(Number *val, Number *v_val, Number *w_val,
487  Number x, Number a, Number b)
488  :
489  val(val),
490  v_val(v_val),
491  w_val(w_val),
492  x(x),
493  a(a),
494  b(b)
495  {}
496 
497  void operator() (const size_type begin, const size_type end) const
498  {
499  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
500  {
501  DEAL_II_OPENMP_SIMD_PRAGMA
502  for (size_type i=begin; i<end; ++i)
503  val[i] = x*val[i] + a*v_val[i] + b*w_val[i];
504  }
505  else
506  {
507  for (size_type i=begin; i<end; ++i)
508  val[i] = x*val[i] + a*v_val[i] + b*w_val[i];
509  }
510  }
511 
512  Number *val;
513  Number *v_val;
514  Number *w_val;
515  Number x;
516  Number a;
517  Number b;
518  };
519 
520  template <typename Number>
521  struct Vectorization_scale
522  {
523  Vectorization_scale(Number *val, Number *v_val)
524  :
525  val(val),
526  v_val(v_val)
527  {}
528 
529  void operator() (const size_type begin, const size_type end) const
530  {
531  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
532  {
533  DEAL_II_OPENMP_SIMD_PRAGMA
534  for (size_type i=begin; i<end; ++i)
535  val[i] *= v_val[i];
536  }
537  else
538  {
539  for (size_type i=begin; i<end; ++i)
540  val[i] *= v_val[i];
541  }
542  }
543 
544  Number *val;
545  Number *v_val;
546  };
547 
548  template <typename Number>
549  struct Vectorization_equ_au
550  {
551  Vectorization_equ_au(Number *val, Number *u_val, Number a)
552  :
553  val(val),
554  u_val(u_val),
555  a(a)
556  {}
557 
558  void operator() (const size_type begin, const size_type end) const
559  {
560  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
561  {
562  DEAL_II_OPENMP_SIMD_PRAGMA
563  for (size_type i=begin; i<end; ++i)
564  val[i] = a*u_val[i];
565  }
566  else
567  {
568  for (size_type i=begin; i<end; ++i)
569  val[i] = a*u_val[i];
570  }
571  }
572 
573  Number *val;
574  Number *u_val;
575  Number a;
576  };
577 
578  template <typename Number>
579  struct Vectorization_equ_aubv
580  {
581  Vectorization_equ_aubv(Number *val, Number *u_val, Number *v_val,
582  Number a, Number b)
583  :
584  val(val),
585  u_val(u_val),
586  v_val(v_val),
587  a(a),
588  b(b)
589  {}
590 
591  void operator() (const size_type begin, const size_type end) const
592  {
593  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
594  {
595  DEAL_II_OPENMP_SIMD_PRAGMA
596  for (size_type i=begin; i<end; ++i)
597  val[i] = a*u_val[i] + b*v_val[i];
598  }
599  else
600  {
601  for (size_type i=begin; i<end; ++i)
602  val[i] = a*u_val[i] + b*v_val[i];
603  }
604  }
605 
606  Number *val;
607  Number *u_val;
608  Number *v_val;
609  Number a;
610  Number b;
611  };
612 
613  template <typename Number>
614  struct Vectorization_equ_aubvcw
615  {
616  Vectorization_equ_aubvcw(Number *val, Number *u_val, Number *v_val,
617  Number *w_val, Number a, Number b, Number c)
618  :
619  val(val),
620  u_val(u_val),
621  v_val(v_val),
622  w_val(w_val),
623  a(a),
624  b(b),
625  c(c)
626  {}
627 
628  void operator() (const size_type begin, const size_type end) const
629  {
630  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
631  {
632  DEAL_II_OPENMP_SIMD_PRAGMA
633  for (size_type i=begin; i<end; ++i)
634  val[i] = a*u_val[i] + b*v_val[i] + c*w_val[i];
635  }
636  else
637  {
638  for (size_type i=begin; i<end; ++i)
639  val[i] = a*u_val[i] + b*v_val[i] + c*w_val[i];
640  }
641  }
642 
643  Number *val;
644  Number *u_val;
645  Number *v_val;
646  Number *w_val;
647  Number a;
648  Number b;
649  Number c;
650  };
651 
652  template <typename Number>
653  struct Vectorization_ratio
654  {
655  Vectorization_ratio(Number *val, Number *a_val, Number *b_val)
656  :
657  val(val),
658  a_val(a_val),
659  b_val(b_val)
660  {}
661 
662  void operator() (const size_type begin, const size_type end) const
663  {
664  if (parallel::internal::EnableOpenMPSimdFor<Number>::value)
665  {
666  DEAL_II_OPENMP_SIMD_PRAGMA
667  for (size_type i=begin; i<end; ++i)
668  val[i] = a_val[i]/b_val[i];
669  }
670  else
671  {
672  for (size_type i=begin; i<end; ++i)
673  val[i] = a_val[i]/b_val[i];
674  }
675  }
676 
677  Number *val;
678  Number *a_val;
679  Number *b_val;
680  };
681 
682 
683 
684  // All sums over all the vector entries (l2-norm, inner product, etc.) are
685  // performed with the same code, using a templated operation defined
686  // here. There are always two versions defined, a standard one that covers
687  // most cases and a vectorized one which is only for equal types and float
688  // and double.
689  template <typename Number, typename Number2>
690  struct Dot
691  {
692  static const bool vectorizes = types_are_equal<Number,Number2>::value &&
693  (VectorizedArray<Number>::n_array_elements > 1);
694
695  Dot(const Number *X, const Number2 *Y)
696  :
697  X(X),
698  Y(Y)
699  {}
700 
701  Number
702  operator() (const size_type i) const
703  {
704  return X[i] * Number(numbers::NumberTraits<Number2>::conjugate(Y[i]));
705  }
706 
707  VectorizedArray<Number>
708  do_vectorized(const size_type i) const
709  {
710  VectorizedArray<Number> x, y;
711  x.load(X+i);
712  y.load(Y+i);
713  return x * y;
714  }
715 
716  const Number *X;
717  const Number2 *Y;
718  };
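// Illustrative usage sketch, not part of the library source: an inner product
// would combine this functor with parallel_reduce() further down; 'values',
// 'v.values', 'size()' and 'thread_loop_partitioner' are assumed members of a
// vector class.
//
//   Dot<Number,Number2> dot(values, v.values);
//   Number sum;
//   parallel_reduce(dot, 0, size(), sum, thread_loop_partitioner);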
719 
720  template <typename Number, typename RealType>
721  struct Norm2
722  {
723  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
724 
725  Norm2(const Number *X)
726  :
727  X(X)
728  {}
729 
730  RealType
731  operator() (const size_type i) const
732  {
733  return numbers::NumberTraits<Number>::abs_square(X[i]);
734  }
735 
736  VectorizedArray<Number>
737  do_vectorized(const size_type i) const
738  {
739  VectorizedArray<Number> x;
740  x.load(X+i);
741  return x * x;
742  }
743 
744  const Number *X;
745  };
746 
747  template <typename Number, typename RealType>
748  struct Norm1
749  {
750  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
751 
752  Norm1(const Number *X)
753  :
754  X(X)
755  {}
756 
757  RealType
758  operator() (const size_type i) const
759  {
760  return numbers::NumberTraits<Number>::abs(X[i]);
761  }
762 
763  VectorizedArray<Number>
764  do_vectorized(const size_type i) const
765  {
766  VectorizedArray<Number> x;
767  x.load(X+i);
768  return std::abs(x);
769  }
770 
771  const Number *X;
772  };
773 
774  template <typename Number, typename RealType>
775  struct NormP
776  {
777  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
778 
779  NormP(const Number *X, RealType p)
780  :
781  X(X),
782  p(p)
783  {}
784 
785  RealType
786  operator() (const size_type i) const
787  {
788  return std::pow(numbers::NumberTraits<Number>::abs(X[i]), p);
789  }
790 
791  VectorizedArray<Number>
792  do_vectorized(const size_type i) const
793  {
794  VectorizedArray<Number> x;
795  x.load(X+i);
796  return std::pow(std::abs(x),p);
797  }
798 
799  const Number *X;
800  RealType p;
801  };
802 
803  template <typename Number>
804  struct MeanValue
805  {
806  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
807 
808  MeanValue(const Number *X)
809  :
810  X(X)
811  {}
812 
813  Number
814  operator() (const size_type i) const
815  {
816  return X[i];
817  }
818 
819  VectorizedArray<Number>
820  do_vectorized(const size_type i) const
821  {
822  VectorizedArray<Number> x;
823  x.load(X+i);
824  return x;
825  }
826 
827  const Number *X;
828  };
829 
830  template <typename Number>
831  struct AddAndDot
832  {
833  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;
834 
835  AddAndDot(Number *X, const Number *V, const Number *W, Number a)
836  :
837  X(X),
838  V(V),
839  W(W),
840  a(a)
841  {}
842 
843  Number
844  operator() (const size_type i) const
845  {
846  X[i] += a * V[i];
847  return X[i] * Number(numbers::NumberTraits<Number>::conjugate(W[i]));
848  }
849 
850  VectorizedArray<Number>
851  do_vectorized(const size_type i) const
852  {
853  VectorizedArray<Number> x, v, w;
854  x.load(X+i);
855  v.load(V+i);
856  x += a * v;
857  x.store(X+i);
858  // may only load from W after storing in X because the pointers might
859  // point to the same memory
860  w.load(W+i);
861  return x * w;
862  }
863 
864  Number *X;
865  const Number *V, *W;
866  Number a;
867  };
868 
869 
870 
871  // this is the main working loop for all vector sums using the templated
872  // operation above. it accumulates the sums using a block-wise summation
873  // algorithm with post-update. this blocked algorithm has been proposed in
874  // a similar form by Castaldo, Whaley and Chronopoulos (SIAM
875  // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible
876  // block size, 2. Sometimes it is referred to as pairwise summation. The
877  // worst case error made by this algorithm is on the order O(eps *
878  // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even
879  // though the Kahan summation is even more accurate with an error O(eps)
880  // by carrying along remainders not captured by the main sum, that involves
881  // additional costs which are not worthwhile. See the Wikipedia article on
882  // the Kahan summation algorithm.
883 
884  // The algorithm implemented here has the additional benefit that it is
885  // easily parallelized without changing the order of how the elements are
886  // added (floating point addition is not associative). For the same vector
887  // size and minimum_parallel_grain_size, the blocks are always the
888  // same and added pairwise.
889 
890  // The depth of recursion is controlled by the 'magic' parameter
891  // vector_accumulation_recursion_threshold: If the length is below
892  // vector_accumulation_recursion_threshold * 32 (32 is the part of code we
893  // unroll), a straight loop instead of recursion will be used. At the
894  // innermost level, eight values are added consecutively in order to better
895  // balance multiplications and additions.
896 
897  // Loops are unrolled as follows: the range [first,last) is broken into
898  // @p n_chunks each of size 32 plus the @p remainder.
899  // accumulate_regular() does the work on 32*n_chunks elements employing SIMD
900  // if possible and stores the result of the operation for each chunk in @p outer_results.
901 
902  // The code returns the result as the last argument in order to make
903  // spawning tasks simpler and use automatic template deduction.
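// Worked example, not part of the library source: for eight values a0..a7 the
// blocked scheme effectively computes ((a0+a1)+(a2+a3)) + ((a4+a5)+(a6+a7)),
// so every value passes through log2(8) = 3 additions instead of up to 7 in a
// naive left-to-right sum; this is where the O(eps * log2(vec_size)) bound
// mentioned above comes from.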
904 
905 
910  const unsigned int vector_accumulation_recursion_threshold = 128;
911 
912  template <typename Operation, typename ResultType>
913  void accumulate_recursive (const Operation &op,
914  const size_type first,
915  const size_type last,
916  ResultType &result)
917  {
918  const size_type vec_size = last - first;
919  if (vec_size <= vector_accumulation_recursion_threshold * 32)
920  {
921  // the vector is short enough so we perform the summation. first
922  // work on the regular part. The innermost 32 values are expanded in
923  // order to obtain known loop bounds for most of the work.
924  size_type index = first;
925  ResultType outer_results [vector_accumulation_recursion_threshold];
926 
927  // set the zeroth element to zero to correctly handle the case where
928  // vec_size == 0
929  outer_results[0] = ResultType();
930 
931  // the variable serves two purposes: (i) number of chunks (each 32 indices)
932  // for the given size; all results are stored in outer_results[0,n_chunks)
933  // (ii) in the SIMD case n_chunks is also the next free index in outer_results[]
934  // to which we can write after accumulate_regular() is executed.
935  size_type n_chunks = vec_size / 32;
936  const size_type remainder = vec_size % 32;
937  Assert (remainder == 0 || n_chunks < vector_accumulation_recursion_threshold,
938  ExcInternalError());
939 
940  // Select between the regular version and vectorized version based
941  // on the number types we are given. To choose the vectorized
942  // version often enough, we need to have all tasks but the last one
943  // to be divisible by the vectorization length
944  accumulate_regular(op, n_chunks, index, outer_results,
945  internal::bool2type<Operation::vectorizes>());
946
947  // now work on the remainder, i.e., the last up to 32 values. Use
948  // switch statement with fall-through to work on these values.
949  if (remainder > 0)
950  {
951  // if we got here, it means that (vec_size <= vector_accumulation_recursion_threshold * 32),
952  // which is to say that the domain can be split into n_chunks <= vector_accumulation_recursion_threshold:
953  AssertIndexRange(n_chunks, vector_accumulation_recursion_threshold+1);
954  // split the remainder into chunks of 8, there could be up to 3
955  // such chunks since remainder < 32.
956  // Work on those chunks without any SIMD, that is we call op(index).
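// Example, not part of the library source: for remainder == 27 the switch
// below enters at 'case 3', so each of the three cases sums 8 values and the
// default label adds the remaining 3.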
957  const size_type inner_chunks = remainder / 8;
958  Assert (inner_chunks <= 3, ExcInternalError());
959  const size_type remainder_inner = remainder % 8;
960  ResultType r0 = ResultType(), r1 = ResultType(),
961  r2 = ResultType();
962  switch (inner_chunks)
963  {
964  case 3:
965  r2 = op(index++);
966  for (size_type j=1; j<8; ++j)
967  r2 += op(index++);
968  // no break
969  case 2:
970  r1 = op(index++);
971  for (size_type j=1; j<8; ++j)
972  r1 += op(index++);
973  r1 += r2;
974  // no break
975  case 1:
976  r2 = op(index++);
977  for (size_type j=1; j<8; ++j)
978  r2 += op(index++);
979  // no break
980  default:
981  for (size_type j=0; j<remainder_inner; ++j)
982  r0 += op(index++);
983  r0 += r2;
984  r0 += r1;
985  if (n_chunks == vector_accumulation_recursion_threshold)
986  outer_results[vector_accumulation_recursion_threshold-1] += r0;
987  else
988  {
989  outer_results[n_chunks] = r0;
990  n_chunks++;
991  }
992  break;
993  }
994  }
995  // make sure we worked through all indices
996  AssertDimension(index, last);
997 
998  // now sum the results from the chunks stored in outer_results[0,n_chunks)
999  // recursively
1000  while (n_chunks > 1)
1001  {
1002  if (n_chunks % 2 == 1)
1003  outer_results[n_chunks++] = ResultType();
1004  for (size_type i=0; i<n_chunks; i+=2)
1005  outer_results[i/2] = outer_results[i] + outer_results[i+1];
1006  n_chunks /= 2;
1007  }
1008  result = outer_results[0];
1009  }
1010  else
1011  {
1012  // split vector into four pieces and work on the pieces
1013  // recursively. Make pieces (except last) divisible by one fourth the
1014  // recursion threshold.
1015  const size_type new_size =
1016  (vec_size / (vector_accumulation_recursion_threshold * 32)) *
1017  vector_accumulation_recursion_threshold * 8;
1018  Assert (first+3*new_size < last,
1019  ExcInternalError());
1020  ResultType r0, r1, r2, r3;
1021  accumulate_recursive (op, first, first+new_size, r0);
1022  accumulate_recursive (op, first+new_size, first+2*new_size, r1);
1023  accumulate_recursive (op, first+2*new_size, first+3*new_size, r2);
1024  accumulate_recursive (op, first+3*new_size, last, r3);
1025  r0 += r1;
1026  r2 += r3;
1027  result = r0 + r2;
1028  }
1029  }
1030 
1031 
1032  // this is the inner working routine for the accumulation loops
1033  // below. This is the standard case where the loop bounds are known. We
1034  // pulled this function out of the regular accumulate routine because we
1035  // might do this thing vectorized (see specialized function below)
1036  template <typename Operation, typename ResultType>
1037  void
1038  accumulate_regular(const Operation &op,
1039  size_type &n_chunks,
1040  size_type &index,
1041  ResultType (&outer_results)[vector_accumulation_recursion_threshold],
1042  internal::bool2type<false>)
1043  {
1044  // note that each chunk is chosen to have a width of 32, thereby the index
1045  // is incremented by 4*8 for each @p i.
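// (Example, not part of the library source: starting from index == 0 with
// n_chunks == 2, the loop below consumes op(0) ... op(63) and leaves
// index == 64.)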
1046  for (size_type i=0; i<n_chunks; ++i)
1047  {
1048  ResultType r0 = op(index);
1049  ResultType r1 = op(index+1);
1050  ResultType r2 = op(index+2);
1051  ResultType r3 = op(index+3);
1052  index += 4;
1053  for (size_type j=1; j<8; ++j, index += 4)
1054  {
1055  r0 += op(index);
1056  r1 += op(index+1);
1057  r2 += op(index+2);
1058  r3 += op(index+3);
1059  }
1060  r0 += r1;
1061  r2 += r3;
1062  outer_results[i] = r0 + r2;
1063  }
1064  }
1065 
1066 
1067 
1068  // this is the inner working routine for the accumulation loops
1069  // below. This is the specialized case where the loop bounds are known and
1070  // where we can vectorize. In that case, we request the 'do_vectorized'
1071  // routine of the operation instead of the regular one which does several
1072  // operations at once.
1073  template <typename Operation, typename Number>
1074  void
1075  accumulate_regular(const Operation &op,
1076  size_type &n_chunks,
1077  size_type &index,
1078  Number (&outer_results)[vector_accumulation_recursion_threshold],
1079  internal::bool2type<true>)
1080  {
1081  // we start from @p index and work out @p n_chunks each of size 32.
1082  // in order to employ SIMD and work on @p nvecs at a time, we split this
1083  // loop yet again:
1084  // First we work on (n_chunks/nvecs) chunks, where each chunk processes
1085  // nvecs*(4*8) elements.
1086 
1087  const unsigned int nvecs = VectorizedArray<Number>::n_array_elements;
1088  const size_type regular_chunks = n_chunks/nvecs;
1089  for (size_type i=0; i<regular_chunks; ++i)
1090  {
1091  VectorizedArray<Number> r0 = op.do_vectorized(index);
1092  VectorizedArray<Number> r1 = op.do_vectorized(index+nvecs);
1093  VectorizedArray<Number> r2 = op.do_vectorized(index+2*nvecs);
1094  VectorizedArray<Number> r3 = op.do_vectorized(index+3*nvecs);
1095  index += nvecs*4;
1096  for (size_type j=1; j<8; ++j, index += nvecs*4)
1097  {
1098  r0 += op.do_vectorized(index);
1099  r1 += op.do_vectorized(index+nvecs);
1100  r2 += op.do_vectorized(index+2*nvecs);
1101  r3 += op.do_vectorized(index+3*nvecs);
1102  }
1103  r0 += r1;
1104  r2 += r3;
1105  r0 += r2;
1106  r0.store(&outer_results[i*VectorizedArray<Number>::n_array_elements]);
1107  }
1108 
1109  // If we are treating a case where the vector length is not divisible by
1110  // the vectorization length, we need a cleanup loop.
1111  // The remaining chunks are processed one by one starting from regular_chunks * nvecs;
1112  // We do as much as possible with 2 SIMD operations within each chunk.
1113  // Here we assume that nvecs < 32/2 = 16 as well as 16%nvecs==0.
1114  AssertIndexRange(VectorizedArray<Number>::n_array_elements,
1115  17);
1116  Assert (16 % nvecs == 0,
1117  ExcInternalError());
1118  if (n_chunks % VectorizedArray<Number>::n_array_elements != 0)
1119  {
1120  VectorizedArray<Number> r0 = VectorizedArray<Number>(),
1121  r1 = VectorizedArray<Number>();
1122  const size_type start_irreg = regular_chunks * nvecs;
1123  for (size_type c=start_irreg; c<n_chunks; ++c)
1124  for (size_type j=0; j<32; j+=2*nvecs, index+=2*nvecs)
1125  {
1126  r0 += op.do_vectorized(index);
1127  r1 += op.do_vectorized(index+nvecs);
1128  }
1129  r0 += r1;
1130  r0.store(&outer_results[start_irreg]);
1131  // update n_chunks to denote the first unused element in outer_results[] from
1132  // which we can keep writing.
1133  n_chunks = start_irreg + VectorizedArray<Number>::n_array_elements;
1134  }
1135  }
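// Worked example, not part of the library source: for double precision with
// AVX, nvecs == 4. With n_chunks == 10 the regular loop above handles
// regular_chunks == 2 blocks of 4 chunks (2*128 elements); the cleanup loop
// then sums chunks 8 and 9 into one VectorizedArray whose 4 lanes are stored
// at outer_results[8..11], and n_chunks is updated to 12.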
1136 
1137 
1138 
1139 #ifdef DEAL_II_WITH_THREADS
1140 
1168  template <typename Operation, typename ResultType>
1169  struct TBBReduceFunctor
1170  {
1171  static const unsigned int threshold_array_allocate = 512;
1172 
1173  TBBReduceFunctor(const Operation &op,
1174  const size_type start,
1175  const size_type end)
1176  :
1177  op(op),
1178  start(start),
1179  end(end)
1180  {
1181  const size_type vec_size = end-start;
1182  // set chunk size for sub-tasks
1183  const unsigned int gs = internal::Vector::minimum_parallel_grain_size;
1184  n_chunks = std::min(static_cast<size_type>(4*MultithreadInfo::n_threads()),
1185  vec_size / gs);
1186  chunk_size = vec_size / n_chunks;
1187 
1188  // round to next multiple of 512 (or leave it at the minimum grain size
1189  // if that happens to be smaller). this is advantageous because our
1190  // algorithm favors lengths of a power of 2 due to pairwise summation ->
1191  // at most one 'oddly' sized chunk
1192  if (chunk_size > 512)
1193  chunk_size = ((chunk_size + 511)/512)*512;
1194  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
1195  AssertIndexRange((n_chunks-1)*chunk_size, vec_size);
1196  AssertIndexRange(vec_size, n_chunks*chunk_size+1);
1197 
1198  if (n_chunks > threshold_array_allocate)
1199  {
1200  // make sure we allocate an even number of elements,
1201  // access to the new last element is needed in do_sum()
1202  large_array.resize(2*((n_chunks+1)/2));
1203  array_ptr = &large_array[0];
1204  }
1205  else
1206  array_ptr = &small_array[0];
1207  };
1208 
1213  void operator() (const tbb::blocked_range<size_type> &range) const
1214  {
1215  for (size_type i = range.begin(); i < range.end(); ++i)
1216  accumulate_recursive(op, start+i*chunk_size, std::min(start+(i+1)*chunk_size, end),
1217  array_ptr[i]);
1218  }
1219 
1220  ResultType do_sum() const
1221  {
1222  while (n_chunks > 1)
1223  {
1224  if (n_chunks % 2 == 1)
1225  array_ptr[n_chunks++] = ResultType();
1226  for (size_type i=0; i<n_chunks; i+=2)
1227  array_ptr[i/2] = array_ptr[i] + array_ptr[i+1];
1228  n_chunks /= 2;
1229  }
1230  return array_ptr[0];
1231  }
1232 
1233  const Operation &op;
1234  const size_type start;
1235  const size_type end;
1236 
1237  mutable unsigned int n_chunks;
1238  unsigned int chunk_size;
1239  ResultType small_array [threshold_array_allocate];
1240  std::vector<ResultType> large_array;
1241  // this variable either points to small_array or large_array depending on
1242  // the number of threads we want to feed
1243  mutable ResultType *array_ptr;
1244  };
1245 #endif
1246 
1247 
1248 
1253  template <typename Operation, typename ResultType>
1254  void parallel_reduce (const Operation &op,
1255  const size_type start,
1256  const size_type end,
1257  ResultType &result,
1258  std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
1259  {
1260 #ifdef DEAL_II_WITH_THREADS
1261  size_type vec_size = end-start;
1262  // only go to the parallel function in case there are at least 4 parallel
1263  // items, otherwise the overhead is too large
1264  if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size &&
1265  MultithreadInfo::n_threads() > 1)
1266  {
1267  Assert(partitioner.get() != NULL,
1268  ExcInternalError("Unexpected initialization of Vector that does "
1269  "not set the TBB partitioner to a usable state."));
1270  std_cxx11::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
1271  partitioner->acquire_one_partitioner();
1272 
1273  TBBReduceFunctor<Operation,ResultType> generic_functor(op, start, end);
1274  tbb::parallel_for (tbb::blocked_range<size_type> (0,
1275  generic_functor.n_chunks,
1276  1),
1277  generic_functor,
1278  *tbb_partitioner);
1279  partitioner->release_one_partitioner(tbb_partitioner);
1280  result = generic_functor.do_sum();
1281  }
1282  else
1283  accumulate_recursive(op,start,end,result);
1284 #else
1285  accumulate_recursive(op,start,end,result);
1286  (void)partitioner;
1287 #endif
1288  }
1289  }
1290 }
1291 
1292 DEAL_II_NAMESPACE_CLOSE
1293 
1294 #endif