deal.II version GIT relicensing-2291-g668cd86249 2024-12-24 11:30:00+00:00
solver_gmres.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#include <deal.II/lac/solver_gmres.h>
17
19
20DEAL_II_NAMESPACE_OPEN
21
22namespace internal
23{
24 namespace SolverGMRESImplementation
25 {
26 template <bool delayed_reorthogonalization, typename Number>
27 void
28 do_Tvmult_add(const unsigned int n_vectors,
29 const std::size_t locally_owned_size,
30 const Number *current_vector,
31 const std::vector<const Number *> &orthogonal_vectors,
32 Vector<double> &h)
33 {
34 unsigned int j = 0;
35
36 if (n_vectors <= 128)
37 {
38 // optimized path
39 static constexpr unsigned int n_lanes =
40 VectorizedArray<double>::size();
41
42 VectorizedArray<double> hs[128];
43 for (unsigned int i = 0; i < n_vectors; ++i)
44 hs[i] = 0.0;
45 VectorizedArray<double>
46 correct[delayed_reorthogonalization ? 129 : 1];
47 if (delayed_reorthogonalization)
48 for (unsigned int i = 0; i < n_vectors + 1; ++i)
49 correct[i] = 0.0;
50
51 unsigned int c = 0;
52
53 constexpr unsigned int inner_batch_size =
54 delayed_reorthogonalization ? 6 : 12;
55
56 const unsigned int loop_length_c =
57 locally_owned_size / n_lanes / inner_batch_size;
58
59 // At this point, we would like to run a loop over the variable 'c'
60 // from 0 all the way to loop_length_c, and then run a loop over 'i'
61 // to work on all vectors we want to compute the inner product
62 // against. In other words, the loop layout would be:
63 //
64 // for (unsigned int c = 0; c < loop_length_c; ++c)
65 // {
66 // // do some work that only depends on the index c or the
67 // // derived index j = c * n_lanes * inner_batch_size
68 // ...
69 // for (unsigned int i = 0; i < n_vectors - 1; ++i)
70 // {
71 // // do the work with orthogonal_vectors[i][j] and
72 // // current_vector[j] to fill the summation variables
73 // }
74 // }
75 //
76 // However, this access pattern leads to relatively poor memory
77 // behavior and low performance because we would access only a few
78 // entries of each of the n_vectors arrays 'orthogonal_vectors[i]'
79 // around an index j at a time before we pass on to the next vector
80 // i. More precisely, we access inner_batch_size * n_lanes entries
81 // before moving on to the next vector. Modern CPUs derive a good
82 // deal of performance from so-called hardware prefetching, which is
83 // a mechanism that speculatively initiates loads from main memory
84 // that the hardware guesses will be accessed soon, in order to
85 // reduce the waiting time once the access actually happens. This
86 // data is put into fast cache memory in the meantime. Prefetching
87 // typically gets initiated when we access the entries of an array
88 // consecutively, or when looping over the data elements of a few
89 // vectors at the same time. However, when more than around 10
90 // vectors are looped over simultaneously, the hardware sees too
91 // many streams and the capacity of the loop stream detectors gets
92 // exceeded. As a result, the hardware will only initiate a load
93 // once the entry is actually requested by the respective code with
94 // that loop index. As it takes many clock cycles for data to arrive
95 // from memory (on the order of 200-500 clock cycles on 2024
96 // hardware, whereas caches can deliver data in 4-20 cycles and
97 // computations can be done in 3-6 cycles), and since even CPUs with
98 // good out-of-order execution capabilities can issue only a limited
99 // number of loads, the memory interface will become under-utilized,
100 // which is counter-intuitive for a loop we expect to be very
101 // memory-bandwidth heavy.
102 //
103 // The solution is to perform so-called loop blocking, see, e.g.,
104 // https://en.wikipedia.org/wiki/Loop_nest_optimization - we split
105 // the loop over the variable i into tiles (or blocks) of size 8 to
106 // make sure the hardware prefetchers are able to follow all 8
107 // streams, using a variable called 'i_block', and then run the loop
108 // over 'c' inside. Since we do not want to re-load the entries of
109 // 'current_vector' every time we work on blocks of size 8 for the
110 // 'i' variable, but want to make sure to obtain them from faster
111 // cache memory, we also tile the loop over 'c' into blocks. These
112 // blocks have size 64, which is big enough to get most of the
113 // effect of the prefetchers (64 * inner_batch_size * n_lanes is already
114 // several kB of data) but small enough for 'current_vector' to
115 // still be in fast cache memory for the next round of
116 // 'i_block'. The end result is thus the three nested loops visible
117 // here, over 'c_block', 'i_block', and 'c', plus an inner loop over
118 // 'i' and the inner batch size for the actual work. Not the prettiest
119 // code, but it gives adequate performance.
120 for (unsigned int c_block = 0; c_block < (loop_length_c + 63) / 64;
121 ++c_block)
122 for (unsigned int i_block = 0; i_block < (n_vectors + 7) / 8;
123 ++i_block)
124 for (c = c_block * 64, j = c * n_lanes * inner_batch_size;
125 c < std::min(loop_length_c, (c_block + 1) * 64);
126 ++c, j += n_lanes * inner_batch_size)
127 {
128 VectorizedArray<double> vvec[inner_batch_size];
129 for (unsigned int k = 0; k < inner_batch_size; ++k)
130 vvec[k].load(current_vector + j + k * n_lanes);
131 VectorizedArray<double> prev_vector[inner_batch_size];
132 if (delayed_reorthogonalization || i_block == 0)
133 for (unsigned int k = 0; k < inner_batch_size; ++k)
134 prev_vector[k].load(orthogonal_vectors[n_vectors - 1] +
135 j + k * n_lanes);
136
137 if (i_block == 0)
138 {
139 VectorizedArray<double> local_sum_0 =
140 prev_vector[0] * vvec[0];
141 VectorizedArray<double> local_sum_1 =
142 prev_vector[0] * prev_vector[0];
143 VectorizedArray<double> local_sum_2 = vvec[0] * vvec[0];
144 for (unsigned int k = 1; k < inner_batch_size; ++k)
145 {
146 local_sum_0 += prev_vector[k] * vvec[k];
147 if (delayed_reorthogonalization)
148 {
149 local_sum_1 += prev_vector[k] * prev_vector[k];
150 local_sum_2 += vvec[k] * vvec[k];
151 }
152 }
153 hs[n_vectors - 1] += local_sum_0;
154 if (delayed_reorthogonalization)
155 {
156 correct[n_vectors - 1] += local_sum_1;
157 correct[n_vectors] += local_sum_2;
158 }
159 }
160
161 for (unsigned int i = i_block * 8;
162 i < std::min(n_vectors - 1, (i_block + 1) * 8);
163 ++i)
164 {
165 // break the dependency chain on the field hs[i] by first
166 // accumulating the 6 or 12 results of each batch
167 // into a local variable
168 VectorizedArray<double> temp;
169 temp.load(orthogonal_vectors[i] + j);
170 VectorizedArray<double> local_sum_0 = temp * vvec[0];
171 VectorizedArray<double> local_sum_1 =
172 delayed_reorthogonalization ? temp * prev_vector[0] :
173 0.;
174 for (unsigned int k = 1; k < inner_batch_size; ++k)
175 {
176 temp.load(orthogonal_vectors[i] + j + k * n_lanes);
177 local_sum_0 += temp * vvec[k];
178 if (delayed_reorthogonalization)
179 local_sum_1 += temp * prev_vector[k];
180 }
181 hs[i] += local_sum_0;
182 if (delayed_reorthogonalization)
183 correct[i] += local_sum_1;
184 }
185 }
186
187 c *= inner_batch_size;
188 for (; c < locally_owned_size / n_lanes; ++c, j += n_lanes)
189 {
190 VectorizedArray<double> vvec, prev_vector;
191 vvec.load(current_vector + j);
192 prev_vector.load(orthogonal_vectors[n_vectors - 1] + j);
193 hs[n_vectors - 1] += prev_vector * vvec;
194 if (delayed_reorthogonalization)
195 {
196 correct[n_vectors - 1] += prev_vector * prev_vector;
197 correct[n_vectors] += vvec * vvec;
198 }
199
200 for (unsigned int i = 0; i < n_vectors - 1; ++i)
201 {
202 VectorizedArray<double> temp;
203 temp.load(orthogonal_vectors[i] + j);
204 hs[i] += temp * vvec;
205 if (delayed_reorthogonalization)
206 correct[i] += temp * prev_vector;
207 }
208 }
209
210 for (unsigned int i = 0; i < n_vectors; ++i)
211 {
212 h(i) += hs[i].sum();
213 if (delayed_reorthogonalization)
214 h(i + n_vectors) += correct[i].sum();
215 }
216 if (delayed_reorthogonalization)
217 h(n_vectors + n_vectors) += correct[n_vectors].sum();
218 }
219
220 // remainder loop of optimized path or non-optimized path (if
221 // n_vectors > 128)
222 for (; j < locally_owned_size; ++j)
223 {
224 const double vvec = current_vector[j];
225 const double prev_vector = orthogonal_vectors[n_vectors - 1][j];
226 h(n_vectors - 1) += prev_vector * vvec;
227 if (delayed_reorthogonalization)
228 {
229 h(n_vectors + n_vectors - 1) += prev_vector * prev_vector;
230 h(n_vectors + n_vectors) += vvec * vvec;
231 }
232 for (unsigned int i = 0; i < n_vectors - 1; ++i)
233 {
234 const double temp = orthogonal_vectors[i][j];
235 h(i) += temp * vvec;
236 if (delayed_reorthogonalization)
237 h(n_vectors + i) += temp * prev_vector;
238 }
239 }
240 }
241
242
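The blocking scheme described in the long comment inside do_Tvmult_add can be seen in isolation in the following scalar sketch, with all GMRES and SIMD details stripped away. The helper blocked_dot_products, its parameters, and the chunk size are hypothetical illustrations, not part of deal.II:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical, simplified illustration of the tiling used above: compute
// the inner products of 'v' against each of the 'n_vectors' arrays in 'q',
// working on at most 8 of the q-arrays at a time ('i_block') and re-using
// the entries of 'v' from cache by also tiling the long loop ('c_block').
void blocked_dot_products(const unsigned int                 n_vectors,
                          const std::size_t                  n,
                          const double                      *v,
                          const std::vector<const double *> &q,
                          std::vector<double>               &result)
{
  constexpr std::size_t chunk    = 64; // entries handled per 'c' iteration
  const std::size_t     n_chunks = n / chunk;

  result.assign(n_vectors, 0.);

  for (std::size_t c_block = 0; c_block < (n_chunks + 63) / 64; ++c_block)
    for (unsigned int i_block = 0; i_block < (n_vectors + 7) / 8; ++i_block)
      for (std::size_t c = c_block * 64;
           c < std::min(n_chunks, (c_block + 1) * 64);
           ++c)
        for (unsigned int i = i_block * 8;
             i < std::min(n_vectors, (i_block + 1) * 8);
             ++i)
          {
            // accumulate into a local variable to avoid a loop-carried
            // dependency on result[i]
            double sum = 0.;
            for (std::size_t k = 0; k < chunk; ++k)
              sum += q[i][c * chunk + k] * v[c * chunk + k];
            result[i] += sum;
          }

  // scalar remainder for the entries not covered by full chunks
  for (std::size_t j = n_chunks * chunk; j < n; ++j)
    for (unsigned int i = 0; i < n_vectors; ++i)
      result[i] += q[i][j] * v[j];
}
```

Compared to a plain double loop over i and j, this keeps the number of simultaneously read streams at nine (eight q-arrays plus v), which is roughly what the comment above identifies as the limit the hardware prefetchers can track.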
243
244 template <bool delayed_reorthogonalization, typename Number>
245 double
246 do_subtract_and_norm(const unsigned int n_vectors,
247 const std::size_t locally_owned_size,
248 const std::vector<const Number *> &orthogonal_vectors,
249 const Vector<double> &h,
250 Number *current_vector)
251 {
252 double norm_vv_temp = 0;
253
254 Number *previous_vector =
255 const_cast<Number *>(orthogonal_vectors[n_vectors - 1]);
256 const double inverse_norm_previous =
257 delayed_reorthogonalization ? 1. / h(n_vectors + n_vectors - 1) : 0.;
258 const double scaling_factor_vv =
259 delayed_reorthogonalization ?
260 (h(n_vectors + n_vectors) > 0.0 ?
261 inverse_norm_previous / h(n_vectors + n_vectors) :
262 inverse_norm_previous / h(n_vectors + n_vectors - 1)) :
263 0.;
264 const double last_factor = h(n_vectors - 1);
265
266 VectorizedArray<double> norm_vv_temp_vectorized = 0.0;
267
268 static constexpr unsigned int n_lanes = VectorizedArray<double>::size();
269 constexpr unsigned int inner_batch_size =
270 delayed_reorthogonalization ? 6 : 12;
271
272 // As for the do_Tvmult_add loop above, we perform loop blocking on both
273 // the 'i' and 'c' variables to help the hardware prefetchers perform
274 // adequately, and get three nested loops here plus the inner loops. See
275 // the extensive comments above for the full rationale.
276 unsigned int j = 0;
277 unsigned int c = 0;
278 const unsigned int loop_length_c =
279 locally_owned_size / n_lanes / inner_batch_size;
280 const unsigned int loop_length_i = (n_vectors + 7) / 8;
281 for (unsigned int c_block = 0; c_block < (loop_length_c + 63) / 64;
282 ++c_block)
283 for (unsigned int i_block = 0; i_block < (n_vectors + 7) / 8; ++i_block)
284 for (c = c_block * 64, j = c * n_lanes * inner_batch_size;
285 c < std::min(loop_length_c, (c_block + 1) * 64);
286 ++c, j += n_lanes * inner_batch_size)
287 {
288 VectorizedArray<double> temp[inner_batch_size];
289 for (unsigned int k = 0; k < inner_batch_size; ++k)
290 temp[k].load(current_vector + j + k * n_lanes);
291 VectorizedArray<double> prev_vector[inner_batch_size];
292 if (delayed_reorthogonalization)
293 for (unsigned int k = 0; k < inner_batch_size; ++k)
294 prev_vector[k].load(previous_vector + j + k * n_lanes);
295
296 for (unsigned int i = i_block * 8;
297 i < std::min(n_vectors - 1, (i_block + 1) * 8);
298 ++i)
299 {
300 const double factor = h(i);
301 const double correction_factor =
302 (delayed_reorthogonalization ? h(n_vectors + i) : 0.0);
303 for (unsigned int k = 0; k < inner_batch_size; ++k)
304 {
305 VectorizedArray<double> vec;
306 vec.load(orthogonal_vectors[i] + j + k * n_lanes);
307 temp[k] -= factor * vec;
308 if (delayed_reorthogonalization)
309 prev_vector[k] -= correction_factor * vec;
310 }
311 }
312
313 if (delayed_reorthogonalization)
314 {
315 if (i_block + 1 == loop_length_i)
316 for (unsigned int k = 0; k < inner_batch_size; ++k)
317 {
318 prev_vector[k] = prev_vector[k] * inverse_norm_previous;
319 prev_vector[k].store(previous_vector + j + k * n_lanes);
320 temp[k] -= last_factor * prev_vector[k];
321 temp[k] = temp[k] * scaling_factor_vv;
322 temp[k].store(current_vector + j + k * n_lanes);
323 }
324 else
325 for (unsigned int k = 0; k < inner_batch_size; ++k)
326 {
327 prev_vector[k].store(previous_vector + j + k * n_lanes);
328 temp[k].store(current_vector + j + k * n_lanes);
329 }
330 }
331 else
332 {
333 if (i_block + 1 == loop_length_i)
334 for (unsigned int k = 0; k < inner_batch_size; ++k)
335 {
336 prev_vector[k].load(previous_vector + j + k * n_lanes);
337 temp[k] -= last_factor * prev_vector[k];
338 temp[k].store(current_vector + j + k * n_lanes);
339 norm_vv_temp_vectorized += temp[k] * temp[k];
340 }
341 else
342 for (unsigned int k = 0; k < inner_batch_size; ++k)
343 temp[k].store(current_vector + j + k * n_lanes);
344 }
345 }
346
347 c *= inner_batch_size;
348 for (; c < locally_owned_size / n_lanes; ++c, j += n_lanes)
349 {
350 VectorizedArray<double> temp, prev_vector;
351 temp.load(current_vector + j);
352 prev_vector.load(previous_vector + j);
353 if (!delayed_reorthogonalization)
354 temp -= h(n_vectors - 1) * prev_vector;
355
356 for (unsigned int i = 0; i < n_vectors - 1; ++i)
357 {
358 VectorizedArray<double> vec;
359 vec.load(orthogonal_vectors[i] + j);
360 temp -= h(i) * vec;
361 if (delayed_reorthogonalization)
362 prev_vector -= h(n_vectors + i) * vec;
363 }
364
365 if (delayed_reorthogonalization)
366 {
367 prev_vector = prev_vector * inverse_norm_previous;
368 prev_vector.store(previous_vector + j);
369 temp -= h(n_vectors - 1) * prev_vector;
370 temp = temp * scaling_factor_vv;
371 temp.store(current_vector + j);
372 }
373 else
374 {
375 temp.store(current_vector + j);
376 norm_vv_temp_vectorized += temp * temp;
377 }
378 }
379
380 if (!delayed_reorthogonalization)
381 norm_vv_temp += norm_vv_temp_vectorized.sum();
382
383 for (; j < locally_owned_size; ++j)
384 {
385 double temp = current_vector[j];
386 double prev_vector = previous_vector[j];
387 if (delayed_reorthogonalization)
388 {
389 for (unsigned int i = 0; i < n_vectors - 1; ++i)
390 {
391 const double vec = orthogonal_vectors[i][j];
392 temp -= h(i) * vec;
393 prev_vector -= h(n_vectors + i) * vec;
394 }
395 prev_vector *= inverse_norm_previous;
396 previous_vector[j] = prev_vector;
397 temp -= h(n_vectors - 1) * prev_vector;
398 temp *= scaling_factor_vv;
399 }
400 else
401 {
402 temp -= h(n_vectors - 1) * prev_vector;
403 for (unsigned int i = 0; i < n_vectors - 1; ++i)
404 temp -= h(i) * orthogonal_vectors[i][j];
405 norm_vv_temp += temp * temp;
406 }
407 current_vector[j] = temp;
408 }
409
410 return norm_vv_temp;
411 }
412
413
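Taken together, do_Tvmult_add and do_subtract_and_norm implement one Gram-Schmidt step: compute the projections h_i onto the previous vectors, subtract them from the current vector, and return its (squared, locally owned) norm. A minimal scalar sketch of that step in the non-delayed case, without MPI, SIMD, or blocking; the helper gram_schmidt_step_reference and its interface are hypothetical:

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical scalar reference: h_i = <q_i, v> for all previous vectors
// q_i, then v <- v - sum_i h_i q_i, returning the norm of the updated v.
double gram_schmidt_step_reference(const unsigned int                 n_vectors,
                                   const std::size_t                  n,
                                   const std::vector<const double *> &q,
                                   std::vector<double>               &h,
                                   double                            *v)
{
  // projections onto the previous vectors (what do_Tvmult_add accumulates)
  h.assign(n_vectors, 0.);
  for (std::size_t j = 0; j < n; ++j)
    for (unsigned int i = 0; i < n_vectors; ++i)
      h[i] += q[i][j] * v[j];

  // subtract the projections and accumulate the squared norm
  // (what do_subtract_and_norm does)
  double norm_sq = 0.;
  for (std::size_t j = 0; j < n; ++j)
    {
      double t = v[j];
      for (unsigned int i = 0; i < n_vectors; ++i)
        t -= h[i] * q[i][j];
      v[j] = t;
      norm_sq += t * t;
    }
  return std::sqrt(norm_sq);
}
```

Note that the sketch takes the square root directly, whereas do_subtract_and_norm returns the local sum of squares so that the caller can first accumulate the contributions of all MPI ranks.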
414
415 template <typename Number>
416 void
417 do_add(const unsigned int n_vectors,
418 const std::size_t locally_owned_size,
419 const std::vector<const Number *> &tmp_vectors,
420 const Vector<double> &h,
421 const bool zero_out,
422 Number *output)
423 {
424 static constexpr unsigned int n_lanes = VectorizedArray<double>::size();
425 constexpr unsigned int inner_batch_size = 12;
426
427 unsigned int j = 0;
428 unsigned int c = 0;
429 for (; c < locally_owned_size / n_lanes / inner_batch_size;
430 ++c, j += n_lanes * inner_batch_size)
431 {
432 VectorizedArray<double> temp[inner_batch_size];
433 if (zero_out)
434 for (VectorizedArray<double> &a : temp)
435 a = {};
436 else
437 for (unsigned int k = 0; k < inner_batch_size; ++k)
438 temp[k].load(output + j + k * n_lanes);
439
440 for (unsigned int i = 0; i < n_vectors; ++i)
441 {
442 const double h_i = h(i);
443 for (unsigned int k = 0; k < inner_batch_size; ++k)
444 {
445 VectorizedArray<double> v_ij;
446 v_ij.load(tmp_vectors[i] + j + k * n_lanes);
447 temp[k] += v_ij * h_i;
448 }
449 }
450
451 for (unsigned int k = 0; k < inner_batch_size; ++k)
452 temp[k].store(output + j + k * n_lanes);
453 }
454
455 c *= inner_batch_size;
456 for (; c < locally_owned_size / n_lanes; ++c, j += n_lanes)
457 {
458 VectorizedArray<double> temp = {};
459 if (!zero_out)
460 temp.load(output + j);
461
462 for (unsigned int i = 0; i < n_vectors; ++i)
463 {
464 VectorizedArray<double> v_ij;
465 v_ij.load(tmp_vectors[i] + j);
466 temp += v_ij * h(i);
467 }
468
469 temp.store(output + j);
470 }
471
472 for (; j < locally_owned_size; ++j)
473 {
474 double temp = zero_out ? 0.0 : output[j];
475 for (unsigned int i = 0; i < n_vectors; ++i)
476 temp += tmp_vectors[i][j] * h(i);
477 output[j] = temp;
478 }
479 }
480 } // namespace SolverGMRESImplementation
481} // namespace internal
482
483#include "solver_gmres.inst"
484
485DEAL_II_NAMESPACE_CLOSE
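The kernels in this file rely on only a handful of VectorizedArray<double> operations: load(), store(), the arithmetic operators, and sum() for the final reduction across SIMD lanes. A minimal sketch of that pattern, assuming a deal.II installation so that <deal.II/base/vectorization.h> is available; the helper vectorized_dot itself is hypothetical and not part of the library:

```cpp
#include <deal.II/base/vectorization.h>

#include <cstddef>

// Hypothetical helper showing the load / multiply-accumulate / sum()
// pattern used above: a dot product processed in SIMD batches of n_lanes
// entries, with a scalar remainder loop at the end.
double vectorized_dot(const double *a, const double *b, const std::size_t n)
{
  using dealii::VectorizedArray;
  constexpr unsigned int n_lanes = VectorizedArray<double>::size();

  VectorizedArray<double> partial = 0.;
  std::size_t             j       = 0;
  for (; j + n_lanes <= n; j += n_lanes)
    {
      VectorizedArray<double> va, vb;
      va.load(a + j); // read n_lanes consecutive entries
      vb.load(b + j);
      partial += va * vb;
    }

  double result = partial.sum(); // reduce across the SIMD lanes
  for (; j < n; ++j)             // remainder not filling a full batch
    result += a[j] * b[j];
  return result;
}
```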