solver_gmres.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#include <deal.II/base/vectorization.h>

#include <deal.II/lac/solver_gmres.h>

DEAL_II_NAMESPACE_OPEN

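The three kernels in this file are written against a small set of VectorizedArray<double> operations: load() fills the SIMD lanes from VectorizedArray<double>::size() contiguous doubles, store() writes them back, arithmetic acts lane-wise, and sum() reduces across the lanes. A minimal standalone sketch of these operations (the example program is illustrative; the header and member functions are the actual deal.II API):

#include <deal.II/base/vectorization.h>

#include <iostream>

int main()
{
  using dealii::VectorizedArray;

  // number of doubles processed per SIMD instruction on this platform
  constexpr unsigned int n_lanes = VectorizedArray<double>::size();

  double data[2 * n_lanes];
  for (unsigned int i = 0; i < 2 * n_lanes; ++i)
    data[i] = i;

  VectorizedArray<double> a, b;
  a.load(data);           // lanes <- data[0 .. n_lanes)
  b.load(data + n_lanes); // lanes <- data[n_lanes .. 2*n_lanes)

  // lane-wise product followed by a horizontal reduction: the dot
  // product of the two blocks
  std::cout << (a * b).sum() << std::endl;
}
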
namespace internal
{
  namespace SolverGMRESImplementation
  {
    template <bool delayed_reorthogonalization, typename Number>
    void
    do_Tvmult_add(const unsigned int                 n_vectors,
                  const std::size_t                  locally_owned_size,
                  const Number                      *current_vector,
                  const std::vector<const Number *> &orthogonal_vectors,
                  Vector<double>                    &h)
    {
      unsigned int j = 0;

      if (n_vectors <= 128)
        {
          // optimized path
          static constexpr unsigned int n_lanes =
            VectorizedArray<double>::size();

          VectorizedArray<double> hs[128];
          for (unsigned int i = 0; i < n_vectors; ++i)
            hs[i] = 0.0;
          VectorizedArray<double>
            correct[delayed_reorthogonalization ? 129 : 1];
          if (delayed_reorthogonalization)
            for (unsigned int i = 0; i < n_vectors + 1; ++i)
              correct[i] = 0.0;

          unsigned int c = 0;

          constexpr unsigned int inner_batch_size =
            delayed_reorthogonalization ? 6 : 12;

          for (; c < locally_owned_size / n_lanes / inner_batch_size;
               ++c, j += n_lanes * inner_batch_size)
            {
              VectorizedArray<double> vvec[inner_batch_size];
              for (unsigned int k = 0; k < inner_batch_size; ++k)
                vvec[k].load(current_vector + j + k * n_lanes);
              VectorizedArray<double> prev_vector[inner_batch_size];
              for (unsigned int k = 0; k < inner_batch_size; ++k)
                prev_vector[k].load(orthogonal_vectors[n_vectors - 1] + j +
                                    k * n_lanes);

              {
                VectorizedArray<double> local_sum_0 = prev_vector[0] * vvec[0];
                VectorizedArray<double> local_sum_1 =
                  prev_vector[0] * prev_vector[0];
                VectorizedArray<double> local_sum_2 = vvec[0] * vvec[0];
                for (unsigned int k = 1; k < inner_batch_size; ++k)
                  {
                    local_sum_0 += prev_vector[k] * vvec[k];
                    if (delayed_reorthogonalization)
                      {
                        local_sum_1 += prev_vector[k] * prev_vector[k];
                        local_sum_2 += vvec[k] * vvec[k];
                      }
                  }
                hs[n_vectors - 1] += local_sum_0;
                if (delayed_reorthogonalization)
                  {
                    correct[n_vectors - 1] += local_sum_1;
                    correct[n_vectors] += local_sum_2;
                  }
              }

              for (unsigned int i = 0; i < n_vectors - 1; ++i)
                {
                  // break the dependency chain into the field hs[i] for
                  // small sizes i by first accumulating 4 or 8 results
                  // into a local variable
                  VectorizedArray<double> temp;
                  temp.load(orthogonal_vectors[i] + j);
                  VectorizedArray<double> local_sum_0 = temp * vvec[0];
                  VectorizedArray<double> local_sum_1 =
                    delayed_reorthogonalization ? temp * prev_vector[0] : 0.;
                  for (unsigned int k = 1; k < inner_batch_size; ++k)
                    {
                      temp.load(orthogonal_vectors[i] + j + k * n_lanes);
                      local_sum_0 += temp * vvec[k];
                      if (delayed_reorthogonalization)
                        local_sum_1 += temp * prev_vector[k];
                    }
                  hs[i] += local_sum_0;
                  if (delayed_reorthogonalization)
                    correct[i] += local_sum_1;
                }
            }

          // process the remaining full SIMD blocks without inner batching
          c *= inner_batch_size;
          for (; c < locally_owned_size / n_lanes; ++c, j += n_lanes)
            {
              VectorizedArray<double> vvec, prev_vector;
              vvec.load(current_vector + j);
              prev_vector.load(orthogonal_vectors[n_vectors - 1] + j);
              hs[n_vectors - 1] += prev_vector * vvec;
              if (delayed_reorthogonalization)
                {
                  correct[n_vectors - 1] += prev_vector * prev_vector;
                  correct[n_vectors] += vvec * vvec;
                }

              for (unsigned int i = 0; i < n_vectors - 1; ++i)
                {
                  VectorizedArray<double> temp;
                  temp.load(orthogonal_vectors[i] + j);
                  hs[i] += temp * vvec;
                  if (delayed_reorthogonalization)
                    correct[i] += temp * prev_vector;
                }
            }

          // reduce the per-lane accumulators into the result vector h
          for (unsigned int i = 0; i < n_vectors; ++i)
            {
              h(i) += hs[i].sum();
              if (delayed_reorthogonalization)
                h(i + n_vectors) += correct[i].sum();
            }
          if (delayed_reorthogonalization)
            h(n_vectors + n_vectors) += correct[n_vectors].sum();
        }

      // remainder loop of optimized path or non-optimized path (if
      // n>128)
      for (; j < locally_owned_size; ++j)
        {
          const double vvec        = current_vector[j];
          const double prev_vector = orthogonal_vectors[n_vectors - 1][j];
          h(n_vectors - 1) += prev_vector * vvec;
          if (delayed_reorthogonalization)
            {
              h(n_vectors + n_vectors - 1) += prev_vector * prev_vector;
              h(n_vectors + n_vectors) += vvec * vvec;
            }
          for (unsigned int i = 0; i < n_vectors - 1; ++i)
            {
              const double temp = orthogonal_vectors[i][j];
              h(i) += temp * vvec;
              if (delayed_reorthogonalization)
                h(n_vectors + i) += temp * prev_vector;
            }
        }
    }

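Stripped of the SIMD batching, do_Tvmult_add accumulates h(i) += <v_i, w> for every stored basis vector v_i against the current vector w, i.e. the transposed matrix-vector product V^T w of classical Gram-Schmidt; the delayed-reorthogonalization variant additionally records <v_{n-1}, v_{n-1}> and <w, w> in the upper half of h so the previous vector can be renormalized later. A minimal scalar reference for the non-delayed path (the helper name and std::vector interface are illustrative, not deal.II API):

#include <cstddef>
#include <vector>

// Scalar equivalent of do_Tvmult_add<false, double> above: accumulate the
// dot product of each stored vector with the current vector into h.
// Hypothetical reference helper, not part of deal.II.
void reference_Tvmult_add(const unsigned int                 n_vectors,
                          const std::size_t                  locally_owned_size,
                          const double                      *current_vector,
                          const std::vector<const double *> &orthogonal_vectors,
                          std::vector<double>               &h)
{
  for (std::size_t j = 0; j < locally_owned_size; ++j)
    for (unsigned int i = 0; i < n_vectors; ++i)
      h[i] += orthogonal_vectors[i][j] * current_vector[j];
}

The optimized path above computes the same sums with n_lanes entries per instruction and an inner batch of 12 (or 6) SIMD loads per vector, reducing across the lanes only once at the end via sum().
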
    template <bool delayed_reorthogonalization, typename Number>
    double
    do_subtract_and_norm(const unsigned int                 n_vectors,
                         const std::size_t                  locally_owned_size,
                         const std::vector<const Number *> &orthogonal_vectors,
                         const Vector<double>              &h,
                         Number                            *current_vector)
    {
      double norm_vv_temp = 0;

      Number *previous_vector =
        const_cast<Number *>(orthogonal_vectors[n_vectors - 1]);
      const double inverse_norm_previous =
        delayed_reorthogonalization ? 1. / h(n_vectors + n_vectors - 1) : 0.;
      const double scaling_factor_vv =
        delayed_reorthogonalization ?
          (h(n_vectors + n_vectors) > 0.0 ?
             inverse_norm_previous / h(n_vectors + n_vectors) :
             inverse_norm_previous / h(n_vectors + n_vectors - 1)) :
          0.;
      VectorizedArray<double> norm_vv_temp_vectorized = 0.0;

      static constexpr unsigned int n_lanes = VectorizedArray<double>::size();
      constexpr unsigned int inner_batch_size =
        delayed_reorthogonalization ? 6 : 12;

      unsigned int j = 0;
      unsigned int c = 0;
      for (; c < locally_owned_size / n_lanes / inner_batch_size;
           ++c, j += n_lanes * inner_batch_size)
        {
          VectorizedArray<double> temp[inner_batch_size];
          VectorizedArray<double> prev_vector[inner_batch_size];

          const double last_factor = h(n_vectors - 1);
          for (unsigned int k = 0; k < inner_batch_size; ++k)
            {
              temp[k].load(current_vector + j + k * n_lanes);
              prev_vector[k].load(previous_vector + j + k * n_lanes);
              if (!delayed_reorthogonalization)
                temp[k] -= last_factor * prev_vector[k];
            }

          for (unsigned int i = 0; i < n_vectors - 1; ++i)
            {
              const double factor = h(i);
              const double correction_factor =
                (delayed_reorthogonalization ? h(n_vectors + i) : 0.0);
              for (unsigned int k = 0; k < inner_batch_size; ++k)
                {
                  VectorizedArray<double> vec;
                  vec.load(orthogonal_vectors[i] + j + k * n_lanes);
                  temp[k] -= factor * vec;
                  if (delayed_reorthogonalization)
                    prev_vector[k] -= correction_factor * vec;
                }
            }

          if (delayed_reorthogonalization)
            for (unsigned int k = 0; k < inner_batch_size; ++k)
              {
                prev_vector[k] = prev_vector[k] * inverse_norm_previous;
                prev_vector[k].store(previous_vector + j + k * n_lanes);
                temp[k] -= last_factor * prev_vector[k];
                temp[k] = temp[k] * scaling_factor_vv;
                temp[k].store(current_vector + j + k * n_lanes);
              }
          else
            for (unsigned int k = 0; k < inner_batch_size; ++k)
              {
                temp[k].store(current_vector + j + k * n_lanes);
                norm_vv_temp_vectorized += temp[k] * temp[k];
              }
        }

      // process the remaining full SIMD blocks without inner batching
      c *= inner_batch_size;
      for (; c < locally_owned_size / n_lanes; ++c, j += n_lanes)
        {
          VectorizedArray<double> temp, prev_vector;
          temp.load(current_vector + j);
          prev_vector.load(previous_vector + j);
          if (!delayed_reorthogonalization)
            temp -= h(n_vectors - 1) * prev_vector;

          for (unsigned int i = 0; i < n_vectors - 1; ++i)
            {
              VectorizedArray<double> vec;
              vec.load(orthogonal_vectors[i] + j);
              temp -= h(i) * vec;
              if (delayed_reorthogonalization)
                prev_vector -= h(n_vectors + i) * vec;
            }

          if (delayed_reorthogonalization)
            {
              prev_vector = prev_vector * inverse_norm_previous;
              prev_vector.store(previous_vector + j);
              temp -= h(n_vectors - 1) * prev_vector;
              temp = temp * scaling_factor_vv;
              temp.store(current_vector + j);
            }
          else
            {
              temp.store(current_vector + j);
              norm_vv_temp_vectorized += temp * temp;
            }
        }

      if (!delayed_reorthogonalization)
        norm_vv_temp += norm_vv_temp_vectorized.sum();

      // scalar remainder loop for the last few entries
      for (; j < locally_owned_size; ++j)
        {
          double temp        = current_vector[j];
          double prev_vector = previous_vector[j];
          if (delayed_reorthogonalization)
            {
              for (unsigned int i = 0; i < n_vectors - 1; ++i)
                {
                  const double vec = orthogonal_vectors[i][j];
                  temp -= h(i) * vec;
                  prev_vector -= h(n_vectors + i) * vec;
                }
              prev_vector *= inverse_norm_previous;
              previous_vector[j] = prev_vector;
              temp -= h(n_vectors - 1) * prev_vector;
              temp *= scaling_factor_vv;
            }
          else
            {
              temp -= h(n_vectors - 1) * prev_vector;
              for (unsigned int i = 0; i < n_vectors - 1; ++i)
                temp -= h(i) * orthogonal_vectors[i][j];
              norm_vv_temp += temp * temp;
            }
          current_vector[j] = temp;
        }

      return norm_vv_temp;
    }

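In the same spirit, the non-delayed path of do_subtract_and_norm subtracts the projections h(i) v_i from the current vector and returns the squared norm of the result; taking the square root is left to the caller. A scalar sketch (hypothetical helper, not deal.II API):

#include <cstddef>
#include <vector>

// Scalar equivalent of do_subtract_and_norm<false, double> above:
// orthogonalize the current vector against all stored vectors using the
// precomputed coefficients h[i] and return the squared norm of the
// result. Hypothetical reference helper, not part of deal.II.
double reference_subtract_and_norm(
  const unsigned int                 n_vectors,
  const std::size_t                  locally_owned_size,
  const std::vector<const double *> &orthogonal_vectors,
  const std::vector<double>         &h,
  double                            *current_vector)
{
  double norm_sqr = 0.;
  for (std::size_t j = 0; j < locally_owned_size; ++j)
    {
      double temp = current_vector[j];
      for (unsigned int i = 0; i < n_vectors; ++i)
        temp -= h[i] * orthogonal_vectors[i][j];
      current_vector[j] = temp;
      norm_sqr += temp * temp;
    }
  return norm_sqr; // caller takes the square root
}

The delayed-reorthogonalization branch additionally rescales the previous vector by 1 / h(2 n_vectors - 1) and the current vector by scaling_factor_vv, which is why that branch writes back into previous_vector as well.
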
    template <typename Number>
    void
    do_add(const unsigned int                 n_vectors,
           const std::size_t                  locally_owned_size,
           const std::vector<const Number *> &tmp_vectors,
           const Vector<double>              &h,
           const bool                         zero_out,
           Number                            *output)
    {
      static constexpr unsigned int n_lanes = VectorizedArray<double>::size();
      constexpr unsigned int        inner_batch_size = 12;

      unsigned int j = 0;
      unsigned int c = 0;
      for (; c < locally_owned_size / n_lanes / inner_batch_size;
           ++c, j += n_lanes * inner_batch_size)
        {
          VectorizedArray<double> temp[inner_batch_size];
          if (zero_out)
            for (VectorizedArray<double> &a : temp)
              a = {};
          else
            for (unsigned int k = 0; k < inner_batch_size; ++k)
              temp[k].load(output + j + k * n_lanes);

          for (unsigned int i = 0; i < n_vectors; ++i)
            {
              const double h_i = h(i);
              for (unsigned int k = 0; k < inner_batch_size; ++k)
                {
                  VectorizedArray<double> v_ij;
                  v_ij.load(tmp_vectors[i] + j + k * n_lanes);
                  temp[k] += v_ij * h_i;
                }
            }

          for (unsigned int k = 0; k < inner_batch_size; ++k)
            temp[k].store(output + j + k * n_lanes);
        }

      // process the remaining full SIMD blocks without inner batching
      c *= inner_batch_size;
      for (; c < locally_owned_size / n_lanes; ++c, j += n_lanes)
        {
          VectorizedArray<double> temp = {};
          if (!zero_out)
            temp.load(output + j);

          for (unsigned int i = 0; i < n_vectors; ++i)
            {
              VectorizedArray<double> v_ij;
              v_ij.load(tmp_vectors[i] + j);
              temp += v_ij * h(i);
            }

          temp.store(output + j);
        }

      // scalar remainder loop for the last few entries
      for (; j < locally_owned_size; ++j)
        {
          double temp = zero_out ? 0.0 : output[j];
          for (unsigned int i = 0; i < n_vectors; ++i)
            temp += tmp_vectors[i][j] * h(i);
          output[j] = temp;
        }
    }
  } // namespace SolverGMRESImplementation
} // namespace internal

#include "solver_gmres.inst"

DEAL_II_NAMESPACE_CLOSE
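Finally, do_add forms output (+)= sum_i h(i) * tmp_vectors[i], the linear combination used, for example, when the solution update is assembled from the Krylov basis vectors. A scalar sketch equivalent to its remainder loop (hypothetical helper, not deal.II API):

#include <cstddef>
#include <vector>

// Scalar equivalent of do_add<double> above: form the linear combination
// of the stored vectors with coefficients h[i], either overwriting or
// accumulating into output. Hypothetical reference helper, not part of
// deal.II.
void reference_add(const unsigned int                 n_vectors,
                   const std::size_t                  locally_owned_size,
                   const std::vector<const double *> &tmp_vectors,
                   const std::vector<double>         &h,
                   const bool                         zero_out,
                   double                            *output)
{
  for (std::size_t j = 0; j < locally_owned_size; ++j)
    {
      double temp = zero_out ? 0. : output[j];
      for (unsigned int i = 0; i < n_vectors; ++i)
        temp += tmp_vectors[i][j] * h[i];
      output[j] = temp;
    }
}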