Reference documentation for deal.II version 9.4.1
cuda_fe_evaluation.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2016 - 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_cuda_fe_evaluation_h
#define dealii_cuda_fe_evaluation_h

#include <deal.II/base/config.h>

#ifdef DEAL_II_COMPILER_CUDA_AWARE

#  include <deal.II/base/tensor.h>
#  include <deal.II/base/utilities.h>

#  include <deal.II/matrix_free/cuda_hanging_nodes_internal.h>
#  include <deal.II/matrix_free/cuda_matrix_free.h>
#  include <deal.II/matrix_free/cuda_matrix_free.templates.h>
#  include <deal.II/matrix_free/cuda_tensor_product_kernels.h>

DEAL_II_NAMESPACE_OPEN

/**
 * Namespace for the CUDA wrappers.
 */
namespace CUDAWrappers
{
  namespace internal
  {
    /**
     * Compute the dof/quad index for a given thread id, dimension, and
     * number of points in each space dimension.
     */
    template <int dim, int n_points_1d>
    __device__ inline unsigned int
    compute_index()
    {
      return (dim == 1 ? threadIdx.x % n_points_1d :
              dim == 2 ? threadIdx.x % n_points_1d + n_points_1d * threadIdx.y :
                         threadIdx.x % n_points_1d +
                           n_points_1d *
                             (threadIdx.y + n_points_1d * threadIdx.z));
    }
  } // namespace internal

  /**
   * This class provides all the functions necessary to evaluate functions
   * at quadrature points and to perform cell integrations on the device.
   * It is the CUDA counterpart of the CPU-side FEEvaluation class.
   */
  template <int dim,
            int fe_degree,
            int n_q_points_1d = fe_degree + 1,
            int n_components_ = 1,
            typename Number   = double>
  class FEEvaluation
  {
  public:
    /**
     * An alias for scalar quantities.
     */
    using value_type = Number;

    /**
     * An alias for vectorial quantities.
     */
    using gradient_type = Tensor<1, dim, Number>;

    /**
     * An alias to kernel-specific information.
     */
    using data_type = typename MatrixFree<dim, Number>::Data;

    /**
     * Dimension.
     */
    static constexpr unsigned int dimension = dim;

    /**
     * Number of components.
     */
    static constexpr unsigned int n_components = n_components_;

    /**
     * Number of quadrature points per cell.
     */
    static constexpr unsigned int n_q_points =
      Utilities::pow(n_q_points_1d, dim);

    /**
     * Number of tensor degrees of freedom per cell.
     */
    static constexpr unsigned int tensor_dofs_per_cell =
      Utilities::pow(fe_degree + 1, dim);

    /**
     * Constructor.
     */
    __device__
    FEEvaluation(const unsigned int       cell_id,
                 const data_type *        data,
                 SharedData<dim, Number> *shdata);

    /**
     * Read the values on the degrees of freedom of the current cell from
     * @p src and store them internally.
     */
    __device__ void
    read_dof_values(const Number *src);

    /**
     * Sum the values stored internally on the degrees of freedom of the
     * current cell into the vector @p dst.
     */
    __device__ void
    distribute_local_to_global(Number *dst) const;

    /**
     * Evaluate the function values and/or gradients of the finite element
     * function at the quadrature points.
     */
    __device__ void
    evaluate(const bool evaluate_val, const bool evaluate_grad);

    /**
     * Test the values and/or gradients stored on the quadrature points by
     * all basis functions/gradients and perform the cell integration.
     */
    __device__ void
    integrate(const bool integrate_val, const bool integrate_grad);

    /**
     * Return the value at the quadrature point computed from the thread id.
     */
    __device__ value_type
    get_value() const;

    /**
     * Return the value at the local dof index computed from the thread id.
     */
    __device__ value_type
    get_dof_value() const;

    /**
     * Write a value to the quadrature point computed from the thread id.
     */
    __device__ void
    submit_value(const value_type &val_in);

    /**
     * Write a value to the local dof index computed from the thread id.
     */
    __device__ void
    submit_dof_value(const value_type &val_in);

    /**
     * Return the gradient at the quadrature point computed from the thread
     * id.
     */
    __device__ gradient_type
    get_gradient() const;

    /**
     * Write a gradient to the quadrature point computed from the thread id.
     */
    __device__ void
    submit_gradient(const gradient_type &grad_in);

    /**
     * Apply the functor @p func at every quadrature point. The functor is
     * called with a pointer to this FEEvaluation object as its only
     * argument.
     */
    template <typename Functor>
    __device__ void
    apply_for_each_quad_point(const Functor &func);

  private:
    types::global_dof_index *local_to_global;
    unsigned int             n_cells;
    unsigned int             padding_length;
    const unsigned int       mf_object_id;

    const dealii::internal::MatrixFreeFunctions::ConstraintKinds
      constraint_mask;

    const bool use_coloring;

    Number *inv_jac;
    Number *JxW;

    // Internal buffer
    Number *values;
    Number *gradients[dim];
  };


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    FEEvaluation(const unsigned int       cell_id,
                 const data_type *        data,
                 SharedData<dim, Number> *shdata)
    : n_cells(data->n_cells)
    , padding_length(data->padding_length)
    , mf_object_id(data->id)
    , constraint_mask(data->constraint_mask[cell_id])
    , use_coloring(data->use_coloring)
    , values(shdata->values)
  {
    local_to_global = data->local_to_global + padding_length * cell_id;
    inv_jac         = data->inv_jacobian + padding_length * cell_id;
    JxW             = data->JxW + padding_length * cell_id;

    for (unsigned int i = 0; i < dim; ++i)
      gradients[i] = shdata->gradients[i];
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    read_dof_values(const Number *src)
  {
    static_assert(n_components_ == 1,
                  "This function only supports FE with one component");

    const unsigned int idx = internal::compute_index<dim, n_q_points_1d>();

    const types::global_dof_index src_idx = local_to_global[idx];
    // Use the read-only data cache.
    values[idx] = __ldg(&src[src_idx]);

    __syncthreads();

    internal::resolve_hanging_nodes<dim, fe_degree, false>(constraint_mask,
                                                           values);
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    distribute_local_to_global(Number *dst) const
  {
    static_assert(n_components_ == 1,
                  "This function only supports FE with one component");

    internal::resolve_hanging_nodes<dim, fe_degree, true>(constraint_mask,
                                                          values);

    const unsigned int idx = internal::compute_index<dim, n_q_points_1d>();

    const types::global_dof_index destination_idx = local_to_global[idx];

    if (use_coloring)
      dst[destination_idx] += values[idx];
    else
      atomicAdd(&dst[destination_idx], values[idx]);
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::evaluate(
    const bool evaluate_val,
    const bool evaluate_grad)
  {
    // Evaluate the gradients first because they use the values, which are
    // overwritten if evaluate_val is true.
    internal::EvaluatorTensorProduct<
      internal::EvaluatorVariant::evaluate_general,
      dim,
      fe_degree,
      n_q_points_1d,
      Number>
      evaluator_tensor_product(mf_object_id);

    if (evaluate_val == true && evaluate_grad == true)
      {
        evaluator_tensor_product.value_and_gradient_at_quad_pts(values,
                                                                gradients);
        __syncthreads();
      }
    else if (evaluate_grad == true)
      {
        evaluator_tensor_product.gradient_at_quad_pts(values, gradients);
        __syncthreads();
      }
    else if (evaluate_val == true)
      {
        evaluator_tensor_product.value_at_quad_pts(values);
        __syncthreads();
      }
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::integrate(
    const bool integrate_val,
    const bool integrate_grad)
  {
    internal::EvaluatorTensorProduct<
      internal::EvaluatorVariant::evaluate_general,
      dim,
      fe_degree,
      n_q_points_1d,
      Number>
      evaluator_tensor_product(mf_object_id);

    if (integrate_val == true && integrate_grad == true)
      {
        evaluator_tensor_product.integrate_value_and_gradient(values,
                                                              gradients);
      }
    else if (integrate_val == true)
      {
        evaluator_tensor_product.integrate_value(values);
        __syncthreads();
      }
    else if (integrate_grad == true)
      {
        evaluator_tensor_product.template integrate_gradient<false>(values,
                                                                    gradients);
        __syncthreads();
      }
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ typename FEEvaluation<dim,
                                   fe_degree,
                                   n_q_points_1d,
                                   n_components_,
                                   Number>::value_type
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    get_value() const
  {
    const unsigned int q_point = internal::compute_index<dim, n_q_points_1d>();
    return values[q_point];
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ typename FEEvaluation<dim,
                                   fe_degree,
                                   n_q_points_1d,
                                   n_components_,
                                   Number>::value_type
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    get_dof_value() const
  {
    const unsigned int dof = internal::compute_index<dim, fe_degree + 1>();
    return values[dof];
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    submit_value(const value_type &val_in)
  {
    const unsigned int q_point = internal::compute_index<dim, n_q_points_1d>();
    values[q_point] = val_in * JxW[q_point];
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    submit_dof_value(const value_type &val_in)
  {
    const unsigned int dof = internal::compute_index<dim, fe_degree + 1>();
    values[dof] = val_in;
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ typename FEEvaluation<dim,
                                   fe_degree,
                                   n_q_points_1d,
                                   n_components_,
                                   Number>::gradient_type
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    get_gradient() const
  {
    static_assert(n_components_ == 1,
                  "This function only supports FE with one component");

    // TODO optimize if the mesh is uniform
    const unsigned int q_point = internal::compute_index<dim, n_q_points_1d>();
    const Number *     inv_jacobian = &inv_jac[q_point];
    gradient_type      grad;
    for (unsigned int d_1 = 0; d_1 < dim; ++d_1)
      {
        Number tmp = 0.;
        for (unsigned int d_2 = 0; d_2 < dim; ++d_2)
          tmp += inv_jacobian[padding_length * n_cells * (dim * d_2 + d_1)] *
                 gradients[d_2][q_point];
        grad[d_1] = tmp;
      }

    return grad;
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    submit_gradient(const gradient_type &grad_in)
  {
    // TODO optimize if the mesh is uniform
    const unsigned int q_point = internal::compute_index<dim, n_q_points_1d>();
    const Number *     inv_jacobian = &inv_jac[q_point];
    for (unsigned int d_1 = 0; d_1 < dim; ++d_1)
      {
        Number tmp = 0.;
        for (unsigned int d_2 = 0; d_2 < dim; ++d_2)
          tmp += inv_jacobian[n_cells * padding_length * (dim * d_1 + d_2)] *
                 grad_in[d_2];
        gradients[d_1][q_point] = tmp * JxW[q_point];
      }
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  template <typename Functor>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    apply_for_each_quad_point(const Functor &func)
  {
    func(this);

    __syncthreads();
  }
} // namespace CUDAWrappers

DEAL_II_NAMESPACE_CLOSE

#endif

#endif
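
The header above only defines the device-side evaluator; it is meant to be used inside a cell-local operator that is handed to CUDAWrappers::MatrixFree<dim, Number>::cell_loop(). The sketch below is not part of the file shown above: it follows the pattern of the step-64 tutorial for this release, the class names MassLaplaceQuad and LocalMassLaplaceOperator and the mass-plus-Laplace operation are illustrative choices, and the static launch constants mirror what step-64 declares. Only the FEEvaluation member calls are taken directly from the header documented here, and the whole example assumes a CUDA-aware build of deal.II.

#include <deal.II/base/utilities.h>

#include <deal.II/matrix_free/cuda_fe_evaluation.h>
#include <deal.II/matrix_free/cuda_matrix_free.h>

using namespace dealii;

// Quadrature-point operation: apply_for_each_quad_point() calls this functor
// with a pointer to the evaluator (see func(this) in the header above).
template <int dim, int fe_degree>
class MassLaplaceQuad
{
public:
  __device__ void
  operator()(CUDAWrappers::FEEvaluation<dim, fe_degree> *fe_eval) const
  {
    // Read before writing: submit_*() overwrites the internal buffers.
    const double val = fe_eval->get_value();
    const typename CUDAWrappers::FEEvaluation<dim, fe_degree>::gradient_type
      grad = fe_eval->get_gradient();

    fe_eval->submit_value(val);
    fe_eval->submit_gradient(grad);
  }
};

// Cell-local operator with the interface expected by
// CUDAWrappers::MatrixFree<dim, double>::cell_loop(op, src, dst),
// following the step-64 pattern.
template <int dim, int fe_degree>
class LocalMassLaplaceOperator
{
public:
  // Launch-size constants, declared as in step-64.
  static const unsigned int n_dofs_1d    = fe_degree + 1;
  static const unsigned int n_local_dofs = Utilities::pow(fe_degree + 1, dim);
  static const unsigned int n_q_points   = Utilities::pow(fe_degree + 1, dim);

  __device__ void
  operator()(const unsigned int                                          cell,
             const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
             CUDAWrappers::SharedData<dim, double> *shared_data,
             const double *                         src,
             double *                               dst) const
  {
    CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double>
      fe_eval(cell, gpu_data, shared_data);

    fe_eval.read_dof_values(src);
    fe_eval.evaluate(true, true);  // values and gradients at quadrature points
    fe_eval.apply_for_each_quad_point(MassLaplaceQuad<dim, fe_degree>());
    fe_eval.integrate(true, true); // test with values and gradients
    fe_eval.distribute_local_to_global(dst);
  }
};

On the host side, an object of type LocalMassLaplaceOperator<dim, fe_degree> would be passed to CUDAWrappers::MatrixFree<dim, double>::cell_loop() together with the source and destination vectors; read_dof_values() and distribute_local_to_global() then handle the gather/scatter, including the hanging-node resolution and the coloring-versus-atomics choice visible in the definitions above.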