Reference documentation for deal.II version 9.1.1
cuda_fe_evaluation.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2016 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_cuda_fe_evaluation_h
#define dealii_cuda_fe_evaluation_h

#include <deal.II/base/config.h>

#ifdef DEAL_II_COMPILER_CUDA_AWARE

#  include <deal.II/base/tensor.h>
#  include <deal.II/base/utilities.h>

#  include <deal.II/lac/cuda_vector.h>

#  include <deal.II/matrix_free/cuda_hanging_nodes_internal.h>
#  include <deal.II/matrix_free/cuda_matrix_free.h>
#  include <deal.II/matrix_free/cuda_matrix_free.templates.h>
#  include <deal.II/matrix_free/cuda_tensor_product_kernels.h>

DEAL_II_NAMESPACE_OPEN
namespace CUDAWrappers
{
  template <int dim,
            int fe_degree,
            int n_q_points_1d = fe_degree + 1,
            int n_components_ = 1,
            typename Number   = double>
  class FEEvaluation
  {
  public:
    using value_type    = Number;
    using gradient_type = Tensor<1, dim, Number>;
    using data_type     = typename MatrixFree<dim, Number>::Data;

    static constexpr unsigned int dimension    = dim;
    static constexpr unsigned int n_components = n_components_;
    static constexpr unsigned int n_q_points =
      Utilities::pow(n_q_points_1d, dim);
    static constexpr unsigned int tensor_dofs_per_cell =
      Utilities::pow(fe_degree + 1, dim);

    __device__
    FEEvaluation(const unsigned int       cell_id,
                 const data_type *        data,
                 SharedData<dim, Number> *shdata);

    __device__ void
    read_dof_values(const Number *src);

    __device__ void
    distribute_local_to_global(Number *dst) const;

    __device__ void
    evaluate(const bool evaluate_val, const bool evaluate_grad);

    __device__ void
    integrate(const bool integrate_val, const bool integrate_grad);

    __device__ value_type
    get_value(const unsigned int q_point) const;

    __device__ void
    submit_value(const value_type &val_in, const unsigned int q_point);

    __device__ gradient_type
    get_gradient(const unsigned int q_point) const;

    __device__ void
    submit_gradient(const gradient_type &grad_in, const unsigned int q_point);

    template <typename Functor>
    __device__ void
    apply_quad_point_operations(const Functor &func);

  private:
    types::global_dof_index *local_to_global;
    unsigned int             n_cells;
    unsigned int             padding_length;

    const unsigned int constraint_mask;

    Number *inv_jac;
    Number *JxW;

    // Internal buffer
    Number *values;
    Number *gradients[dim];
  };


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    FEEvaluation(const unsigned int       cell_id,
                 const data_type *        data,
                 SharedData<dim, Number> *shdata)
    : n_cells(data->n_cells)
    , padding_length(data->padding_length)
    , constraint_mask(data->constraint_mask[cell_id])
    , values(shdata->values)
  {
    local_to_global = data->local_to_global + padding_length * cell_id;
    inv_jac         = data->inv_jacobian + padding_length * cell_id;
    JxW             = data->JxW + padding_length * cell_id;

    for (unsigned int i = 0; i < dim; ++i)
      gradients[i] = shdata->gradients[i];
  }


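  // In the definitions below, each CUDA thread of a cell's thread block
  // handles one local degree of freedom / quadrature point; the local index
  // is reconstructed from threadIdx.{x,y,z}. read_dof_values() gathers the
  // cell-local values from the global source vector into the shared-memory
  // buffer 'values' (through the read-only data cache) and resolves
  // hanging-node constraints if the cell carries a constraint mask.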
  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    read_dof_values(const Number *src)
  {
    static_assert(n_components_ == 1,
                  "This function only supports FE with one component");
    const unsigned int idx =
      (threadIdx.x % n_q_points_1d) +
      (dim > 1 ? threadIdx.y : 0) * n_q_points_1d +
      (dim > 2 ? threadIdx.z : 0) * n_q_points_1d * n_q_points_1d;

    const types::global_dof_index src_idx = local_to_global[idx];
    // Use the read-only data cache.
    values[idx] = __ldg(&src[src_idx]);

    if (constraint_mask)
      internal::resolve_hanging_nodes<dim, fe_degree, false>(constraint_mask,
                                                             values);

    __syncthreads();
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    distribute_local_to_global(Number *dst) const
  {
    static_assert(n_components_ == 1,
                  "This function only supports FE with one component");
    if (constraint_mask)
      internal::resolve_hanging_nodes<dim, fe_degree, true>(constraint_mask,
                                                            values);

    const unsigned int idx =
      (threadIdx.x % n_q_points_1d) +
      (dim > 1 ? threadIdx.y : 0) * n_q_points_1d +
      (dim > 2 ? threadIdx.z : 0) * n_q_points_1d * n_q_points_1d;
    const types::global_dof_index destination_idx = local_to_global[idx];

    dst[destination_idx] += values[idx];
  }


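  // evaluate() and integrate() apply the 1d shape-value and shape-gradient
  // matrices dimension by dimension (sum factorization) through the
  // EvaluatorTensorProduct kernels, transforming between the coefficients on
  // the unit cell and the values/gradients at the quadrature points.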
  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::evaluate(
    const bool evaluate_val,
    const bool evaluate_grad)
  {
    // Evaluate the gradients first because doing so needs the original
    // values, which are overwritten when evaluate_val is true.
    internal::EvaluatorTensorProduct<
      internal::EvaluatorVariant::evaluate_general,
      dim,
      fe_degree,
      n_q_points_1d,
      Number>
      evaluator_tensor_product;
    if (evaluate_grad == true)
      {
        evaluator_tensor_product.gradient_at_quad_pts(values, gradients);
        __syncthreads();
      }

    if (evaluate_val == true)
      {
        evaluator_tensor_product.value_at_quad_pts(values);
        __syncthreads();
      }
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::integrate(
    const bool integrate_val,
    const bool integrate_grad)
  {
    internal::EvaluatorTensorProduct<
      internal::EvaluatorVariant::evaluate_general,
      dim,
      fe_degree,
      n_q_points_1d,
      Number>
      evaluator_tensor_product;
    if (integrate_val == true)
      {
        evaluator_tensor_product.integrate_value(values);
        __syncthreads();
        if (integrate_grad == true)
          {
            evaluator_tensor_product.integrate_gradient<true>(values,
                                                              gradients);
            __syncthreads();
          }
      }
    else if (integrate_grad == true)
      {
        evaluator_tensor_product.integrate_gradient<false>(values, gradients);
        __syncthreads();
      }
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ typename FEEvaluation<dim,
                                   fe_degree,
                                   n_q_points_1d,
                                   n_components_,
                                   Number>::value_type
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::get_value(
    const unsigned int q_point) const
  {
    return values[q_point];
  }


  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    submit_value(const value_type &val_in, const unsigned int q_point)
  {
    values[q_point] = val_in * JxW[q_point];
  }


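  // get_gradient() returns the gradient in real space, i.e. the
  // reference-cell gradient stored in gradients[] contracted with the
  // transposed inverse Jacobian:
  //   grad_real[d_1] = sum_{d_2} J^{-1}[d_2][d_1] * grad_ref[d_2].
  // The inverse Jacobian is stored component-wise with a stride of
  // padding_length * n_cells between components.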
  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ typename FEEvaluation<dim,
                                   fe_degree,
                                   n_q_points_1d,
                                   n_components_,
                                   Number>::gradient_type
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    get_gradient(const unsigned int q_point) const
  {
    static_assert(n_components_ == 1,
                  "This function only supports FE with one component");
    // TODO optimize if the mesh is uniform
    const Number *inv_jacobian = &inv_jac[q_point];
    gradient_type grad;
    for (int d_1 = 0; d_1 < dim; ++d_1)
      {
        Number tmp = 0.;
        for (int d_2 = 0; d_2 < dim; ++d_2)
          tmp += inv_jacobian[padding_length * n_cells * (dim * d_2 + d_1)] *
                 gradients[d_2][q_point];
        grad[d_1] = tmp;
      }

    return grad;
  }


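  // submit_gradient() queues the contribution to be tested against the
  // shape-function gradients: the submitted vector is contracted with the
  // inverse Jacobian and scaled by the quadrature weight times the Jacobian
  // determinant (JxW) before integrate() sums it up.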
  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    submit_gradient(const gradient_type &grad_in, const unsigned int q_point)
  {
    // TODO optimize if the mesh is uniform
    const Number *inv_jacobian = &inv_jac[q_point];
    for (int d_1 = 0; d_1 < dim; ++d_1)
      {
        Number tmp = 0.;
        for (int d_2 = 0; d_2 < dim; ++d_2)
          tmp += inv_jacobian[n_cells * padding_length * (dim * d_1 + d_2)] *
                 grad_in[d_2];
        gradients[d_1][q_point] = tmp * JxW[q_point];
      }
  }


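  // apply_quad_point_operations() calls the user-provided functor once per
  // quadrature point (one point per thread) as func(this, q_point), so the
  // functor can combine get_value()/get_gradient() with
  // submit_value()/submit_gradient() at that point.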
  template <int dim,
            int fe_degree,
            int n_q_points_1d,
            int n_components_,
            typename Number>
  template <typename Functor>
  __device__ void
  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
    apply_quad_point_operations(const Functor &func)
  {
    const unsigned int q_point =
      (dim == 1 ?
         threadIdx.x % n_q_points_1d :
         dim == 2 ?
         threadIdx.x % n_q_points_1d + n_q_points_1d * threadIdx.y :
         threadIdx.x % n_q_points_1d +
           n_q_points_1d * (threadIdx.y + n_q_points_1d * threadIdx.z));
    func(this, q_point);

    __syncthreads();
  }
} // namespace CUDAWrappers

DEAL_II_NAMESPACE_CLOSE

#endif

#endif
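Usage sketch (not part of the header above): FEEvaluation is meant to be instantiated inside a cell-wise functor that CUDAWrappers::MatrixFree::cell_loop() executes on the device. The sketch below illustrates the typical call sequence for a scalar mass operator. The functor names and the exact signature of the cell functor's operator() are assumptions modeled on deal.II's CUDA matrix-free tests, not definitions made by this file; the interface expected by cell_loop() may require additional members, so consult the CUDAWrappers::MatrixFree documentation.

// Hypothetical quadrature-point functor: multiplies the value at each
// quadrature point by JxW via submit_value().
template <int dim, int fe_degree>
struct MassOperatorQuad
{
  __device__ void
  operator()(CUDAWrappers::FEEvaluation<dim, fe_degree> *fe_eval,
             const unsigned int                          q_point) const
  {
    fe_eval->submit_value(fe_eval->get_value(q_point), q_point);
  }
};

// Hypothetical cell-local operator; its operator() signature assumes the
// calling convention used by CUDAWrappers::MatrixFree<dim, double>::cell_loop().
template <int dim, int fe_degree>
struct LocalMassOperator
{
  __device__ void
  operator()(const unsigned int                                          cell,
             const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
             CUDAWrappers::SharedData<dim, double> *                     shared_data,
             const double *                                              src,
             double *                                                    dst) const
  {
    // One FEEvaluation object per cell, working on the shared-memory buffers.
    CUDAWrappers::FEEvaluation<dim, fe_degree> fe_eval(cell, gpu_data, shared_data);

    fe_eval.read_dof_values(src);            // gather local DoF values
    fe_eval.evaluate(true, false);           // values at quadrature points
    fe_eval.apply_quad_point_operations(MassOperatorQuad<dim, fe_degree>());
    fe_eval.integrate(true, false);          // test with shape-function values
    fe_eval.distribute_local_to_global(dst); // scatter into the result vector
  }
};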