Reference documentation for deal.II version 9.0.0
cuda_fe_evaluation.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2016 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_cuda_fe_evaluation_h
17 #define dealii_cuda_fe_evaluation_h
18 
19 #include <deal.II/base/tensor.h>
20 #include <deal.II/base/utilities.h>
21 #include <deal.II/lac/cuda_vector.h>
22 #include <deal.II/matrix_free/cuda_matrix_free.h>
23 #include <deal.II/matrix_free/cuda_matrix_free.templates.h>
24 #include <deal.II/matrix_free/cuda_tensor_product_kernels.h>
25 
26 DEAL_II_NAMESPACE_OPEN
27 
31 namespace CUDAWrappers
32 {
33  namespace internal
34  {
35  template <int dim, int fe_degree, bool transpose, typename Number>
36  __device__ void resolve_hanging_nodes_shmem(Number *values,
37                                              const unsigned int constr)
38  {
39  //TODO
40  }
41  }
42 
43 
44 
72  template <int dim, int fe_degree, int n_q_points_1d = fe_degree+1,
73  int n_components_ = 1, typename Number = double>
74  class FEEvaluation
75  {
76  public:
77  typedef Number value_type;
78  typedef Tensor<1,dim,Number> gradient_type;
79  typedef typename MatrixFree<dim,Number>::Data data_type;
80  static constexpr unsigned int dimension = dim;
81  static constexpr unsigned int n_components = n_components_;
82  static constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, dim);
83  static constexpr unsigned int tensor_dofs_per_cell = Utilities::pow(fe_degree + 1, dim);
84 
88  __device__ FEEvaluation(int cell_id,
89  const data_type *data,
90  SharedData<dim,Number> *shdata);
91 
100  __device__ void read_dof_values(const Number *src);
101 
108  __device__ void distribute_local_to_global(Number *dst) const;
109 
117  __device__ void evaluate(const bool evaluate_val,
118  const bool evaluate_grad);
119 
127  __device__ void integrate(const bool integrate_val,
128  const bool integrate_grad);
129 
134  __device__ value_type get_value(const unsigned int q_point) const;
135 
142  __device__ void submit_value(const value_type &val_in,
143  const unsigned int q_point);
144 
149  __device__ gradient_type get_gradient(const unsigned int q_point) const;
150 
155  __device__ void submit_gradient(const gradient_type &grad_in,
156  const unsigned int q_point);
157 
161  template <typename functor>
162  __device__ void apply_quad_point_operations(const functor &func);
163 
164  private:
165  unsigned int *local_to_global;
166  unsigned int n_cells;
167  unsigned int padding_length;
168 
169  const unsigned int constraint_mask;
170 
171  Number *inv_jac;
172  Number *JxW;
173 
174  // Internal buffers, pointing into the shared-memory arrays of SharedData
175  Number *values;
176  Number *gradients[dim];
177  };
178 
179 
180 
181  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
182  typename Number>
183  __device__
184  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
185  FEEvaluation(int cell_id,
186  const data_type *data,
187  SharedData<dim,Number> *shdata)
188  :
189  n_cells(data->n_cells),
190  padding_length(data->padding_length),
191  constraint_mask(data->constraint_mask[cell_id]),
192  values(shdata->values)
193  {
194  local_to_global = data->local_to_global + padding_length*cell_id;
195  inv_jac = data->inv_jacobian + padding_length*cell_id;
196  JxW = data->JxW + padding_length*cell_id;
197 
198  for (unsigned int i=0; i < dim; ++i)
199  gradients[i] = shdata->gradients[i];
200  }
201 
202 
203 
204  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
205  typename Number>
206  __device__ void
207  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
208  read_dof_values(const Number *src)
209  {
210  static_assert(n_components_ == 1,
211                "This function only supports FE with one component");
212  const unsigned int idx = (threadIdx.x%n_q_points_1d)
213  +(dim>1 ? threadIdx.y : 0)*n_q_points_1d
214  +(dim>2 ? threadIdx.z : 0)*n_q_points_1d*n_q_points_1d;
215 
216  const unsigned int src_idx = local_to_global[idx];
217  // Use the read-only data cache.
218  values[idx] = __ldg(&src[src_idx]);
219 
220  if (constraint_mask)
221  internal::resolve_hanging_nodes_shmem<dim,fe_degree,false>(values,
222  constraint_mask);
223 
224  __syncthreads();
225  }
226 
227 
228 
229  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
230  typename Number>
231  __device__ void
232  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
233  distribute_local_to_global(Number *dst) const
234  {
235  static_assert(n_components_ == 1,
236                "This function only supports FE with one component");
237  if (constraint_mask)
238  internal::resolve_hanging_nodes_shmem<dim,fe_degree,true>(values,
239  constraint_mask);
240 
241 
242  const unsigned int idx = (threadIdx.x%n_q_points_1d)
243  + (dim>1 ? threadIdx.y : 0) * n_q_points_1d
244  + (dim>2 ? threadIdx.z : 0) * n_q_points_1d * n_q_points_1d;
245  const unsigned int destination_idx = local_to_global[idx];
246 
247  dst[destination_idx] += values[idx];
248  }
249 
250 
251 
252  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
253  typename Number>
254  __device__ void
255  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
256  evaluate(const bool evaluate_val, const bool evaluate_grad)
257  {
258  // Evaluate the gradients first because they are computed from the values,
259  // which will be overwritten below if evaluate_val is true.
260  internal::EvaluatorTensorProduct<internal::EvaluatorVariant::evaluate_general,
261  dim, fe_degree,n_q_points_1d, Number> evaluator_tensor_product;
262  if (evaluate_grad == true)
263  {
264  evaluator_tensor_product.gradient_at_quad_pts(values, gradients);
265  __syncthreads();
266  }
267 
268  if (evaluate_val == true)
269  {
270  evaluator_tensor_product.value_at_quad_pts(values);
271  __syncthreads();
272  }
273  }
274 
275 
276 
277  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
278  typename Number>
279  __device__ void
280  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
281  integrate(const bool integrate_val, const bool integrate_grad)
282  {
283  internal::EvaluatorTensorProduct<internal::EvaluatorVariant::evaluate_general,
284  dim, fe_degree,n_q_points_1d, Number> evaluator_tensor_product;
285  if (integrate_val == true)
286  {
287  evaluator_tensor_product.integrate_value(values);
288  __syncthreads();
289  if (integrate_grad == true)
290  {
291  evaluator_tensor_product.integrate_gradient<true>(values, gradients);
292  __syncthreads();
293  }
294  }
295  else if (integrate_grad == true)
296  {
297  evaluator_tensor_product.integrate_gradient<false>(values, gradients);
298  __syncthreads();
299  }
300  }
301 
302 
303 
304  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
305  typename Number>
306  __device__
307  typename FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::value_type
308  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
309  get_value(const unsigned int q_point) const
310  {
311  return values[q_point];
312  }
313 
314 
315 
316  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
317  typename Number>
318  __device__ void
319  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
320  submit_value(const value_type &val_in, const unsigned int q_point)
321  {
322  values[q_point] = val_in * JxW[q_point];
323  }
324 
325 
326 
327  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
328  typename Number>
329  __device__
330  typename FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::gradient_type
331  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
332  get_gradient(const unsigned int q_point) const
333  {
334  static_assert(n_components_ == 1,
335                "This function only supports FE with one component");
336  // TODO optimize if the mesh is uniform
337  const Number *inv_jacobian = &inv_jac[q_point];
338  gradient_type grad;
339  for (int d_1=0; d_1<dim; ++d_1)
340  {
341  Number tmp = 0.;
342  for (int d_2=0; d_2<dim; ++d_2)
343  tmp += inv_jacobian[padding_length*n_cells*(dim*d_2+d_1)] *
344  gradients[d_2][q_point];
345  grad[d_1] = tmp;
346  }
347 
348  return grad;
349  }
350 
351 
352 
353  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
354  typename Number>
355  __device__ void
356  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
357  submit_gradient(const gradient_type &grad_in, const unsigned int q_point)
358  {
359  // TODO optimize if the mesh is uniform
360  const Number *inv_jacobian = &inv_jac[q_point];
361  for (int d_1=0; d_1<dim; ++d_1)
362  {
363  Number tmp = 0.;
364  for (int d_2=0; d_2<dim; ++d_2)
365  tmp += inv_jacobian[n_cells*padding_length*(dim*d_1+d_2)] *
366  grad_in[d_2];
367  gradients[d_1][q_point] = tmp * JxW[q_point];
368  }
369  }
370 
371 
372 
373  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
374  typename Number>
375  template <typename functor>
376  __device__ void
377  FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>::
378  apply_quad_point_operations(const functor &func)
379  {
380  const unsigned int q_point = (dim == 1 ? threadIdx.x%n_q_points_1d :
381  dim == 2 ? threadIdx.x%n_q_points_1d + n_q_points_1d *threadIdx.y :
382  threadIdx.x%n_q_points_1d + n_q_points_1d * (threadIdx.y +
383  n_q_points_1d*threadIdx.z));
384  func(this, q_point);
385 
386  __syncthreads();
387  }
388 }
389 
390 DEAL_II_NAMESPACE_CLOSE
391 
392 #endif
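
Typical usage (an illustrative sketch, not part of this header): a user-defined cell-local operator constructs an FEEvaluation object from the cell index, the device-side data of CUDAWrappers::MatrixFree, and the SharedData buffers, then chains read_dof_values(), evaluate(), a quadrature-point operation, integrate(), and distribute_local_to_global(). The functor names below and the (cell, gpu_data, shared_data, src, dst) calling convention assumed for the cell loop are invented for illustration; only the FEEvaluation member calls are defined by this file.

#include <deal.II/matrix_free/cuda_fe_evaluation.h>

using namespace dealii;

// Quadrature-point operation for a Helmholtz-type operator, contributing
// u*v + grad(u).grad(v). apply_quad_point_operations() invokes the functor
// as func(this, q_point).
template <int dim, int fe_degree>
struct HelmholtzQuadOperation
{
  __device__ void operator()(CUDAWrappers::FEEvaluation<dim, fe_degree> *fe_eval,
                             const unsigned int                          q) const
  {
    fe_eval->submit_value(fe_eval->get_value(q), q);
    fe_eval->submit_gradient(fe_eval->get_gradient(q), q);
  }
};

// Cell-local operator executed once per cell. The argument list below is an
// assumption about how a cell loop hands over the per-cell data; only the
// FEEvaluation calls in the body come from this header.
template <int dim, int fe_degree>
struct HelmholtzLocalOperator
{
  __device__ void operator()(const unsigned int                                          cell,
                             const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
                             CUDAWrappers::SharedData<dim, double>                      *shared_data,
                             const double                                               *src,
                             double                                                     *dst) const
  {
    CUDAWrappers::FEEvaluation<dim, fe_degree> fe_eval(cell, gpu_data, shared_data);

    fe_eval.read_dof_values(src);            // gather this cell's DoF values from src
    fe_eval.evaluate(true, true);            // values and gradients at quadrature points
    fe_eval.apply_quad_point_operations(HelmholtzQuadOperation<dim, fe_degree>());
    fe_eval.integrate(true, true);           // multiply by test functions and quadrature weights
    fe_eval.distribute_local_to_global(dst); // accumulate the local result into dst
  }
};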