Reference documentation for deal.II version GIT relicensing-464-g14f7274e4d 2024-04-22 15:40:02+00:00
cuda_sparse_matrix.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2018 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/cuda.h>
#include <deal.II/base/cuda_size.h>

#include <deal.II/lac/cuda_sparse_matrix.h>
#include <deal.II/lac/cuda_vector.h>

#ifdef DEAL_II_WITH_CUDA

#  include <cusparse.h>

DEAL_II_NAMESPACE_OPEN

namespace CUDAWrappers
{
  namespace internal
  {
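    // Kernel that scales each of the N stored matrix entries by the
    // constant factor a, using one CUDA thread per entry.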
    template <typename Number>
    __global__ void
    scale(Number                                        *val,
          const Number                                   a,
          const typename SparseMatrix<Number>::size_type N)
    {
      const typename SparseMatrix<Number>::size_type idx =
        threadIdx.x + blockIdx.x * blockDim.x;
      if (idx < N)
        val[idx] *= a;
    }


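    // Wrap existing CSR device arrays (values, row pointers, column
    // indices) in a cuSPARSE generic sparse-matrix descriptor.
    // cusparseCreateCsr() only accepts non-const void pointers, hence
    // the casts below; the arrays themselves are not modified.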
    void
    create_sp_mat_descr(int                   m,
                        int                   n,
                        int                   nnz,
                        const float          *A_val_dev,
                        const int            *A_row_ptr_dev,
                        const int            *A_column_index_dev,
                        cusparseSpMatDescr_t &sp_descr)
    {
      cusparseStatus_t error_code = cusparseCreateCsr(
        &sp_descr,
        m,
        n,
        nnz,
        reinterpret_cast<void *>(const_cast<int *>(A_row_ptr_dev)),
        reinterpret_cast<void *>(const_cast<int *>(A_column_index_dev)),
        reinterpret_cast<void *>(const_cast<float *>(A_val_dev)),
        CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO,
        CUDA_R_32F);
      AssertCusparse(error_code);
    }


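    // Same as above, for double-precision values (CUDA_R_64F).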
    void
    create_sp_mat_descr(int                   m,
                        int                   n,
                        int                   nnz,
                        const double         *A_val_dev,
                        const int            *A_row_ptr_dev,
                        const int            *A_column_index_dev,
                        cusparseSpMatDescr_t &sp_descr)
    {
      cusparseStatus_t error_code = cusparseCreateCsr(
        &sp_descr,
        m,
        n,
        nnz,
        reinterpret_cast<void *>(const_cast<int *>(A_row_ptr_dev)),
        reinterpret_cast<void *>(const_cast<int *>(A_column_index_dev)),
        reinterpret_cast<void *>(const_cast<double *>(A_val_dev)),
        CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO,
        CUDA_R_64F);
      AssertCusparse(error_code);
    }


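    // Compute y = alpha*op(A)*x + beta*y with cuSPARSE's generic SpMV
    // API: wrap x and y in dense-vector descriptors, query the required
    // scratch-buffer size, run cusparseSpMV(), then release the buffer
    // and the descriptors.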
    void
    csrmv(cusparseHandle_t           handle,
          bool                       transpose,
          int                        m,
          int                        n,
          const cusparseSpMatDescr_t sp_descr,
          const float               *x,
          bool                       add,
          float                     *y)
    {
      float alpha = 1.;
      float beta  = add ? 1. : 0.;
      cusparseOperation_t cusparse_operation =
        transpose ? CUSPARSE_OPERATION_TRANSPOSE :
                    CUSPARSE_OPERATION_NON_TRANSPOSE;

      // Move the data to cuSPARSE data type
      cusparseDnVecDescr_t x_cuvec;
      cusparseStatus_t     error_code =
        cusparseCreateDnVec(&x_cuvec,
                            n,
                            reinterpret_cast<void *>(const_cast<float *>(x)),
                            CUDA_R_32F);
      AssertCusparse(error_code);

      cusparseDnVecDescr_t y_cuvec;
      error_code =
        cusparseCreateDnVec(&y_cuvec,
                            m,
                            reinterpret_cast<void *>(const_cast<float *>(y)),
                            CUDA_R_32F);
      AssertCusparse(error_code);

      // This function performs y = alpha*op(A)*x + beta*y
      size_t buffer_size = 0;
      error_code         = cusparseSpMV_bufferSize(handle,
                                                   cusparse_operation,
                                                   &alpha,
                                                   sp_descr,
                                                   x_cuvec,
                                                   &beta,
                                                   y_cuvec,
                                                   CUDA_R_32F,
                                                   CUSPARSE_MV_ALG_DEFAULT,
                                                   &buffer_size);

      void       *buffer          = nullptr;
      cudaError_t cuda_error_code = cudaMalloc(&buffer, buffer_size);
      AssertCuda(cuda_error_code);

      // execute SpMV
      error_code = cusparseSpMV(handle,
                                cusparse_operation,
                                &alpha,
                                sp_descr,
                                x_cuvec,
                                &beta,
                                y_cuvec,
                                CUDA_R_32F,
                                CUSPARSE_MV_ALG_DEFAULT,
                                buffer);
      AssertCusparse(error_code);

      cuda_error_code = cudaFree(buffer);
      AssertCuda(cuda_error_code);
      error_code = cusparseDestroyDnVec(x_cuvec);
      AssertCusparse(error_code);
      error_code = cusparseDestroyDnVec(y_cuvec);
      AssertCusparse(error_code);
    }


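    // Double-precision overload of the above; identical logic with
    // CUDA_R_64F.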
    void
    csrmv(cusparseHandle_t           handle,
          bool                       transpose,
          int                        m,
          int                        n,
          const cusparseSpMatDescr_t sp_descr,
          const double              *x,
          bool                       add,
          double                    *y)
    {
      double alpha = 1.;
      double beta  = add ? 1. : 0.;
      cusparseOperation_t cusparse_operation =
        transpose ? CUSPARSE_OPERATION_TRANSPOSE :
                    CUSPARSE_OPERATION_NON_TRANSPOSE;

      // Move the data to cuSPARSE data type
      cusparseDnVecDescr_t x_cuvec;
      cusparseStatus_t     error_code =
        cusparseCreateDnVec(&x_cuvec,
                            n,
                            reinterpret_cast<void *>(const_cast<double *>(x)),
                            CUDA_R_64F);
      AssertCusparse(error_code);

      cusparseDnVecDescr_t y_cuvec;
      error_code =
        cusparseCreateDnVec(&y_cuvec,
                            m,
                            reinterpret_cast<void *>(const_cast<double *>(y)),
                            CUDA_R_64F);
      AssertCusparse(error_code);

      // This function performs y = alpha*op(A)*x + beta*y
      size_t buffer_size = 0;
      error_code         = cusparseSpMV_bufferSize(handle,
                                                   cusparse_operation,
                                                   &alpha,
                                                   sp_descr,
                                                   x_cuvec,
                                                   &beta,
                                                   y_cuvec,
                                                   CUDA_R_64F,
                                                   CUSPARSE_MV_ALG_DEFAULT,
                                                   &buffer_size);

      void       *buffer          = nullptr;
      cudaError_t cuda_error_code = cudaMalloc(&buffer, buffer_size);
      AssertCuda(cuda_error_code);

      // execute SpMV
      error_code = cusparseSpMV(handle,
                                cusparse_operation,
                                &alpha,
                                sp_descr,
                                x_cuvec,
                                &beta,
                                y_cuvec,
                                CUDA_R_64F,
                                CUSPARSE_MV_ALG_DEFAULT,
                                buffer);
      AssertCusparse(error_code);

      cuda_error_code = cudaFree(buffer);
      AssertCuda(cuda_error_code);
      error_code = cusparseDestroyDnVec(x_cuvec);
      AssertCusparse(error_code);
      error_code = cusparseDestroyDnVec(y_cuvec);
      AssertCusparse(error_code);
    }


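    // Kernel adding the absolute value of every entry of a row to the
    // running sum of that entry's column. atomicAdd() is required
    // because several rows (threads) may contribute to the same column.
    // Used by SparseMatrix::l1_norm().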
    template <typename Number>
    __global__ void
    l1_norm(const typename SparseMatrix<Number>::size_type n_rows,
            const Number                                  *val_dev,
            const int                                     *column_index_dev,
            const int                                     *row_ptr_dev,
            Number                                        *sums)
    {
      const typename SparseMatrix<Number>::size_type row =
        threadIdx.x + blockIdx.x * blockDim.x;

      if (row < n_rows)
        {
          for (int j = row_ptr_dev[row]; j < row_ptr_dev[row + 1]; ++j)
            atomicAdd(&sums[column_index_dev[j]], std::abs(val_dev[j]));
        }
    }


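    // Kernel computing the sum of absolute values of each row. No
    // atomics are needed because each row is owned by exactly one
    // thread. Used by SparseMatrix::linfty_norm().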
    template <typename Number>
    __global__ void
    linfty_norm(const typename SparseMatrix<Number>::size_type n_rows,
                const Number *val_dev,
                const int * /*column_index_dev*/,
                const int    *row_ptr_dev,
                Number       *sums)
    {
      const typename SparseMatrix<Number>::size_type row =
        threadIdx.x + blockIdx.x * blockDim.x;

      if (row < n_rows)
        {
          sums[row] = (Number)0.;
          for (int j = row_ptr_dev[row]; j < row_ptr_dev[row + 1]; ++j)
            sums[row] += std::abs(val_dev[j]);
        }
    }
  } // namespace internal


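  // Default constructor: an empty matrix. No device memory is allocated
  // and the cuSPARSE descriptors are only created by reinit().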
  template <typename Number>
  SparseMatrix<Number>::SparseMatrix()
    : nnz(0)
    , n_rows(0)
    , val_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
    , column_index_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
    , row_ptr_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
    , descr(nullptr)
    , sp_descr(nullptr)
  {}


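  // Build the device matrix directly from a host SparseMatrix by
  // forwarding to reinit().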
  template <typename Number>
  SparseMatrix<Number>::SparseMatrix(
    Utilities::CUDA::Handle      &handle,
    const ::SparseMatrix<Number> &sparse_matrix_host)
    : val_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
    , column_index_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
    , row_ptr_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
    , descr(nullptr)
    , sp_descr(nullptr)
  {
    reinit(handle, sparse_matrix_host);
  }


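  // Move construction steals the device arrays and descriptors and
  // leaves `other` in an empty but destructible state.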
  template <typename Number>
  SparseMatrix<Number>::SparseMatrix(CUDAWrappers::SparseMatrix<Number> &&other)
    : cusparse_handle(other.cusparse_handle)
    , nnz(other.nnz)
    , n_rows(other.n_rows)
    , n_cols(other.n_cols)
    , val_dev(std::move(other.val_dev))
    , column_index_dev(std::move(other.column_index_dev))
    , row_ptr_dev(std::move(other.row_ptr_dev))
    , descr(other.descr)
    , sp_descr(other.sp_descr)
  {
    other.nnz      = 0;
    other.n_rows   = 0;
    other.n_cols   = 0;
    other.descr    = nullptr;
    other.sp_descr = nullptr;
  }


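  // The destructor only needs to release the cuSPARSE descriptors; the
  // device arrays are freed automatically by the unique_ptr deleters.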
  template <typename Number>
  SparseMatrix<Number>::~SparseMatrix()
  {
    if (descr != nullptr)
      {
        const cusparseStatus_t cusparse_error_code =
          cusparseDestroyMatDescr(descr);
        AssertNothrowCusparse(cusparse_error_code);
        descr = nullptr;
      }

    if (sp_descr != nullptr)
      {
        const cusparseStatus_t cusparse_error_code =
          cusparseDestroySpMat(sp_descr);
        AssertNothrowCusparse(cusparse_error_code);
        sp_descr = nullptr;
      }

    nnz    = 0;
    n_rows = 0;
  }


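  // Move assignment mirrors the move constructor.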
  template <typename Number>
  SparseMatrix<Number> &
  SparseMatrix<Number>::operator=(CUDAWrappers::SparseMatrix<Number> &&other)
  {
    cusparse_handle  = other.cusparse_handle;
    nnz              = other.nnz;
    n_rows           = other.n_rows;
    n_cols           = other.n_cols;
    val_dev          = std::move(other.val_dev);
    column_index_dev = std::move(other.column_index_dev);
    row_ptr_dev      = std::move(other.row_ptr_dev);
    descr            = other.descr;
    sp_descr         = other.sp_descr;

    other.nnz      = 0;
    other.n_rows   = 0;
    other.n_cols   = 0;
    other.descr    = nullptr;
    other.sp_descr = nullptr;

    return *this;
  }


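  // Copy a host matrix to the device. dealii::SparseMatrix stores the
  // diagonal entry first in each row, whereas cuSPARSE expects the CSR
  // column indices of a row in ascending order, so each row is rewritten
  // with the diagonal moved to its sorted position before the upload.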
  template <typename Number>
  void
  SparseMatrix<Number>::reinit(
    Utilities::CUDA::Handle      &handle,
    const ::SparseMatrix<Number> &sparse_matrix_host)
  {
    cusparse_handle = handle.cusparse_handle;
    nnz             = sparse_matrix_host.n_nonzero_elements();
    n_rows          = sparse_matrix_host.m();
    n_cols          = sparse_matrix_host.n();
    const unsigned int  row_ptr_size = n_rows + 1;
    std::vector<Number> val;
    val.reserve(nnz);
    std::vector<int> column_index;
    column_index.reserve(nnz);
    std::vector<int> row_ptr(row_ptr_size, 0);

    // ::SparseMatrix stores the diagonal first in each row so we need to
    // do some reordering
    for (int row = 0; row < n_rows; ++row)
      {
        auto         p_end   = sparse_matrix_host.end(row);
        unsigned int counter = 0;
        for (auto p = sparse_matrix_host.begin(row); p != p_end; ++p)
          {
            val.emplace_back(p->value());
            column_index.emplace_back(p->column());
            ++counter;
          }
        row_ptr[row + 1] = row_ptr[row] + counter;

        // Sort the elements in the row: shift the off-diagonal entries
        // that belong before the diagonal one slot to the left, then put
        // the diagonal into the freed position. The range check
        // (pos < counter) must come first so that we never read past the
        // end of the row.
        const unsigned int offset     = row_ptr[row];
        const int          diag_index = column_index[offset];
        Number             diag_elem  = sparse_matrix_host.diag_element(row);
        unsigned int       pos        = 1;
        while ((pos < counter) && (column_index[offset + pos] < row))
          {
            val[offset + pos - 1]          = val[offset + pos];
            column_index[offset + pos - 1] = column_index[offset + pos];
            ++pos;
          }
        val[offset + pos - 1]          = diag_elem;
        column_index[offset + pos - 1] = diag_index;
      }

    // Copy the elements to the gpu
    val_dev.reset(Utilities::CUDA::allocate_device_data<Number>(nnz));
    cudaError_t error_code = cudaMemcpy(val_dev.get(),
                                        val.data(),
                                        nnz * sizeof(Number),
                                        cudaMemcpyHostToDevice);
    AssertCuda(error_code);

    // Copy the column indices to the gpu
    column_index_dev.reset(Utilities::CUDA::allocate_device_data<int>(nnz));
    AssertCuda(error_code);
    error_code = cudaMemcpy(column_index_dev.get(),
                            column_index.data(),
                            nnz * sizeof(int),
                            cudaMemcpyHostToDevice);
    AssertCuda(error_code);

    // Copy the row pointer to the gpu
    row_ptr_dev.reset(Utilities::CUDA::allocate_device_data<int>(row_ptr_size));
    AssertCuda(error_code);
    error_code = cudaMemcpy(row_ptr_dev.get(),
                            row_ptr.data(),
                            row_ptr_size * sizeof(int),
                            cudaMemcpyHostToDevice);
    AssertCuda(error_code);

    // Create the matrix descriptor
    cusparseStatus_t cusparse_error_code = cusparseCreateMatDescr(&descr);
    AssertCusparse(cusparse_error_code);
    cusparse_error_code =
      cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
    AssertCusparse(cusparse_error_code);
    cusparse_error_code =
      cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
    AssertCusparse(cusparse_error_code);

    // Create the sparse matrix descriptor
    internal::create_sp_mat_descr(n_rows,
                                  n_cols,
                                  nnz,
                                  val_dev.get(),
                                  row_ptr_dev.get(),
                                  column_index_dev.get(),
                                  sp_descr);
  }


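  // Scale all stored entries in place. The launch configuration rounds
  // nnz up to a whole number of blocks of block_size threads.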
  template <typename Number>
  SparseMatrix<Number> &
  SparseMatrix<Number>::operator*=(const Number factor)
  {
    AssertIsFinite(factor);
    const int n_blocks = 1 + (nnz - 1) / block_size;
    internal::scale<Number>
      <<<n_blocks, block_size>>>(val_dev.get(), factor, nnz);
    AssertCudaKernel();

    return *this;
  }


  template <typename Number>
  SparseMatrix<Number> &
  SparseMatrix<Number>::operator/=(const Number factor)
  {
    AssertIsFinite(factor);
    Assert(factor != Number(0.), ExcZero());
    const int n_blocks = 1 + (nnz - 1) / block_size;
    internal::scale<Number>
      <<<n_blocks, block_size>>>(val_dev.get(), 1. / factor, nnz);
    AssertCudaKernel();

    return *this;
  }


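  // The four matrix-vector products below all forward to
  // internal::csrmv(); its second and seventh arguments select op(A)
  // (transpose or not) and whether dst is overwritten or accumulated
  // into.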
  template <typename Number>
  void
  SparseMatrix<Number>::vmult(
    LinearAlgebra::CUDAWrappers::Vector<Number>       &dst,
    const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const
  {
    internal::csrmv(cusparse_handle,
                    false,
                    n_rows,
                    n_cols,
                    sp_descr,
                    src.get_values(),
                    false,
                    dst.get_values());
  }


  template <typename Number>
  void
  SparseMatrix<Number>::Tvmult(
    LinearAlgebra::CUDAWrappers::Vector<Number>       &dst,
    const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const
  {
    internal::csrmv(cusparse_handle,
                    true,
                    n_rows,
                    n_cols,
                    sp_descr,
                    src.get_values(),
                    false,
                    dst.get_values());
  }


  template <typename Number>
  void
  SparseMatrix<Number>::vmult_add(
    LinearAlgebra::CUDAWrappers::Vector<Number>       &dst,
    const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const
  {
    internal::csrmv(cusparse_handle,
                    false,
                    n_rows,
                    n_cols,
                    sp_descr,
                    src.get_values(),
                    true,
                    dst.get_values());
  }


  template <typename Number>
  void
  SparseMatrix<Number>::Tvmult_add(
    LinearAlgebra::CUDAWrappers::Vector<Number>       &dst,
    const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const
  {
    internal::csrmv(cusparse_handle,
                    true,
                    n_rows,
                    n_cols,
                    sp_descr,
                    src.get_values(),
                    true,
                    dst.get_values());
  }


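  // Compute v^T A v by reusing vmult() and the device dot product.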
  template <typename Number>
  Number
  SparseMatrix<Number>::matrix_norm_square(
    const LinearAlgebra::CUDAWrappers::Vector<Number> &v) const
  {
    LinearAlgebra::CUDAWrappers::Vector<Number> tmp(n_rows);
    vmult(tmp, v);

    return v * tmp;
  }


  template <typename Number>
  Number
  SparseMatrix<Number>::matrix_scalar_product(
    const LinearAlgebra::CUDAWrappers::Vector<Number> &u,
    const LinearAlgebra::CUDAWrappers::Vector<Number> &v) const
  {
    LinearAlgebra::CUDAWrappers::Vector<Number> tmp(n_rows);
    vmult(tmp, v);

    return u * tmp;
  }


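  // Compute dst = b - A x and return its l2 norm: vmult() sets
  // dst = A x, and dst.sadd(-1., 1., b) turns that into b - A x.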
  template <typename Number>
  Number
  SparseMatrix<Number>::residual(
    LinearAlgebra::CUDAWrappers::Vector<Number>       &dst,
    const LinearAlgebra::CUDAWrappers::Vector<Number> &x,
    const LinearAlgebra::CUDAWrappers::Vector<Number> &b) const
  {
    vmult(dst, x);
    dst.sadd(-1., 1., b);

    return dst.l2_norm();
  }


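  // The l1 norm is the maximum column sum: accumulate |a_ij| per column
  // on the device, then take the largest entry. Note that the grid is
  // sized by nnz although only n_rows threads do any work.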
  template <typename Number>
  Number
  SparseMatrix<Number>::l1_norm() const
  {
    LinearAlgebra::CUDAWrappers::Vector<Number> column_sums(n_cols);
    const int n_blocks = 1 + (nnz - 1) / block_size;
    internal::l1_norm<Number>
      <<<n_blocks, block_size>>>(n_rows,
                                 val_dev.get(),
                                 column_index_dev.get(),
                                 row_ptr_dev.get(),
                                 column_sums.get_values());
    AssertCudaKernel();

    return column_sums.linfty_norm();
  }


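  // The linfty norm is the maximum row sum, computed analogously from
  // per-row sums.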
  template <typename Number>
  Number
  SparseMatrix<Number>::linfty_norm() const
  {
    LinearAlgebra::CUDAWrappers::Vector<Number> row_sums(n_rows);
    const int n_blocks = 1 + (nnz - 1) / block_size;
    internal::linfty_norm<Number>
      <<<n_blocks, block_size>>>(n_rows,
                                 val_dev.get(),
                                 column_index_dev.get(),
                                 row_ptr_dev.get(),
                                 row_sums.get_values());
    AssertCudaKernel();

    return row_sums.linfty_norm();
  }


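  // The Frobenius norm is the l2 norm of the array of stored values, so
  // copy the values into a device vector and reuse Vector::l2_norm().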
  template <typename Number>
  Number
  SparseMatrix<Number>::frobenius_norm() const
  {
    LinearAlgebra::CUDAWrappers::Vector<Number> matrix_values(nnz);
    cudaError_t cuda_error = cudaMemcpy(matrix_values.get_values(),
                                        val_dev.get(),
                                        nnz * sizeof(Number),
                                        cudaMemcpyDeviceToDevice);
    AssertCuda(cuda_error);

    return matrix_values.l2_norm();
  }


  template <typename Number>
  std::tuple<Number *, int *, int *, cusparseMatDescr_t, cusparseSpMatDescr_t>
  SparseMatrix<Number>::get_cusparse_matrix() const
  {
    return std::make_tuple(val_dev.get(),
                           column_index_dev.get(),
                           row_ptr_dev.get(),
                           descr,
                           sp_descr);
  }


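  // Typical usage, as a minimal sketch (handle, A_host, src, and dst are
  // placeholder names, not part of this file):
  //
  //   Utilities::CUDA::Handle handle;
  //   CUDAWrappers::SparseMatrix<double> A(handle, A_host);
  //   LinearAlgebra::CUDAWrappers::Vector<double> src(A.n()), dst(A.m());
  //   // ...fill src...
  //   A.vmult(dst, src); // dst = A * src, computed on the device
  //
  // Explicit instantiations for the two scalar types supported by the
  // cuSPARSE calls above: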
  template class SparseMatrix<float>;
  template class SparseMatrix<double>;
} // namespace CUDAWrappers

DEAL_II_NAMESPACE_CLOSE

#endif