#ifdef DEAL_II_WITH_CUDA

// Helper kernels and functions, defined in namespace CUDAWrappers::internal.

template <typename Number>
__global__ void
scale(Number *                                       val,
      const Number                                   a,
      const typename SparseMatrix<Number>::size_type N)
{
  const typename SparseMatrix<Number>::size_type idx =
    threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < N)
    val[idx] *= a;
}
void
create_sp_mat_descr(int                   m,
                    int                   n,
                    int                   nnz,
                    const float *         A_val_dev,
                    const int *           A_row_ptr_dev,
                    const int *           A_column_index_dev,
                    cusparseSpMatDescr_t &sp_descr)
{
  cusparseStatus_t error_code = cusparseCreateCsr(
    &sp_descr,
    m,
    n,
    nnz,
    reinterpret_cast<void *>(const_cast<int *>(A_row_ptr_dev)),
    reinterpret_cast<void *>(const_cast<int *>(A_column_index_dev)),
    reinterpret_cast<void *>(const_cast<float *>(A_val_dev)),
    CUSPARSE_INDEX_32I,
    CUSPARSE_INDEX_32I,
    CUSPARSE_INDEX_BASE_ZERO,
    CUDA_R_32F);
  AssertCusparse(error_code);
}
void
create_sp_mat_descr(int                   m,
                    int                   n,
                    int                   nnz,
                    const double *        A_val_dev,
                    const int *           A_row_ptr_dev,
                    const int *           A_column_index_dev,
                    cusparseSpMatDescr_t &sp_descr)
{
  cusparseStatus_t error_code = cusparseCreateCsr(
    &sp_descr,
    m,
    n,
    nnz,
    reinterpret_cast<void *>(const_cast<int *>(A_row_ptr_dev)),
    reinterpret_cast<void *>(const_cast<int *>(A_column_index_dev)),
    reinterpret_cast<void *>(const_cast<double *>(A_val_dev)),
    CUSPARSE_INDEX_32I,
    CUSPARSE_INDEX_32I,
    CUSPARSE_INDEX_BASE_ZERO,
    CUDA_R_64F);
  AssertCusparse(error_code);
}
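
// Illustration (not part of the original source): the three arrays handed to
// create_sp_mat_descr() follow the standard zero-based CSR convention. For
// the 3x3 matrix
//
//     [ 4 0 1 ]
//     [ 0 3 0 ]
//     [ 2 0 5 ]
//
// the arrays would be
//
//   const double A_val[]          = {4., 1., 3., 2., 5.};
//   const int    A_column_index[] = {0, 2, 1, 0, 2};
//   const int    A_row_ptr[]      = {0, 2, 3, 5}; // n_rows + 1 entries
//
// with nnz = 5; row r occupies positions [A_row_ptr[r], A_row_ptr[r + 1]).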
void
csrmv(cusparseHandle_t           handle,
      bool                       transpose,
      int                        m,
      int                        n,
      const cusparseSpMatDescr_t sp_descr,
      const float *              x,
      bool                       add,
      float *                    y)
{
  float               alpha = 1.;
  float               beta  = add ? 1. : 0.;
  cusparseOperation_t cusparse_operation =
    transpose ? CUSPARSE_OPERATION_TRANSPOSE :
                CUSPARSE_OPERATION_NON_TRANSPOSE;

  // Wrap the raw device pointers in cuSPARSE dense-vector descriptors.
  cusparseDnVecDescr_t x_cuvec;
  cusparseStatus_t     error_code =
    cusparseCreateDnVec(&x_cuvec,
                        n,
                        reinterpret_cast<void *>(const_cast<float *>(x)),
                        CUDA_R_32F);
  AssertCusparse(error_code);

  cusparseDnVecDescr_t y_cuvec;
  error_code =
    cusparseCreateDnVec(&y_cuvec,
                        m,
                        reinterpret_cast<void *>(const_cast<float *>(y)),
                        CUDA_R_32F);
  AssertCusparse(error_code);

  // This computes y = alpha * op(A) * x + beta * y.
  size_t buffer_size = 0;
  error_code = cusparseSpMV_bufferSize(handle, cusparse_operation, &alpha,
                                       sp_descr, x_cuvec, &beta, y_cuvec,
                                       CUDA_R_32F, CUSPARSE_MV_ALG_DEFAULT,
                                       &buffer_size);
  AssertCusparse(error_code);

  void *      buffer          = nullptr;
  cudaError_t cuda_error_code = cudaMalloc(&buffer, buffer_size);
  AssertCuda(cuda_error_code);

  error_code = cusparseSpMV(handle, cusparse_operation, &alpha, sp_descr,
                            x_cuvec, &beta, y_cuvec, CUDA_R_32F,
                            CUSPARSE_MV_ALG_DEFAULT, buffer);
  AssertCusparse(error_code);

  cuda_error_code = cudaFree(buffer);
  AssertCuda(cuda_error_code);
  error_code = cusparseDestroyDnVec(x_cuvec);
  AssertCusparse(error_code);
  error_code = cusparseDestroyDnVec(y_cuvec);
  AssertCusparse(error_code);
}
void
csrmv(cusparseHandle_t           handle,
      bool                       transpose,
      int                        m,
      int                        n,
      const cusparseSpMatDescr_t sp_descr,
      const double *             x,
      bool                       add,
      double *                   y)
{
  double              alpha = 1.;
  double              beta  = add ? 1. : 0.;
  cusparseOperation_t cusparse_operation =
    transpose ? CUSPARSE_OPERATION_TRANSPOSE :
                CUSPARSE_OPERATION_NON_TRANSPOSE;

  // Wrap the raw device pointers in cuSPARSE dense-vector descriptors.
  cusparseDnVecDescr_t x_cuvec;
  cusparseStatus_t     error_code =
    cusparseCreateDnVec(&x_cuvec,
                        n,
                        reinterpret_cast<void *>(const_cast<double *>(x)),
                        CUDA_R_64F);
  AssertCusparse(error_code);

  cusparseDnVecDescr_t y_cuvec;
  error_code =
    cusparseCreateDnVec(&y_cuvec,
                        m,
                        reinterpret_cast<void *>(const_cast<double *>(y)),
                        CUDA_R_64F);
  AssertCusparse(error_code);

  // This computes y = alpha * op(A) * x + beta * y.
  size_t buffer_size = 0;
  error_code = cusparseSpMV_bufferSize(handle, cusparse_operation, &alpha,
                                       sp_descr, x_cuvec, &beta, y_cuvec,
                                       CUDA_R_64F, CUSPARSE_MV_ALG_DEFAULT,
                                       &buffer_size);
  AssertCusparse(error_code);

  void *      buffer          = nullptr;
  cudaError_t cuda_error_code = cudaMalloc(&buffer, buffer_size);
  AssertCuda(cuda_error_code);

  error_code = cusparseSpMV(handle, cusparse_operation, &alpha, sp_descr,
                            x_cuvec, &beta, y_cuvec, CUDA_R_64F,
                            CUSPARSE_MV_ALG_DEFAULT, buffer);
  AssertCusparse(error_code);

  cuda_error_code = cudaFree(buffer);
  AssertCuda(cuda_error_code);
  error_code = cusparseDestroyDnVec(x_cuvec);
  AssertCusparse(error_code);
  error_code = cusparseDestroyDnVec(y_cuvec);
  AssertCusparse(error_code);
}
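
// Usage sketch (not part of the original source; names are illustrative):
// given a cusparseHandle_t `handle`, a descriptor `sp_descr` created by
// create_sp_mat_descr() for an m x n matrix A, a device input vector `x` of
// length n, and a device output vector `y` of length m, csrmv() computes
// y = op(A) * x + beta * y, with beta = 1 when `add` is true and 0 otherwise:
//
//   csrmv(handle, /*transpose=*/false, m, n, sp_descr, x, /*add=*/false, y); // y  = A x
//   csrmv(handle, /*transpose=*/false, m, n, sp_descr, x, /*add=*/true,  y); // y += A x
//
// The transpose flag selects CUSPARSE_OPERATION_TRANSPOSE and is what the
// Tvmult()-type members of the matrix class below rely on.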
template <typename Number>
__global__ void
l1_norm(const typename SparseMatrix<Number>::size_type n_rows,
        const Number *                                  val_dev,
        const int *                                     column_index_dev,
        const int *                                     row_ptr_dev,
        Number *                                        sums)
{
  const typename SparseMatrix<Number>::size_type row =
    threadIdx.x + blockIdx.x * blockDim.x;

  if (row < n_rows)
    {
      for (int j = row_ptr_dev[row]; j < row_ptr_dev[row + 1]; ++j)
        atomicAdd(&sums[column_index_dev[j]], std::abs(val_dev[j]));
    }
}
template <typename Number>
__global__ void
linfty_norm(const typename SparseMatrix<Number>::size_type n_rows,
            const Number *                                  val_dev,
            const int * /*column_index_dev*/,
            const int *                                     row_ptr_dev,
            Number *                                        sums)
{
  const typename SparseMatrix<Number>::size_type row =
    threadIdx.x + blockIdx.x * blockDim.x;

  if (row < n_rows)
    {
      sums[row] = (Number)0.;
      for (int j = row_ptr_dev[row]; j < row_ptr_dev[row + 1]; ++j)
        sums[row] += std::abs(val_dev[j]);
    }
}
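
// Note (not part of the original source): these kernels accumulate the sums
// behind the matrix norms
//
//   ||A||_1   = max_j sum_i |a_ij|   (maximum absolute column sum)
//   ||A||_inf = max_i sum_j |a_ij|   (maximum absolute row sum)
//
// Each thread processes one matrix row. l1_norm() needs atomicAdd() because
// several rows contribute concurrently to the same column sum, whereas
// linfty_norm() only writes to sums[row] and needs no synchronization. The
// calling member functions then take the maximum over the accumulated sums.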
// Member functions of CUDAWrappers::SparseMatrix<Number>.

template <typename Number>
SparseMatrix<Number>::SparseMatrix()
  : nnz(0)
  , n_rows(0)
  , val_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
  , column_index_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
  , row_ptr_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
  , descr(nullptr)
  , sp_descr(nullptr)
{}
template <typename Number>
SparseMatrix<Number>::SparseMatrix(
  Utilities::CUDA::Handle &             handle,
  const ::dealii::SparseMatrix<Number> &sparse_matrix_host)
  : val_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
  , column_index_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
  , row_ptr_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
  , descr(nullptr)
  , sp_descr(nullptr)
{
  reinit(handle, sparse_matrix_host);
}
template <typename Number>
SparseMatrix<Number>::SparseMatrix(CUDAWrappers::SparseMatrix<Number> &&other)
  : cusparse_handle(other.cusparse_handle)
  , nnz(other.nnz)
  , n_rows(other.n_rows)
  , n_cols(other.n_cols)
  , val_dev(std::move(other.val_dev))
  , column_index_dev(std::move(other.column_index_dev))
  , row_ptr_dev(std::move(other.row_ptr_dev))
  , descr(other.descr)
  , sp_descr(other.sp_descr)
{
  other.nnz      = 0;
  other.n_rows   = 0;
  other.n_cols   = 0;
  other.descr    = nullptr;
  other.sp_descr = nullptr;
}
template <typename Number>
SparseMatrix<Number>::~SparseMatrix()
{
  if (descr != nullptr)
    {
      const cusparseStatus_t cusparse_error_code =
        cusparseDestroyMatDescr(descr);
      AssertNothrowCusparse(cusparse_error_code);
      descr = nullptr;
    }

  if (sp_descr != nullptr)
    {
      const cusparseStatus_t cusparse_error_code =
        cusparseDestroySpMat(sp_descr);
      AssertNothrowCusparse(cusparse_error_code);
      sp_descr = nullptr;
    }
}
template <typename Number>
SparseMatrix<Number> &
SparseMatrix<Number>::operator=(CUDAWrappers::SparseMatrix<Number> &&other)
{
  cusparse_handle  = other.cusparse_handle;
  nnz              = other.nnz;
  n_rows           = other.n_rows;
  n_cols           = other.n_cols;
  val_dev          = std::move(other.val_dev);
  column_index_dev = std::move(other.column_index_dev);
  row_ptr_dev      = std::move(other.row_ptr_dev);
  descr            = other.descr;
  sp_descr         = other.sp_descr;

  other.nnz      = 0;
  other.n_rows   = 0;
  other.n_cols   = 0;
  other.descr    = nullptr;
  other.sp_descr = nullptr;

  return *this;
}
template <typename Number>
void
SparseMatrix<Number>::reinit(
  Utilities::CUDA::Handle &             handle,
  const ::dealii::SparseMatrix<Number> &sparse_matrix_host)
{
  cusparse_handle = handle.cusparse_handle;
  nnz             = sparse_matrix_host.n_nonzero_elements();
  n_rows          = sparse_matrix_host.m();
  n_cols          = sparse_matrix_host.n();
  const unsigned int  row_ptr_size = n_rows + 1;
  std::vector<Number> val;
  val.reserve(nnz);
  std::vector<int> column_index;
  column_index.reserve(nnz);
  std::vector<int> row_ptr(row_ptr_size, 0);

  // dealii::SparseMatrix stores the diagonal element first in each row, so
  // the entries need to be reordered into plain column-sorted CSR.
  for (int row = 0; row < n_rows; ++row)
    {
      auto         p_end   = sparse_matrix_host.end(row);
      unsigned int counter = 0;
      for (auto p = sparse_matrix_host.begin(row); p != p_end; ++p)
        {
          val.emplace_back(p->value());
          column_index.emplace_back(p->column());
          ++counter;
        }
      row_ptr[row + 1] = row_ptr[row] + counter;

      // Move the diagonal element to its sorted position within the row.
      const unsigned int offset     = row_ptr[row];
      const int          diag_index = column_index[offset];
      Number             diag_elem  = sparse_matrix_host.diag_element(row);
      unsigned int       pos        = 1;
      while ((column_index[offset + pos] < row) && (pos < counter))
        {
          val[offset + pos - 1]          = val[offset + pos];
          column_index[offset + pos - 1] = column_index[offset + pos];
          ++pos;
        }
      val[offset + pos - 1]          = diag_elem;
      column_index[offset + pos - 1] = diag_index;
    }

  // Copy the values, column indices, and row pointers to the device.
  val_dev.reset(Utilities::CUDA::allocate_device_data<Number>(nnz));
  cudaError_t error_code = cudaMemcpy(val_dev.get(),
                                      val.data(),
                                      nnz * sizeof(Number),
                                      cudaMemcpyHostToDevice);
  AssertCuda(error_code);

  column_index_dev.reset(Utilities::CUDA::allocate_device_data<int>(nnz));
  error_code = cudaMemcpy(column_index_dev.get(),
                          column_index.data(),
                          nnz * sizeof(int),
                          cudaMemcpyHostToDevice);
  AssertCuda(error_code);

  row_ptr_dev.reset(Utilities::CUDA::allocate_device_data<int>(row_ptr_size));
  error_code = cudaMemcpy(row_ptr_dev.get(),
                          row_ptr.data(),
                          row_ptr_size * sizeof(int),
                          cudaMemcpyHostToDevice);
  AssertCuda(error_code);

  // Create the cusparseMatDescr_t descriptor ...
  cusparseStatus_t cusparse_error_code = cusparseCreateMatDescr(&descr);
  AssertCusparse(cusparse_error_code);
  cusparse_error_code =
    cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
  AssertCusparse(cusparse_error_code);
  cusparse_error_code =
    cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
  AssertCusparse(cusparse_error_code);

  // ... and the generic descriptor used by cusparseSpMV().
  internal::create_sp_mat_descr(n_rows,
                                n_cols,
                                nnz,
                                val_dev.get(),
                                row_ptr_dev.get(),
                                column_index_dev.get(),
                                sp_descr);
}
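
// Usage sketch (an illustration, not part of deal.II; header paths and the
// exact calls are assumptions based on recent deal.II releases): copying a
// host matrix to the device and applying it to a vector.
//
//   #include <deal.II/base/cuda.h>
//   #include <deal.II/lac/cuda_sparse_matrix.h>
//   #include <deal.II/lac/cuda_vector.h>
//   #include <deal.II/lac/sparse_matrix.h>
//
//   dealii::SparseMatrix<double>     host_matrix;  // assembled elsewhere
//   dealii::Utilities::CUDA::Handle  cuda_handle;  // owns the cuSPARSE handle
//
//   dealii::CUDAWrappers::SparseMatrix<double> device_matrix(cuda_handle,
//                                                            host_matrix);
//
//   dealii::LinearAlgebra::CUDAWrappers::Vector<double> src(device_matrix.n());
//   dealii::LinearAlgebra::CUDAWrappers::Vector<double> dst(device_matrix.m());
//   // ... fill src on the device, e.g. by importing a ReadWriteVector ...
//   device_matrix.vmult(dst, src); // dst = A * src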
template <typename Number>
SparseMatrix<Number> &
SparseMatrix<Number>::operator*=(const Number factor)
{
  AssertIsFinite(factor);
  const int n_blocks = 1 + (nnz - 1) / block_size;
  internal::scale<Number>
    <<<n_blocks, block_size>>>(val_dev.get(), factor, nnz);
  AssertCudaKernel();

  return *this;
}
template <typename Number>
SparseMatrix<Number> &
SparseMatrix<Number>::operator/=(const Number factor)
{
  AssertIsFinite(factor);
  Assert(factor != Number(0.), ExcZero());
  const int n_blocks = 1 + (nnz - 1) / block_size;
  internal::scale<Number>
    <<<n_blocks, block_size>>>(val_dev.get(), 1. / factor, nnz);
  AssertCudaKernel();

  return *this;
}
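
// Grid-sizing note (not part of the original source): the expression
// 1 + (nnz - 1) / block_size is integer ceiling division, so at least one
// thread is launched per nonzero entry. For illustration, if block_size
// were 128:
//
//   nnz = 1024  ->  1 + 1023 / 128 = 8 blocks (exactly 1024 threads)
//   nnz = 1025  ->  1 + 1024 / 128 = 9 blocks (1152 threads; the surplus
//                   threads fail the idx < N guard in scale() and do nothing)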
// vmult(), Tvmult(), vmult_add(), and Tvmult_add() all forward to
// internal::csrmv(), differing only in the transpose and add flags;
// matrix_norm_square(v) and matrix_scalar_product(u, v) apply the matrix
// with vmult() and take the scalar product of the result with v or u.

template <typename Number>
Number
SparseMatrix<Number>::residual(
  LinearAlgebra::CUDAWrappers::Vector<Number> &      dst,
  const LinearAlgebra::CUDAWrappers::Vector<Number> &x,
  const LinearAlgebra::CUDAWrappers::Vector<Number> &b) const
{
  vmult(dst, x);
  dst.sadd(-1., 1., b);

  return dst.l2_norm();
}
template <typename Number>
Number
SparseMatrix<Number>::l1_norm() const
{
  const int n_blocks = 1 + (nnz - 1) / block_size;
  LinearAlgebra::CUDAWrappers::Vector<Number> column_sums(n_cols);
  internal::l1_norm<Number><<<n_blocks, block_size>>>(n_rows,
                                                      val_dev.get(),
                                                      column_index_dev.get(),
                                                      row_ptr_dev.get(),
                                                      column_sums.get_values());
  AssertCudaKernel();

  return column_sums.linfty_norm();
}
template <typename Number>
Number
SparseMatrix<Number>::linfty_norm() const
{
  const int n_blocks = 1 + (nnz - 1) / block_size;
  LinearAlgebra::CUDAWrappers::Vector<Number> row_sums(n_rows);
  internal::linfty_norm<Number><<<n_blocks, block_size>>>(n_rows,
                                                          val_dev.get(),
                                                          column_index_dev.get(),
                                                          row_ptr_dev.get(),
                                                          row_sums.get_values());
  AssertCudaKernel();

  return row_sums.linfty_norm();
}
template <typename Number>
Number
SparseMatrix<Number>::frobenius_norm() const
{
  LinearAlgebra::CUDAWrappers::Vector<Number> matrix_values(nnz);
  cudaError_t cuda_error = cudaMemcpy(matrix_values.get_values(),
                                      val_dev.get(),
                                      nnz * sizeof(Number),
                                      cudaMemcpyDeviceToDevice);
  AssertCuda(cuda_error);

  return matrix_values.l2_norm();
}
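
// Note (not part of the original source): the Frobenius norm
// ||A||_F = sqrt(sum_ij |a_ij|^2) only involves the stored nonzero entries,
// so copying val_dev into a device vector and taking its l2_norm() yields
// the same value without a dedicated kernel.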
template <typename Number>
std::tuple<Number *, int *, int *, cusparseMatDescr_t, cusparseSpMatDescr_t>
SparseMatrix<Number>::get_cusparse_matrix() const
{
  return std::make_tuple(val_dev.get(),
                         column_index_dev.get(),
                         row_ptr_dev.get(),
                         descr,
                         sp_descr);
}
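
// Usage sketch (not part of the original source): the returned tuple can be
// unpacked with structured bindings when raw cuSPARSE access to the matrix
// owned by `device_matrix` is needed, e.g. to call cusparseSpMV() directly:
//
//   auto [values, column_indices, row_pointers, mat_descr, spmat_descr] =
//     device_matrix.get_cusparse_matrix();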