#ifdef DEAL_II_WITH_PETSC
      // the default constructor creates an empty (0 x 0) sequential matrix
      const int            m = 0, n = 0, n_nonzero_per_row = 0;
      const PetscErrorCode ierr = MatCreateSeqAIJ(
        PETSC_COMM_SELF, m, n, n_nonzero_per_row, nullptr, &matrix);
    template <typename SparsityPatternType>
    SparseMatrix::SparseMatrix(
      const MPI_Comm &              communicator,
      const SparsityPatternType &   sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      do_reinit(communicator,
                sparsity_pattern,
                local_rows_per_process,
                local_columns_per_process,
                this_process,
                preset_nonzero_locations);
    }
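    // A hedged usage sketch (not part of this file): the per-process row and
    // column counts expected by the constructor above can be gathered with
    // Utilities::MPI::all_gather(). The names `dof_handler`, `dsp`, and
    // `mpi_communicator` are assumptions made for this example.
    //
    //   const std::vector<types::global_dof_index> rows_per_process =
    //     Utilities::MPI::all_gather(mpi_communicator,
    //                                dof_handler.n_locally_owned_dofs());
    //
    //   PETScWrappers::MPI::SparseMatrix system_matrix(
    //     mpi_communicator,
    //     dsp,                 // the sparsity pattern
    //     rows_per_process,
    //     rows_per_process,    // square matrix: same column partition
    //     Utilities::MPI::this_mpi_process(mpi_communicator),
    //     /*preset_nonzero_locations=*/true);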
      // duplicate the layout of `other` without copying its values
      ierr = MatDuplicate(other.matrix, MAT_DO_NOT_COPY_VALUES, &matrix);
    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(const IndexSet &           local_rows,
                         const IndexSet &           local_active_rows,
                         const IndexSet &           local_columns,
                         const IndexSet &           local_active_columns,
                         const SparsityPatternType &sparsity_pattern,
                         const MPI_Comm &           communicator)
    {
      const PetscErrorCode ierr = destroy_matrix(matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      do_reinit(communicator, local_rows, local_active_rows,
                local_columns, local_active_columns, sparsity_pattern);
    }
    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(
      const MPI_Comm &              communicator,
      const SparsityPatternType &   sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      // get rid of the old matrix, then delegate to do_reinit()
      // ...
      do_reinit(communicator,
                sparsity_pattern,
                local_rows_per_process,
                local_columns_per_process,
                this_process,
                preset_nonzero_locations);
    }
    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(const IndexSet &           local_rows,
                         const SparsityPatternType &sparsity_pattern,
                         const MPI_Comm &           communicator)
    {
      // a square matrix: use the same partitioning for rows and columns
      do_reinit(communicator, local_rows, local_rows, sparsity_pattern);
    }
    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(const IndexSet &           local_rows,
                         const IndexSet &           local_columns,
                         const SparsityPatternType &sparsity_pattern,
                         const MPI_Comm &           communicator)
    {
      // ...
      do_reinit(communicator, local_rows, local_columns, sparsity_pattern);
    }
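    // A hedged usage sketch (not part of this file): re-initializing a matrix
    // from IndexSets and a DynamicSparsityPattern through the reinit()
    // overloads above. `locally_owned_dofs`, `dsp`, and `mpi_communicator`
    // are assumed to exist in the calling code.
    //
    //   PETScWrappers::MPI::SparseMatrix system_matrix;
    //   system_matrix.reinit(locally_owned_dofs,   // row partition
    //                        locally_owned_dofs,   // column partition
    //                        dsp,
    //                        mpi_communicator);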
    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(const MPI_Comm &           communicator,
                            const IndexSet &           local_rows,
                            const IndexSet &           local_columns,
                            const SparsityPatternType &sparsity_pattern)
    {
      Assert(sparsity_pattern.n_rows() == local_rows.size(),
             ExcMessage(
               "SparsityPattern and IndexSet have different number of rows"));
      Assert(sparsity_pattern.n_cols() == local_columns.size(),
             ExcMessage(
               "SparsityPattern and IndexSet have different number of columns"));
      Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
             ExcMessage("PETSc only supports contiguous row/column ranges"));
#  ifdef DEBUG
      {
        // each row and each column has to be owned by exactly one process
        const types::global_dof_index row_owners =
          Utilities::MPI::sum(local_rows.n_elements(), communicator);
        const types::global_dof_index col_owners =
          Utilities::MPI::sum(local_columns.n_elements(), communicator);
        Assert(row_owners == sparsity_pattern.n_rows(),
               ExcMessage("Each row has to be owned by exactly one owner (n_rows()=" +
                          std::to_string(sparsity_pattern.n_rows()) +
                          " but sum(local_rows.n_elements())=" +
                          std::to_string(row_owners) + ")"));
        Assert(col_owners == sparsity_pattern.n_cols(),
               ExcMessage("Each column has to be owned by exactly one owner (n_cols()=" +
                          std::to_string(sparsity_pattern.n_cols()) +
                          " but sum(local_columns.n_elements())=" +
                          std::to_string(col_owners) + ")"));
      }
#  endif
      // create the matrix; the local and global sizes are set here, the
      // sparsity pattern is communicated to PETSc below
      PetscErrorCode ierr = MatCreate(communicator, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetSizes(matrix,
                         local_rows.n_elements(),
                         local_columns.n_elements(),
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols());
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // use MATAIJ, which dispatches to SEQAIJ on one process and to MPIAIJ
      // otherwise
      ierr = MatSetType(matrix, MATAIJ);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      if (local_rows.n_elements() > 0)
        {
          // copy the sparsity pattern into the CSR arrays expected by
          // MatXXXAIJSetPreallocationCSR()
          const PetscInt local_row_start = local_rows.nth_index_in_set(0);
          const PetscInt local_row_end =
            local_row_start + local_rows.n_elements();

          // have one dummy entry at the end to make sure PETSc does not
          // read past the end of the arrays
          std::vector<PetscInt>
            rowstart_in_window(local_row_end - local_row_start + 1, 0),
            colnums_in_window;
          {
            unsigned int n_cols = 0;
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              {
                const PetscInt row_length = sparsity_pattern.row_length(i);
                rowstart_in_window[i + 1 - local_row_start] =
                  rowstart_in_window[i - local_row_start] + row_length;
                n_cols += row_length;
              }
            colnums_in_window.resize(n_cols + 1, -1);
          }

          // now copy over the column indices from the sparsity pattern
          {
            PetscInt *ptr = colnums_in_window.data();
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              for (typename SparsityPatternType::iterator p =
                     sparsity_pattern.begin(i);
                   p != sparsity_pattern.end(i);
                   ++p, ++ptr)
                *ptr = p->column();
          }

          // then let PETSc allocate these entries in one go
          ierr = MatMPIAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          ierr = MatSeqAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      else
        {
          // no locally owned rows: pass an empty CSR structure
          PetscInt i = 0;
          ierr = MatSeqAIJSetPreallocationCSR(matrix, &i, &i, nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
          ierr = MatMPIAIJSetPreallocationCSR(matrix, &i, &i, nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      // ...
    }
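      // A worked illustration (assumed values, not part of this file) of the
      // CSR arrays handed to MatXXXAIJSetPreallocationCSR() above: if this
      // process owns two rows, where local row 0 couples to global columns
      // {0, 5} and local row 1 couples to global columns {1, 3, 5}, then
      //
      //   rowstart_in_window = {0, 2, 5};        // length n_local_rows + 1
      //   colnums_in_window  = {0, 5, 1, 3, 5};  // global column indices
      //
      // (plus the one trailing dummy entry the code above appends), so that
      // rowstart_in_window[i+1] - rowstart_in_window[i] is the number of
      // nonzeros in local row i, and the corresponding slice of
      // colnums_in_window lists their columns.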
    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(
      const MPI_Comm &              communicator,
      const SparsityPatternType &   sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      Assert(local_rows_per_process.size() == local_columns_per_process.size(),
             ExcDimensionMismatch(local_rows_per_process.size(),
                                  local_columns_per_process.size()));
      // determine which range of rows this process owns
      size_type local_row_start = 0;
      for (unsigned int p = 0; p < this_process; ++p)
        local_row_start += local_rows_per_process[p];
      const size_type local_row_end =
        local_row_start + local_rows_per_process[this_process];

      // create the matrix; the sparsity pattern is communicated to PETSc below
      PetscErrorCode ierr = MatCreate(communicator, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetSizes(matrix,
                         local_rows_per_process[this_process],
                         local_columns_per_process[this_process],
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols());
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // use MATAIJ, which dispatches to SEQAIJ on one process and to MPIAIJ
      // otherwise
      ierr = MatSetType(matrix, MATAIJ);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      // if requested, preset the known nonzero locations with zeros: this
      // does not avoid memory allocation, but it allows setting entire rows
      // at once later on instead of single elements
      if (preset_nonzero_locations == true)
        {
          // set up the CSR arrays for the locally stored rows, with one
          // dummy entry at the end so that PETSc does not read past the end
          std::vector<PetscInt>
            rowstart_in_window(local_row_end - local_row_start + 1, 0),
            colnums_in_window;
          {
            size_type n_cols = 0;
            for (size_type i = local_row_start; i < local_row_end; ++i)
              {
                const size_type row_length = sparsity_pattern.row_length(i);
                rowstart_in_window[i + 1 - local_row_start] =
                  rowstart_in_window[i - local_row_start] + row_length;
                n_cols += row_length;
              }
            colnums_in_window.resize(n_cols + 1, -1);
          }

          // now copy over the column indices from the sparsity pattern
          {
            PetscInt *ptr = colnums_in_window.data();
            for (size_type i = local_row_start; i < local_row_end; ++i)
              for (typename SparsityPatternType::iterator p =
                     sparsity_pattern.begin(i);
                   p != sparsity_pattern.end(i);
                   ++p, ++ptr)
                *ptr = p->column();
          }

          // then let PETSc allocate these entries in one go
          ierr = MatSeqAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          ierr = MatMPIAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));

          // tell PETSc that we are not planning on adding new entries
          close_matrix(matrix);
          set_keep_zero_rows(matrix);
        }
    }
    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(const MPI_Comm &           communicator,
                            const IndexSet &           local_rows,
                            const IndexSet &           local_active_rows,
                            const IndexSet &           local_columns,
                            const IndexSet &           local_active_columns,
                            const SparsityPatternType &sparsity_pattern)
    {
#  if DEAL_II_PETSC_VERSION_GTE(3, 10, 0)
      Assert(sparsity_pattern.n_rows() == local_rows.size(),
             ExcMessage(
               "SparsityPattern and IndexSet have different number of rows."));
      Assert(sparsity_pattern.n_cols() == local_columns.size(),
             ExcMessage(
               "SparsityPattern and IndexSet have different number of columns"));
      Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
             ExcMessage("PETSc only supports contiguous row/column ranges"));
#    ifdef DEBUG
      {
        // each row and each column has to be owned by exactly one process
        const types::global_dof_index row_owners =
          Utilities::MPI::sum(local_rows.n_elements(), communicator);
        const types::global_dof_index col_owners =
          Utilities::MPI::sum(local_columns.n_elements(), communicator);
        Assert(row_owners == sparsity_pattern.n_rows(),
               ExcMessage("Each row has to be owned by exactly one owner (n_rows()=" +
                          std::to_string(sparsity_pattern.n_rows()) +
                          " but sum(local_rows.n_elements())=" +
                          std::to_string(row_owners) + ")"));
        Assert(col_owners == sparsity_pattern.n_cols(),
               ExcMessage("Each column has to be owned by exactly one owner (n_cols()=" +
                          std::to_string(sparsity_pattern.n_cols()) +
                          " but sum(local_columns.n_elements())=" +
                          std::to_string(col_owners) + ")"));
      }
#    endif
      // gather the global indices of the locally active rows and columns;
      // they define the local-to-global maps of the MATIS matrix
      const PetscInt n_local_active_rows = local_active_rows.n_elements();
      const PetscInt n_local_active_cols = local_active_columns.n_elements();
      std::vector<PetscInt> idx_glob_row(n_local_active_rows);
      std::vector<PetscInt> idx_glob_col(n_local_active_cols);
      for (PetscInt k = 0; k < n_local_active_rows; ++k)
        idx_glob_row[k] = local_active_rows.nth_index_in_set(k);
      for (PetscInt k = 0; k < n_local_active_cols; ++k)
        idx_glob_col[k] = local_active_columns.nth_index_in_set(k);

      PetscErrorCode ierr;
      IS             is_glob_row, is_glob_col;

      // row local-to-global map
      ISLocalToGlobalMapping l2gmap_row;
      ierr = ISCreateGeneral(communicator, n_local_active_rows,
                             idx_glob_row.data(), PETSC_COPY_VALUES,
                             &is_glob_row);
      ierr = ISLocalToGlobalMappingCreateIS(is_glob_row, &l2gmap_row);
      ierr = ISDestroy(&is_glob_row);
      ierr =
        ISLocalToGlobalMappingViewFromOptions(l2gmap_row, nullptr, "-view_map");

      // column local-to-global map
      ISLocalToGlobalMapping l2gmap_col;
      ierr = ISCreateGeneral(communicator, n_local_active_cols,
                             idx_glob_col.data(), PETSC_COPY_VALUES,
                             &is_glob_col);
      ierr = ISLocalToGlobalMappingCreateIS(is_glob_col, &l2gmap_col);
      ierr = ISDestroy(&is_glob_col);
      ierr =
        ISLocalToGlobalMappingViewFromOptions(l2gmap_col, nullptr, "-view_map");

      // create the unassembled MATIS matrix with these maps
      ierr = MatCreateIS(communicator,
                         1,
                         local_rows.n_elements(),
                         local_columns.n_elements(),
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols(),
                         l2gmap_row,
                         l2gmap_col,
                         &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // the maps are no longer needed once the matrix has been created
      ierr = ISLocalToGlobalMappingDestroy(&l2gmap_row);
      ierr = ISLocalToGlobalMappingDestroy(&l2gmap_col);
      // set up the sequential matrix that stores the local subdomain block
      Mat local_matrix;
      ierr = MatISGetLocalMat(matrix, &local_matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = MatSetType(local_matrix, MATSEQAIJ);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // the local matrix is indexed by the locally active rows and columns,
      // in local numbering
      const PetscInt local_row_start = 0;
      const PetscInt local_row_end   = local_active_rows.n_elements();
      std::vector<PetscInt> rowstart_in_window(local_row_end -
                                                 local_row_start + 1,
                                               0),
        colnums_in_window;
      unsigned int n_cols = 0;
      for (PetscInt i = local_row_start; i < local_row_end; ++i)
        {
          const PetscInt global_row = local_active_rows.nth_index_in_set(i);
          const PetscInt row_length = sparsity_pattern.row_length(global_row);
          rowstart_in_window[i + 1 - local_row_start] =
            rowstart_in_window[i - local_row_start] + row_length;
          n_cols += row_length;
        }
      colnums_in_window.resize(n_cols + 1, -1);

      // map the global indices of the locally active columns to local ones
      std::map<unsigned int, unsigned int> loc_act_cols_inv;
      for (unsigned int i = 0; i < local_active_columns.n_elements(); ++i)
        loc_act_cols_inv[local_active_columns.nth_index_in_set(i)] = i;

      // copy the column indices over from the sparsity pattern, renumbered
      PetscInt *ptr = colnums_in_window.data();
      for (PetscInt i = local_row_start; i < local_row_end; ++i)
        {
          const PetscInt global_row = local_active_rows.nth_index_in_set(i);
          for (typename SparsityPatternType::iterator p =
                 sparsity_pattern.begin(global_row);
               p != sparsity_pattern.end(global_row);
               ++p, ++ptr)
            *ptr = loc_act_cols_inv[p->column()];
        }

      if (local_row_end > 0)
        {
          ierr = MatSeqAIJSetPreallocationCSR(local_matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
        }
      else
        {
          PetscInt i = 0;
          ierr = MatSeqAIJSetPreallocationCSR(local_matrix, &i, &i, nullptr);
        }
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatISRestoreLocalMat(matrix, &local_matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
#  else
      (void)local_active_rows;
      (void)local_active_columns;
      (void)sparsity_pattern;
      AssertThrow(false,
                  ExcMessage("BDDC preconditioner requires PETSc 3.10.0 or newer"));
#  endif
    }
    // explicit instantiations of the constructor, reinit(), and do_reinit()
    // for the supported sparsity pattern types, e.g.
    template SparseMatrix::SparseMatrix(const MPI_Comm &,
                                        const SparsityPattern &,
                                        const std::vector<size_type> &,
                                        const std::vector<size_type> &,
                                        const unsigned int,
                                        const bool);
    // ...
    IndexSet
    SparseMatrix::locally_owned_domain_indices() const
    {
      PetscInt       n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
      PetscErrorCode ierr;

      ierr = MatGetSize(matrix, &n_rows, &n_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = MatGetOwnershipRangeColumn(matrix, &min, &max);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(n_loc_cols == max - min,
             ExcMessage(
               "PETSc is requiring non-contiguous memory allocation."));
    IndexSet
    SparseMatrix::locally_owned_range_indices() const
    {
      PetscInt       n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
      PetscErrorCode ierr;

      ierr = MatGetSize(matrix, &n_rows, &n_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = MatGetOwnershipRange(matrix, &min, &max);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(n_loc_rows == max - min,
             ExcMessage(
               "PETSc is requiring non-contiguous memory allocation."));