Reference documentation for deal.II version Git 1aa49a030b 2021-12-07 05:12:24 -0500
petsc_parallel_sparse_matrix.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/lac/petsc_sparse_matrix.h>

#ifdef DEAL_II_WITH_PETSC

#  include <deal.II/base/mpi.h>

#  include <deal.II/lac/dynamic_sparsity_pattern.h>
#  include <deal.II/lac/exceptions.h>
#  include <deal.II/lac/petsc_compatibility.h>
#  include <deal.II/lac/petsc_vector.h>
#  include <deal.II/lac/sparsity_pattern.h>

DEAL_II_NAMESPACE_OPEN

namespace PETScWrappers
{
  namespace MPI
  {
    SparseMatrix::SparseMatrix()
      : communicator(MPI_COMM_SELF)
    {
      // just like for vectors: since we
      // create an empty matrix, we can as
      // well make it sequential
      const int m = 0, n = 0, n_nonzero_per_row = 0;
      const PetscErrorCode ierr = MatCreateSeqAIJ(
        PETSC_COMM_SELF, m, n, n_nonzero_per_row, nullptr, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }



    SparseMatrix::~SparseMatrix()
    {
      destroy_matrix(matrix);
    }


    template <typename SparsityPatternType>
    SparseMatrix::SparseMatrix(
      const MPI_Comm &              communicator,
      const SparsityPatternType &   sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
      : communicator(communicator)
    {
      do_reinit(sparsity_pattern,
                local_rows_per_process,
                local_columns_per_process,
                this_process,
                preset_nonzero_locations);
    }


    void
    SparseMatrix::reinit(const SparseMatrix &other)
    {
      if (&other == this)
        return;

      this->communicator = other.communicator;

      PetscErrorCode ierr = destroy_matrix(matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

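      // duplicate the layout (sizes and sparsity pattern) of the other
      // matrix, but do not copy its values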
      ierr = MatDuplicate(other.matrix, MAT_DO_NOT_COPY_VALUES, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }


    SparseMatrix &
    SparseMatrix::operator=(const value_type d)
    {
      MatrixBase::operator=(d);
      return *this;
    }

    void
    SparseMatrix::copy_from(const SparseMatrix &other)
    {
      if (&other == this)
        return;

      this->communicator = other.communicator;

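      // copy the values 1:1; SAME_NONZERO_PATTERN tells PETSc to assume
      // that both matrices share the same sparsity layout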
      const PetscErrorCode ierr =
        MatCopy(other.matrix, matrix, SAME_NONZERO_PATTERN);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }


    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(
      const MPI_Comm &              communicator,
      const SparsityPatternType &   sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      this->communicator = communicator;

      // get rid of old matrix and generate a new one
      const PetscErrorCode ierr = destroy_matrix(matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));


      do_reinit(sparsity_pattern,
                local_rows_per_process,
                local_columns_per_process,
                this_process,
                preset_nonzero_locations);
    }

    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(const IndexSet &           local_rows,
                         const IndexSet &           local_columns,
                         const SparsityPatternType &sparsity_pattern,
                         const MPI_Comm &           communicator)
    {
      this->communicator = communicator;

      // get rid of old matrix and generate a new one
      const PetscErrorCode ierr = destroy_matrix(matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      do_reinit(local_rows, local_columns, sparsity_pattern);
    }


    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(const IndexSet &           local_rows,
                            const IndexSet &           local_columns,
                            const SparsityPatternType &sparsity_pattern)
    {
      Assert(sparsity_pattern.n_rows() == local_rows.size(),
             ExcMessage(
               "SparsityPattern and IndexSet have different number of rows"));
      Assert(
        sparsity_pattern.n_cols() == local_columns.size(),
        ExcMessage(
          "SparsityPattern and IndexSet have different number of columns"));
      Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
             ExcMessage("PETSc only supports contiguous row/column ranges"));
      Assert(local_rows.is_ascending_and_one_to_one(communicator),
             ExcNotImplemented());

#  ifdef DEBUG
      {
        // check indexsets
        types::global_dof_index row_owners =
          Utilities::MPI::sum(local_rows.n_elements(), communicator);
        types::global_dof_index col_owners =
          Utilities::MPI::sum(local_columns.n_elements(), communicator);
        Assert(row_owners == sparsity_pattern.n_rows(),
               ExcMessage(
                 std::string(
                   "Each row has to be owned by exactly one owner (n_rows()=") +
                 std::to_string(sparsity_pattern.n_rows()) +
                 " but sum(local_rows.n_elements())=" +
                 std::to_string(row_owners) + ")"));
        Assert(
          col_owners == sparsity_pattern.n_cols(),
          ExcMessage(
            std::string(
              "Each column has to be owned by exactly one owner (n_cols()=") +
            std::to_string(sparsity_pattern.n_cols()) +
            " but sum(local_columns.n_elements())=" +
            std::to_string(col_owners) + ")"));
      }
#  endif


      // create the matrix. We do not set row length but set the
      // correct SparsityPattern later.
      PetscErrorCode ierr = MatCreate(communicator, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetSizes(matrix,
                         local_rows.n_elements(),
                         local_columns.n_elements(),
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols());
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetType(matrix, MATMPIAIJ);
      AssertThrow(ierr == 0, ExcPETScError(ierr));


      // next preset the exact given matrix
      // entries with zeros. this doesn't avoid any
      // memory allocations, but it at least
      // avoids some searches later on. the
      // key here is that we can use the
      // matrix set routines that set an
      // entire row at once, not a single
      // entry at a time
      //
      // for the usefulness of this option
      // read the documentation of this
      // class.
      // if (preset_nonzero_locations == true)
      if (local_rows.n_elements() > 0)
        {
          // MatMPIAIJSetPreallocationCSR
          // can be used to allocate the sparsity
          // pattern of a matrix

          const PetscInt local_row_start = local_rows.nth_index_in_set(0);
          const PetscInt local_row_end =
            local_row_start + local_rows.n_elements();


          // first set up the column number
          // array for the rows to be stored
          // on the local processor. have one
          // dummy entry at the end to make
          // sure petsc doesn't read past the
          // end
          std::vector<PetscInt>

            rowstart_in_window(local_row_end - local_row_start + 1, 0),
            colnums_in_window;
          {
            unsigned int n_cols = 0;
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              {
                const PetscInt row_length = sparsity_pattern.row_length(i);
                rowstart_in_window[i + 1 - local_row_start] =
                  rowstart_in_window[i - local_row_start] + row_length;
                n_cols += row_length;
              }
            colnums_in_window.resize(n_cols + 1, -1);
          }

          // now copy over the information
          // from the sparsity pattern.
          {
            PetscInt *ptr = colnums_in_window.data();
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              for (typename SparsityPatternType::iterator p =
                     sparsity_pattern.begin(i);
                   p != sparsity_pattern.end(i);
                   ++p, ++ptr)
                *ptr = p->column();
          }
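          // example: for two local rows whose entries sit in columns {0,2}
          // and {1}, rowstart_in_window ends up as {0,2,3} and
          // colnums_in_window as {0,2,1,-1}, the trailing -1 being the
          // dummy entry from the resize() above that PETSc never reads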


          // then call the petsc function
          // that summarily allocates these
          // entries:
          ierr = MatMPIAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      else
        {
          PetscInt i = 0;
          ierr = MatMPIAIJSetPreallocationCSR(matrix, &i, &i, nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      compress(VectorOperation::insert);

      {
        close_matrix(matrix);
        set_keep_zero_rows(matrix);
      }
    }
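
    // example of the partition do_reinit() above expects: with two MPI
    // processes and a 10x10 pattern, process 0 would typically pass
    // local_rows = [0,5) and process 1 local_rows = [5,10), so that
    // MatSetSizes() receives local size 5 and global size 10 on each process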


    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(
      const SparsityPatternType &   sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      Assert(local_rows_per_process.size() == local_columns_per_process.size(),
             ExcDimensionMismatch(local_rows_per_process.size(),
                                  local_columns_per_process.size()));
      Assert(this_process < local_rows_per_process.size(), ExcInternalError());

      // for each row that we own locally, we
      // have to count how many of the
      // entries in the sparsity pattern lie
      // in the column area we have locally,
      // and how many aren't. for this, we
      // first have to know which areas are
      // ours
      size_type local_row_start = 0;
      for (unsigned int p = 0; p < this_process; ++p)
        local_row_start += local_rows_per_process[p];
      const size_type local_row_end =
        local_row_start + local_rows_per_process[this_process];

      // create the matrix. We
      // do not set row length but set the
      // correct SparsityPattern later.
      PetscErrorCode ierr = MatCreate(communicator, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetSizes(matrix,
                         local_rows_per_process[this_process],
                         local_columns_per_process[this_process],
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols());
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetType(matrix, MATMPIAIJ);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // next preset the exact given matrix
      // entries with zeros, if the user
      // requested so. this doesn't avoid any
      // memory allocations, but it at least
      // avoids some searches later on. the
      // key here is that we can use the
      // matrix set routines that set an
      // entire row at once, not a single
      // entry at a time
      //
      // for the usefulness of this option
      // read the documentation of this
      // class.
      if (preset_nonzero_locations == true)
        {
          // MatMPIAIJSetPreallocationCSR
          // can be used to allocate the sparsity
          // pattern of a matrix if it is already
          // available:

          // first set up the column number
          // array for the rows to be stored
          // on the local processor. have one
          // dummy entry at the end to make
          // sure petsc doesn't read past the
          // end
          std::vector<PetscInt>

            rowstart_in_window(local_row_end - local_row_start + 1, 0),
            colnums_in_window;
          {
            size_type n_cols = 0;
            for (size_type i = local_row_start; i < local_row_end; ++i)
              {
                const size_type row_length = sparsity_pattern.row_length(i);
                rowstart_in_window[i + 1 - local_row_start] =
                  rowstart_in_window[i - local_row_start] + row_length;
                n_cols += row_length;
              }
            colnums_in_window.resize(n_cols + 1, -1);
          }

          // now copy over the information
          // from the sparsity pattern.
          {
            PetscInt *ptr = colnums_in_window.data();
            for (size_type i = local_row_start; i < local_row_end; ++i)
              for (typename SparsityPatternType::iterator p =
                     sparsity_pattern.begin(i);
                   p != sparsity_pattern.end(i);
                   ++p, ++ptr)
                *ptr = p->column();
          }


          // then call the petsc function
          // that summarily allocates these
          // entries:
          ierr = MatMPIAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));

          close_matrix(matrix);
          set_keep_zero_rows(matrix);
        }
    }

#  ifndef DOXYGEN
    // explicit instantiations
    //
    template SparseMatrix::SparseMatrix(const MPI_Comm &,
                                        const SparsityPattern &,
                                        const std::vector<size_type> &,
                                        const std::vector<size_type> &,
                                        const unsigned int,
                                        const bool);
    template SparseMatrix::SparseMatrix(const MPI_Comm &,
                                        const DynamicSparsityPattern &,
                                        const std::vector<size_type> &,
                                        const std::vector<size_type> &,
                                        const unsigned int,
                                        const bool);

    template void
    SparseMatrix::reinit(const MPI_Comm &,
                         const SparsityPattern &,
                         const std::vector<size_type> &,
                         const std::vector<size_type> &,
                         const unsigned int,
                         const bool);
    template void
    SparseMatrix::reinit(const MPI_Comm &,
                         const DynamicSparsityPattern &,
                         const std::vector<size_type> &,
                         const std::vector<size_type> &,
                         const unsigned int,
                         const bool);

    template void
    SparseMatrix::reinit(const IndexSet &,
                         const IndexSet &,
                         const SparsityPattern &,
                         const MPI_Comm &);

    template void
    SparseMatrix::reinit(const IndexSet &,
                         const IndexSet &,
                         const DynamicSparsityPattern &,
                         const MPI_Comm &);

    template void
    SparseMatrix::do_reinit(const SparsityPattern &,
                            const std::vector<size_type> &,
                            const std::vector<size_type> &,
                            const unsigned int,
                            const bool);
    template void
    SparseMatrix::do_reinit(const DynamicSparsityPattern &,
                            const std::vector<size_type> &,
                            const std::vector<size_type> &,
                            const unsigned int,
                            const bool);

    template void
    SparseMatrix::do_reinit(const IndexSet &,
                            const IndexSet &,
                            const SparsityPattern &);

    template void
    SparseMatrix::do_reinit(const IndexSet &,
                            const IndexSet &,
                            const DynamicSparsityPattern &);
#  endif


    PetscScalar
    SparseMatrix::matrix_norm_square(const Vector &v) const
    {
      Vector tmp(v);
      vmult(tmp, v);
      // note that v*tmp returns sum_i conjugate(v)_i * tmp_i
      return v * tmp;
    }

    PetscScalar
    SparseMatrix::matrix_scalar_product(const Vector &u, const Vector &v) const
    {
      Vector tmp(v);
      vmult(tmp, v);
      // note that u*tmp returns sum_i conjugate(u)_i * tmp_i
      return u * tmp;
    }

    IndexSet
    SparseMatrix::locally_owned_domain_indices() const
    {
      PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
      PetscErrorCode ierr;

      ierr = MatGetSize(matrix, &n_rows, &n_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetOwnershipRangeColumn(matrix, &min, &max);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(n_loc_cols == max - min,
             ExcMessage(
               "PETSc is requiring non-contiguous memory allocation."));

      IndexSet indices(n_cols);
      indices.add_range(min, max);
      indices.compress();

      return indices;
    }

    IndexSet
    SparseMatrix::locally_owned_range_indices() const
    {
      PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
      PetscErrorCode ierr;

      ierr = MatGetSize(matrix, &n_rows, &n_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetOwnershipRange(matrix, &min, &max);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(n_loc_rows == max - min,
             ExcMessage(
               "PETSc is requiring non-contiguous memory allocation."));

      IndexSet indices(n_rows);
      indices.add_range(min, max);
      indices.compress();

      return indices;
    }

    void
    SparseMatrix::mmult(SparseMatrix &      C,
                        const SparseMatrix &B,
                        const MPI::Vector & V) const
    {
      // Simply forward to the protected member function of the base class
      // that takes abstract matrix and vector arguments (to which the
      // compiler automatically casts the arguments).
      MatrixBase::mmult(C, B, V);
    }

    void
    SparseMatrix::Tmmult(SparseMatrix &      C,
                         const SparseMatrix &B,
                         const MPI::Vector & V) const
    {
      // Simply forward to the protected member function of the base class
      // that takes abstract matrix and vector arguments (to which the
      // compiler automatically casts the arguments).
      MatrixBase::Tmmult(C, B, V);
    }

  } // namespace MPI
} // namespace PETScWrappers


DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_PETSC
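
The listing above is easiest to understand from the calling side. What follows is a minimal usage sketch, not taken from the deal.II sources; the size n = 100, the uniform row partition, and the tridiagonal entries are illustrative assumptions. Each process describes its contiguous block of rows with an IndexSet, fills a DynamicSparsityPattern, and hands both to the reinit() overload instantiated above, which preallocates the PETSc matrix through do_reinit() and MatMPIAIJSetPreallocationCSR.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_sparse_matrix.h>

#include <algorithm>

int main(int argc, char **argv)
{
  using namespace dealii;

  // initializes MPI and, because deal.II is configured with PETSc,
  // PetscInitialize() as well
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  const unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int size = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  // each process owns a contiguous, ascending block of rows, as the
  // assertions in do_reinit() require
  const unsigned int n     = 100;
  const unsigned int begin = (rank * n) / size;
  const unsigned int end   = ((rank + 1) * n) / size;

  IndexSet locally_owned(n);
  locally_owned.add_range(begin, end);

  // a tridiagonal sparsity pattern for the locally owned rows
  DynamicSparsityPattern dsp(n, n);
  for (unsigned int i = begin; i < end; ++i)
    for (unsigned int j = (i == 0 ? 0 : i - 1); j <= std::min(i + 1, n - 1);
         ++j)
      dsp.add(i, j);

  // this ends up in the IndexSet overload of do_reinit() shown above
  PETScWrappers::MPI::SparseMatrix A;
  A.reinit(locally_owned, locally_owned, dsp, MPI_COMM_WORLD);

  // write the locally owned entries and assemble in parallel
  for (unsigned int i = begin; i < end; ++i)
    {
      A.set(i, i, 2.0);
      if (i > 0)
        A.set(i, i - 1, -1.0);
      if (i < n - 1)
        A.set(i, i + 1, -1.0);
    }
  A.compress(VectorOperation::insert);

  return 0;
}

After linking against a PETSc-enabled deal.II build, this runs with e.g. mpirun -np 2; each process then passes a 50-row IndexSet to reinit().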