Reference documentation for deal.II version 9.2.0
petsc_parallel_sparse_matrix.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2004 - 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/petsc_sparse_matrix.h>
17 
18 #ifdef DEAL_II_WITH_PETSC
19 
20 # include <deal.II/base/mpi.h>
21 
22 # include <deal.II/lac/dynamic_sparsity_pattern.h>
23 # include <deal.II/lac/exceptions.h>
24 # include <deal.II/lac/petsc_compatibility.h>
25 # include <deal.II/lac/petsc_vector.h>
26 # include <deal.II/lac/sparsity_pattern.h>
27 
28 DEAL_II_NAMESPACE_OPEN
29 
30 namespace PETScWrappers
31 {
32  namespace MPI
33  {
34  SparseMatrix::SparseMatrix()
35  : communicator(MPI_COMM_SELF)
36  {
37  // just like for vectors: since we
38  // create an empty matrix, we can as
39  // well make it sequential
40  const int m = 0, n = 0, n_nonzero_per_row = 0;
41  const PetscErrorCode ierr = MatCreateSeqAIJ(
42  PETSC_COMM_SELF, m, n, n_nonzero_per_row, nullptr, &matrix);
43  AssertThrow(ierr == 0, ExcPETScError(ierr));
44  }
45 
46 
47  SparseMatrix::~SparseMatrix()
48  {
49  destroy_matrix(matrix);
50  }
51 
52  SparseMatrix::SparseMatrix(const MPI_Comm &communicator,
53  const size_type m,
54  const size_type n,
55  const size_type local_rows,
56  const size_type local_columns,
57  const size_type n_nonzero_per_row,
58  const bool is_symmetric,
59  const size_type n_offdiag_nonzero_per_row)
60  : communicator(communicator)
61  {
62  do_reinit(m,
63  n,
64  local_rows,
65  local_columns,
66  n_nonzero_per_row,
67  is_symmetric,
68  n_offdiag_nonzero_per_row);
69  }
70 
71 
72 
73  SparseMatrix::SparseMatrix(
74  const MPI_Comm & communicator,
75  const size_type m,
76  const size_type n,
77  const size_type local_rows,
78  const size_type local_columns,
79  const std::vector<size_type> &row_lengths,
80  const bool is_symmetric,
81  const std::vector<size_type> &offdiag_row_lengths)
82  : communicator(communicator)
83  {
84  do_reinit(m,
85  n,
86  local_rows,
87  local_columns,
88  row_lengths,
89  is_symmetric,
90  offdiag_row_lengths);
91  }
92 
93 
94 
95  template <typename SparsityPatternType>
96  SparseMatrix::SparseMatrix(
97  const MPI_Comm & communicator,
98  const SparsityPatternType & sparsity_pattern,
99  const std::vector<size_type> &local_rows_per_process,
100  const std::vector<size_type> &local_columns_per_process,
101  const unsigned int this_process,
102  const bool preset_nonzero_locations)
103  : communicator(communicator)
104  {
105  do_reinit(sparsity_pattern,
106  local_rows_per_process,
107  local_columns_per_process,
108  this_process,
109  preset_nonzero_locations);
110  }
111 
112 
113  void
114  SparseMatrix::reinit(const SparseMatrix &other)
115  {
116  if (&other == this)
117  return;
118 
119  this->communicator = other.communicator;
120 
121  PetscErrorCode ierr = destroy_matrix(matrix);
122  AssertThrow(ierr == 0, ExcPETScError(ierr));
123 
124  ierr = MatDuplicate(other.matrix, MAT_DO_NOT_COPY_VALUES, &matrix);
125  AssertThrow(ierr == 0, ExcPETScError(ierr));
126  }
127 
128 
129  SparseMatrix &
130  SparseMatrix::operator=(const value_type d)
131  {
132  MatrixBase::operator=(d);
133  return *this;
134  }
135 
136  void
137  SparseMatrix::copy_from(const SparseMatrix &other)
138  {
139  if (&other == this)
140  return;
141 
142  this->communicator = other.communicator;
143 
144  const PetscErrorCode ierr =
145  MatCopy(other.matrix, matrix, SAME_NONZERO_PATTERN);
146  AssertThrow(ierr == 0, ExcPETScError(ierr));
147  }
148 
149  void
150  SparseMatrix::reinit(const MPI_Comm &communicator,
151  const size_type m,
152  const size_type n,
153  const size_type local_rows,
154  const size_type local_columns,
155  const size_type n_nonzero_per_row,
156  const bool is_symmetric,
157  const size_type n_offdiag_nonzero_per_row)
158  {
159  this->communicator = communicator;
160 
161  // get rid of old matrix and generate a new one
162  const PetscErrorCode ierr = destroy_matrix(matrix);
163  AssertThrow(ierr == 0, ExcPETScError(ierr));
164 
165  do_reinit(m,
166  n,
167  local_rows,
168  local_columns,
169  n_nonzero_per_row,
170  is_symmetric,
171  n_offdiag_nonzero_per_row);
172  }
173 
174 
175 
176  void
177  SparseMatrix::reinit(const MPI_Comm & communicator,
178  const size_type m,
179  const size_type n,
180  const size_type local_rows,
181  const size_type local_columns,
182  const std::vector<size_type> &row_lengths,
183  const bool is_symmetric,
184  const std::vector<size_type> &offdiag_row_lengths)
185  {
186  this->communicator = communicator;
187 
188  // get rid of old matrix and generate a
189  // new one
190  const PetscErrorCode ierr = destroy_matrix(matrix);
191  AssertThrow(ierr == 0, ExcPETScError(ierr));
192 
193  do_reinit(m,
194  n,
195  local_rows,
196  local_columns,
197  row_lengths,
198  is_symmetric,
199  offdiag_row_lengths);
200  }
201 
202 
203 
204  template <typename SparsityPatternType>
205  void
206  SparseMatrix::reinit(
207  const MPI_Comm & communicator,
208  const SparsityPatternType & sparsity_pattern,
209  const std::vector<size_type> &local_rows_per_process,
210  const std::vector<size_type> &local_columns_per_process,
211  const unsigned int this_process,
212  const bool preset_nonzero_locations)
213  {
214  this->communicator = communicator;
215 
216  // get rid of old matrix and generate a new one
217  const PetscErrorCode ierr = destroy_matrix(matrix);
218  AssertThrow(ierr == 0, ExcPETScError(ierr));
219 
220 
221  do_reinit(sparsity_pattern,
222  local_rows_per_process,
223  local_columns_per_process,
224  this_process,
225  preset_nonzero_locations);
226  }
227 
228  template <typename SparsityPatternType>
229  void
230  SparseMatrix::reinit(const IndexSet & local_rows,
231  const IndexSet & local_columns,
232  const SparsityPatternType &sparsity_pattern,
233  const MPI_Comm & communicator)
234  {
235  this->communicator = communicator;
236 
237  // get rid of old matrix and generate a new one
238  const PetscErrorCode ierr = destroy_matrix(matrix);
239  AssertThrow(ierr == 0, ExcPETScError(ierr));
240 
241  do_reinit(local_rows, local_columns, sparsity_pattern);
242  }
243 
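 // A minimal usage sketch for the IndexSet-based reinit() above, which is the
 // interface typically called from application code. The names dof_handler,
 // dsp, and mpi_communicator are placeholders introduced only for
 // illustration; any DynamicSparsityPattern describing the couplings and the
 // communicator of the distributed triangulation would do:
 //
 //   const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
 //   PETScWrappers::MPI::SparseMatrix system_matrix;
 //   system_matrix.reinit(locally_owned_dofs,
 //                        locally_owned_dofs,
 //                        dsp,
 //                        mpi_communicator);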
244  void
245  SparseMatrix::do_reinit(const size_type m,
246  const size_type n,
247  const size_type local_rows,
248  const size_type local_columns,
249  const size_type n_nonzero_per_row,
250  const bool is_symmetric,
251  const size_type n_offdiag_nonzero_per_row)
252  {
253  Assert(local_rows <= m, ExcLocalRowsTooLarge(local_rows, m));
254 
255  // use the call sequence indicating only
256  // a maximal number of elements per row
257  // for all rows globally
258  const PetscErrorCode ierr = MatCreateAIJ(communicator,
259  local_rows,
260  local_columns,
261  m,
262  n,
263  n_nonzero_per_row,
264  nullptr,
265  n_offdiag_nonzero_per_row,
266  nullptr,
267  &matrix);
268  set_matrix_option(matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
269  AssertThrow(ierr == 0, ExcPETScError(ierr));
270 
271  // set symmetric flag, if so requested
272  if (is_symmetric == true)
273  {
274  set_matrix_option(matrix, MAT_SYMMETRIC, PETSC_TRUE);
275  }
276  }
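 // For reference on the MatCreateAIJ call above: the "diagonal" count
 // (n_nonzero_per_row) refers to entries whose column index lies in the block
 // of locally owned columns, while the "off-diagonal" count
 // (n_offdiag_nonzero_per_row) covers entries in columns owned by other
 // processes. A small illustration, assuming two processes that each own 2 of
 // the 4 rows and columns of a fully dense 4x4 matrix: every locally owned
 // row then has 2 entries inside the local column block and 2 outside, so
 //
 //   matrix.reinit(communicator,
 //                 4, 4,    // global size m x n
 //                 2, 2,    // local_rows, local_columns
 //                 2,       // n_nonzero_per_row (local column block)
 //                 false,
 //                 2);      // n_offdiag_nonzero_per_row
 //
 // would preallocate such a matrix exactly.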
277 
278 
279 
280  void
281  SparseMatrix::do_reinit(const size_type m,
282  const size_type n,
283  const size_type local_rows,
284  const size_type local_columns,
285  const std::vector<size_type> &row_lengths,
286  const bool is_symmetric,
287  const std::vector<size_type> &offdiag_row_lengths)
288  {
289  Assert(local_rows <= m, ExcLocalRowsTooLarge(local_rows, m));
290 
291  Assert(row_lengths.size() == m,
292  ExcDimensionMismatch(row_lengths.size(), m));
293 
294  // For the case that local_columns is smaller than one of the row lengths
295  // MatCreateMPIAIJ throws an error. In this case use a
296  // PETScWrappers::SparseMatrix
297  for (const size_type row_length : row_lengths)
298  {
299  (void)row_length;
300  Assert(row_length <= local_columns,
301  ExcIndexRange(row_length, 1, local_columns + 1));
302  }
303 
304  // use the call sequence indicating a
305  // maximal number of elements for each
306  // row individually. annoyingly, we
307  // always use unsigned ints for cases
308  // like this, while PETSc wants to see
309  // signed integers. so we have to
310  // convert, unless we want to play dirty
311  // tricks with conversions of pointers
312  const std::vector<PetscInt> int_row_lengths(row_lengths.begin(),
313  row_lengths.end());
314  const std::vector<PetscInt> int_offdiag_row_lengths(
315  offdiag_row_lengths.begin(), offdiag_row_lengths.end());
316 
317  // TODO: There must be a significantly better way to provide information
318  // about the off-diagonal blocks of the matrix. this way, petsc keeps
319  // allocating tiny chunks of memory, and gets completely hung up over this
320  const PetscErrorCode ierr =
321  MatCreateAIJ(communicator,
322  local_rows,
323  local_columns,
324  m,
325  n,
326  0,
327  int_row_lengths.data(),
328  0,
329  offdiag_row_lengths.size() ?
330  int_offdiag_row_lengths.data() :
331  nullptr,
332  &matrix);
333 
334  // TODO: Sometimes the actual number of nonzero entries allocated is
335  // greater than the number of nonzero entries, which petsc will complain
336  // about unless explicitly disabled with MatSetOption. There is probably a
337 way to prevent a different number of nonzero elements being allocated in
338  // the first place. (See also previous TODO).
339  set_matrix_option(matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
340  AssertThrow(ierr == 0, ExcPETScError(ierr));
341 
342  // set symmetric flag, if so requested
343  if (is_symmetric == true)
344  {
345  set_matrix_option(matrix, MAT_SYMMETRIC, PETSC_TRUE);
346  }
347  }
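 // A note on the preallocation arguments used above: as documented for
 // MatCreateAIJ, the scalar per-row counts are ignored whenever the per-row
 // arrays (here int_row_lengths and int_offdiag_row_lengths) are supplied,
 // which is why passing 0 for them is safe. The TODOs above therefore concern
 // the quality of the per-row estimates, not the call sequence itself.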
348 
349 
350  template <typename SparsityPatternType>
351  void
352  SparseMatrix::do_reinit(const IndexSet & local_rows,
353  const IndexSet & local_columns,
354  const SparsityPatternType &sparsity_pattern)
355  {
356  Assert(sparsity_pattern.n_rows() == local_rows.size(),
357  ExcMessage(
358  "SparsityPattern and IndexSet have different number of rows"));
359  Assert(
360  sparsity_pattern.n_cols() == local_columns.size(),
361  ExcMessage(
362  "SparsityPattern and IndexSet have different number of columns"));
363  Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
364  ExcMessage("PETSc only supports contiguous row/column ranges"));
365  Assert(local_rows.is_ascending_and_one_to_one(communicator),
366  ExcNotImplemented());
367 
368 # ifdef DEBUG
369  {
370  // check indexsets
371  types::global_dof_index row_owners =
372  Utilities::MPI::sum(local_rows.n_elements(), communicator);
373  types::global_dof_index col_owners =
374  Utilities::MPI::sum(local_columns.n_elements(), communicator);
375  Assert(row_owners == sparsity_pattern.n_rows(),
376  ExcMessage(
377  std::string(
378  "Each row has to be owned by exactly one owner (n_rows()=") +
379  std::to_string(sparsity_pattern.n_rows()) +
380  " but sum(local_rows.n_elements())=" +
381  std::to_string(row_owners) + ")"));
382  Assert(
383  col_owners == sparsity_pattern.n_cols(),
384  ExcMessage(
385  std::string(
386  "Each column has to be owned by exactly one owner (n_cols()=") +
387  std::to_string(sparsity_pattern.n_cols()) +
388  " but sum(local_columns.n_elements())=" +
389  std::to_string(col_owners) + ")"));
390  }
391 # endif
392 
393 
394  // create the matrix. We do not set row length but set the
395  // correct SparsityPattern later.
396  PetscErrorCode ierr = MatCreate(communicator, &matrix);
397  AssertThrow(ierr == 0, ExcPETScError(ierr));
398 
399  ierr = MatSetSizes(matrix,
400  local_rows.n_elements(),
401  local_columns.n_elements(),
402  sparsity_pattern.n_rows(),
403  sparsity_pattern.n_cols());
404  AssertThrow(ierr == 0, ExcPETScError(ierr));
405 
406  ierr = MatSetType(matrix, MATMPIAIJ);
407  AssertThrow(ierr == 0, ExcPETScError(ierr));
408 
409 
410  // next preset the exact given matrix
411  // entries with zeros. this doesn't avoid any
412  // memory allocations, but it at least
413  // avoids some searches later on. the
414  // key here is that we can use the
415  // matrix set routines that set an
416  // entire row at once, not a single
417  // entry at a time
418  //
419  // for the usefulness of this option
420  // read the documentation of this
421  // class.
422  // if (preset_nonzero_locations == true)
423  if (local_rows.n_elements() > 0)
424  {
425  // MatMPIAIJSetPreallocationCSR
426  // can be used to allocate the sparsity
427  // pattern of a matrix
428 
429  const PetscInt local_row_start = local_rows.nth_index_in_set(0);
430  const PetscInt local_row_end =
431  local_row_start + local_rows.n_elements();
432 
433 
434  // first set up the column number
435  // array for the rows to be stored
436  // on the local processor. have one
437  // dummy entry at the end to make
438  // sure petsc doesn't read past the
439  // end
440  std::vector<PetscInt>
441 
442  rowstart_in_window(local_row_end - local_row_start + 1, 0),
443  colnums_in_window;
444  {
445  unsigned int n_cols = 0;
446  for (PetscInt i = local_row_start; i < local_row_end; ++i)
447  {
448  const PetscInt row_length = sparsity_pattern.row_length(i);
449  rowstart_in_window[i + 1 - local_row_start] =
450  rowstart_in_window[i - local_row_start] + row_length;
451  n_cols += row_length;
452  }
453  colnums_in_window.resize(n_cols + 1, -1);
454  }
455 
456  // now copy over the information
457  // from the sparsity pattern.
458  {
459  PetscInt *ptr = colnums_in_window.data();
460  for (PetscInt i = local_row_start; i < local_row_end; ++i)
461  for (typename SparsityPatternType::iterator p =
462  sparsity_pattern.begin(i);
463  p != sparsity_pattern.end(i);
464  ++p, ++ptr)
465  *ptr = p->column();
466  }
467 
468 
469  // then call the petsc function
470  // that summarily allocates these
471  // entries:
472  ierr = MatMPIAIJSetPreallocationCSR(matrix,
473  rowstart_in_window.data(),
474  colnums_in_window.data(),
475  nullptr);
476  AssertThrow(ierr == 0, ExcPETScError(ierr));
477  }
478  else
479  {
480  PetscInt i = 0;
481  ierr = MatMPIAIJSetPreallocationCSR(matrix, &i, &i, nullptr);
482  AssertThrow(ierr == 0, ExcPETScError(ierr));
483  }
484  compress(dealii::VectorOperation::insert);
485 
486  {
487  close_matrix(matrix);
488  set_keep_zero_rows(matrix);
489  }
490  }
491 
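 // A small worked example of the CSR arrays handed to
 // MatMPIAIJSetPreallocationCSR above. Suppose a process owns two rows whose
 // column indices in the sparsity pattern are {0, 2} and {1, 3}. Then
 //
 //   rowstart_in_window = [0, 2, 4]    // rowstart[i+1]-rowstart[i] = row length
 //   colnums_in_window  = [0, 2, 1, 3]
 //
 // i.e. the column numbers of local row i occupy the half-open range
 // [rowstart[i], rowstart[i+1]) of colnums_in_window; the extra trailing
 // entry appended in the code only guards against PETSc reading past the end.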
492 
493  template <typename SparsityPatternType>
494  void
495  SparseMatrix::do_reinit(
496  const SparsityPatternType & sparsity_pattern,
497  const std::vector<size_type> &local_rows_per_process,
498  const std::vector<size_type> &local_columns_per_process,
499  const unsigned int this_process,
500  const bool preset_nonzero_locations)
501  {
502  Assert(local_rows_per_process.size() == local_columns_per_process.size(),
503  ExcDimensionMismatch(local_rows_per_process.size(),
504  local_columns_per_process.size()));
505  Assert(this_process < local_rows_per_process.size(), ExcInternalError());
506  assert_is_compressed();
507 
508  // for each row that we own locally, we
509  // have to count how many of the
510  // entries in the sparsity pattern lie
511  // in the column area we have locally,
512  // and how many aren't. for this, we
513  // first have to know which areas are
514  // ours
515  size_type local_row_start = 0;
516  size_type local_col_start = 0;
517  for (unsigned int p = 0; p < this_process; ++p)
518  {
519  local_row_start += local_rows_per_process[p];
520  local_col_start += local_columns_per_process[p];
521  }
522  const size_type local_row_end =
523  local_row_start + local_rows_per_process[this_process];
524 
525  // create the matrix. We
526  // do not set row length but set the
527  // correct SparsityPattern later.
528  PetscErrorCode ierr = MatCreate(communicator, &matrix);
529  AssertThrow(ierr == 0, ExcPETScError(ierr));
530 
531  ierr = MatSetSizes(matrix,
532  local_rows_per_process[this_process],
533  local_columns_per_process[this_process],
534  sparsity_pattern.n_rows(),
535  sparsity_pattern.n_cols());
536  AssertThrow(ierr == 0, ExcPETScError(ierr));
537 
538  ierr = MatSetType(matrix, MATMPIAIJ);
539  AssertThrow(ierr == 0, ExcPETScError(ierr));
540 
541  // next preset the exact given matrix
542  // entries with zeros, if the user
543  // requested so. this doesn't avoid any
544  // memory allocations, but it at least
545  // avoids some searches later on. the
546  // key here is that we can use the
547  // matrix set routines that set an
548  // entire row at once, not a single
549  // entry at a time
550  //
551  // for the usefulness of this option
552  // read the documentation of this
553  // class.
554  if (preset_nonzero_locations == true)
555  {
556  // MatMPIAIJSetPreallocationCSR
557  // can be used to allocate the sparsity
558  // pattern of a matrix if it is already
559  // available:
560 
561  // first set up the column number
562  // array for the rows to be stored
563  // on the local processor. have one
564  // dummy entry at the end to make
565  // sure petsc doesn't read past the
566  // end
567  std::vector<PetscInt>
568 
569  rowstart_in_window(local_row_end - local_row_start + 1, 0),
570  colnums_in_window;
571  {
572  size_type n_cols = 0;
573  for (size_type i = local_row_start; i < local_row_end; ++i)
574  {
575  const size_type row_length = sparsity_pattern.row_length(i);
576  rowstart_in_window[i + 1 - local_row_start] =
577  rowstart_in_window[i - local_row_start] + row_length;
578  n_cols += row_length;
579  }
580  colnums_in_window.resize(n_cols + 1, -1);
581  }
582 
583  // now copy over the information
584  // from the sparsity pattern.
585  {
586  PetscInt *ptr = colnums_in_window.data();
587  for (size_type i = local_row_start; i < local_row_end; ++i)
588  for (typename SparsityPatternType::iterator p =
589  sparsity_pattern.begin(i);
590  p != sparsity_pattern.end(i);
591  ++p, ++ptr)
592  *ptr = p->column();
593  }
594 
595 
596  // then call the petsc function
597  // that summarily allocates these
598  // entries:
599  ierr = MatMPIAIJSetPreallocationCSR(matrix,
600  rowstart_in_window.data(),
601  colnums_in_window.data(),
602  nullptr);
603  AssertThrow(ierr == 0, ExcPETScError(ierr));
604 
605  close_matrix(matrix);
606  set_keep_zero_rows(matrix);
607  }
608  }
609 
610 # ifndef DOXYGEN
611  // explicit instantiations
612  //
613  template SparseMatrix::SparseMatrix(const MPI_Comm &,
614  const SparsityPattern &,
615  const std::vector<size_type> &,
616  const std::vector<size_type> &,
617  const unsigned int,
618  const bool);
619  template SparseMatrix::SparseMatrix(const MPI_Comm &,
620  const DynamicSparsityPattern &,
621  const std::vector<size_type> &,
622  const std::vector<size_type> &,
623  const unsigned int,
624  const bool);
625 
626  template void
627  SparseMatrix::reinit(const MPI_Comm &,
628  const SparsityPattern &,
629  const std::vector<size_type> &,
630  const std::vector<size_type> &,
631  const unsigned int,
632  const bool);
633  template void
634  SparseMatrix::reinit(const MPI_Comm &,
635  const DynamicSparsityPattern &,
636  const std::vector<size_type> &,
637  const std::vector<size_type> &,
638  const unsigned int,
639  const bool);
640 
641  template void
642  SparseMatrix::reinit(const IndexSet &,
643  const IndexSet &,
644  const SparsityPattern &,
645  const MPI_Comm &);
646 
647  template void
648  SparseMatrix::reinit(const IndexSet &,
649  const IndexSet &,
650  const DynamicSparsityPattern &,
651  const MPI_Comm &);
652 
653  template void
654  SparseMatrix::do_reinit(const SparsityPattern &,
655  const std::vector<size_type> &,
656  const std::vector<size_type> &,
657  const unsigned int,
658  const bool);
659  template void
660  SparseMatrix::do_reinit(const DynamicSparsityPattern &,
661  const std::vector<size_type> &,
662  const std::vector<size_type> &,
663  const unsigned int,
664  const bool);
665 
666  template void
667  SparseMatrix::do_reinit(const IndexSet &,
668  const IndexSet &,
669  const SparsityPattern &);
670 
671  template void
672  SparseMatrix::do_reinit(const IndexSet &,
673  const IndexSet &,
674  const DynamicSparsityPattern &);
675 # endif
676 
677 
678  PetscScalar
679  SparseMatrix::matrix_norm_square(const Vector &v) const
680  {
681  Vector tmp(v);
682  vmult(tmp, v);
683  // note that v*tmp returns sum_i conjugate(v)_i * tmp_i
684  return v * tmp;
685  }
686 
687  PetscScalar
688  SparseMatrix::matrix_scalar_product(const Vector &u, const Vector &v) const
689  {
690  Vector tmp(v);
691  vmult(tmp, v);
692  // note that v*tmp returns sum_i conjugate(v)_i * tmp_i
693  return u * tmp;
694  }
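 // In the two functions above, v * tmp is the PETSc inner product, which
 // conjugates the first argument. With A denoting this matrix, the returned
 // values are therefore
 //
 //   matrix_norm_square(v)       = conj(v)^T * (A v)
 //   matrix_scalar_product(u, v) = conj(u)^T * (A v)
 //
 // For real-valued PetscScalar these reduce to v^T A v and u^T A v.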
695 
696  IndexSet
697  SparseMatrix::locally_owned_domain_indices() const
698  {
699  PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
700  PetscErrorCode ierr;
701 
702  ierr = MatGetSize(matrix, &n_rows, &n_cols);
703  AssertThrow(ierr == 0, ExcPETScError(ierr));
704 
705  ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
706  AssertThrow(ierr == 0, ExcPETScError(ierr));
707 
708  ierr = MatGetOwnershipRangeColumn(matrix, &min, &max);
709  AssertThrow(ierr == 0, ExcPETScError(ierr));
710 
711  Assert(n_loc_cols == max - min,
712  ExcMessage(
713  "PETSc is requiring non contiguous memory allocation."));
714 
715  IndexSet indices(n_cols);
716  indices.add_range(min, max);
717  indices.compress();
718 
719  return indices;
720  }
721 
722  IndexSet
723  SparseMatrix::locally_owned_range_indices() const
724  {
725  PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
726  PetscErrorCode ierr;
727 
728  ierr = MatGetSize(matrix, &n_rows, &n_cols);
729  AssertThrow(ierr == 0, ExcPETScError(ierr));
730 
731  ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
732  AssertThrow(ierr == 0, ExcPETScError(ierr));
733 
734  ierr = MatGetOwnershipRange(matrix, &min, &max);
735  AssertThrow(ierr == 0, ExcPETScError(ierr));
736 
737  Assert(n_loc_rows == max - min,
738  ExcMessage(
739  "PETSc is requiring non contiguous memory allocation."));
740 
741  IndexSet indices(n_rows);
742  indices.add_range(min, max);
743  indices.compress();
744 
745  return indices;
746  }
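 // Note on the two index set functions above: locally_owned_domain_indices()
 // describes the partitioning of the columns (the space of vectors the matrix
 // can be applied to) and is obtained from MatGetOwnershipRangeColumn(),
 // while locally_owned_range_indices() describes the partitioning of the rows
 // (the space of vectors the matrix produces) and is obtained from
 // MatGetOwnershipRange(). Both return a single contiguous range, matching
 // the assertions above about contiguous allocation.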
747 
748  void
749  SparseMatrix::mmult(SparseMatrix &C,
750  const SparseMatrix &B,
751  const MPI::Vector & V) const
752  {
753  // Simply forward to the protected member function of the base class
754  // that takes abstract matrix and vector arguments (to which the compiler
755  // automatically casts the arguments).
756  MatrixBase::mmult(C, B, V);
757  }
758 
759  void
760  SparseMatrix::Tmmult(SparseMatrix &C,
761  const SparseMatrix &B,
762  const MPI::Vector & V) const
763  {
764  // Simply forward to the protected member function of the base class
765  // that takes abstract matrix and vector arguments (to which the compiler
766  // automatically casts the arguments).
767  MatrixBase::Tmmult(C, B, V);
768  }
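 // For the two forwarding functions above: mmult() computes C = A * B and
 // Tmmult() computes C = A^T * B; if the optional vector V is given, it acts
 // as a diagonal scaling in between, i.e. C = A * diag(V) * B (respectively
 // A^T * diag(V) * B). A hypothetical call, with A and B set up elsewhere,
 // might look like:
 //
 //   PETScWrappers::MPI::SparseMatrix C;
 //   A.mmult(C, B);   // C = A * B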
769 
770  } // namespace MPI
771 } // namespace PETScWrappers
772 
773 
774 DEAL_II_NAMESPACE_CLOSE
775 
776 #endif // DEAL_II_WITH_PETSC