petsc_parallel_sparse_matrix.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2004 - 2023 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#include <deal.II/lac/petsc_sparse_matrix.h>
16
17#ifdef DEAL_II_WITH_PETSC
18
19# include <deal.II/base/mpi.h>
20
21# include <deal.II/lac/dynamic_sparsity_pattern.h>
22# include <deal.II/lac/exceptions.h>
23# include <deal.II/lac/petsc_compatibility.h>
24# include <deal.II/lac/petsc_vector.h>
25# include <deal.II/lac/sparsity_pattern.h>
26
27DEAL_II_NAMESPACE_OPEN
28
29namespace PETScWrappers
30{
31 namespace MPI
32 {
33 SparseMatrix::SparseMatrix()
34 {
35 // just like for vectors: since we
36 // create an empty matrix, we can as
37 // well make it sequential
38 const int m = 0, n = 0, n_nonzero_per_row = 0;
39 const PetscErrorCode ierr = MatCreateSeqAIJ(
40 PETSC_COMM_SELF, m, n, n_nonzero_per_row, nullptr, &matrix);
41 AssertThrow(ierr == 0, ExcPETScError(ierr));
42 }
43
44 SparseMatrix::SparseMatrix(const Mat &A)
45 : MatrixBase(A)
46 {}
47
48 SparseMatrix::~SparseMatrix()
49 {
50 PetscErrorCode ierr = MatDestroy(&matrix);
51 AssertNothrow(ierr == 0, ExcPETScError(ierr));
52 }
53
54
55
56 template <typename SparsityPatternType>
57 SparseMatrix::SparseMatrix(
58 const MPI_Comm communicator,
59 const SparsityPatternType &sparsity_pattern,
60 const std::vector<size_type> &local_rows_per_process,
61 const std::vector<size_type> &local_columns_per_process,
62 const unsigned int this_process,
63 const bool preset_nonzero_locations)
64 {
65 do_reinit(communicator,
66 sparsity_pattern,
67 local_rows_per_process,
68 local_columns_per_process,
69 this_process,
70 preset_nonzero_locations);
71 }
72
73
74
75 void
76 SparseMatrix::reinit(const SparseMatrix &other)
77 {
78 if (&other == this)
79 return;
80
81 PetscErrorCode ierr = MatDestroy(&matrix);
82 AssertThrow(ierr == 0, ExcPETScError(ierr));
83
84 ierr = MatDuplicate(other.matrix, MAT_DO_NOT_COPY_VALUES, &matrix);
85 AssertThrow(ierr == 0, ExcPETScError(ierr));
86 }
87
88 template <typename SparsityPatternType>
89 void
90 SparseMatrix::reinit(const IndexSet &local_rows,
91 const IndexSet &local_active_rows,
92 const IndexSet &local_columns,
93 const IndexSet &local_active_columns,
94 const SparsityPatternType &sparsity_pattern,
95 const MPI_Comm communicator)
96 {
97 // get rid of old matrix and generate a new one
98 const PetscErrorCode ierr = MatDestroy(&matrix);
99 AssertThrow(ierr == 0, ExcPETScError(ierr));
100
101 do_reinit(communicator,
102 local_rows,
103 local_active_rows,
104 local_columns,
105 local_active_columns,
106 sparsity_pattern);
107 }
108
109
110 SparseMatrix &
111 SparseMatrix::operator=(const value_type d)
112 {
113 MatrixBase::operator=(d);
114 return *this;
115 }
116
117 void
118 SparseMatrix::copy_from(const SparseMatrix &other)
119 {
120 if (&other == this)
121 return;
122
123 const PetscErrorCode ierr =
124 MatCopy(other.matrix, matrix, SAME_NONZERO_PATTERN);
125 AssertThrow(ierr == 0, ExcPETScError(ierr));
126 }
127
128
129
130 template <typename SparsityPatternType>
131 void
132 SparseMatrix::reinit(
133 const MPI_Comm communicator,
134 const SparsityPatternType &sparsity_pattern,
135 const std::vector<size_type> &local_rows_per_process,
136 const std::vector<size_type> &local_columns_per_process,
137 const unsigned int this_process,
138 const bool preset_nonzero_locations)
139 {
140 // get rid of old matrix and generate a new one
141 const PetscErrorCode ierr = MatDestroy(&matrix);
142 AssertThrow(ierr == 0, ExcPETScError(ierr));
143
144
145 do_reinit(communicator,
146 sparsity_pattern,
147 local_rows_per_process,
148 local_columns_per_process,
149 this_process,
150 preset_nonzero_locations);
151 }
152
153
154
155 template <typename SparsityPatternType>
156 void
157 SparseMatrix::reinit(const IndexSet &local_rows,
158 const SparsityPatternType &sparsity_pattern,
159 const MPI_Comm communicator)
160 {
161 do_reinit(communicator, local_rows, local_rows, sparsity_pattern);
162 }
163
164 template <typename SparsityPatternType>
165 void
166 SparseMatrix::reinit(const IndexSet &local_rows,
167 const IndexSet &local_columns,
168 const SparsityPatternType &sparsity_pattern,
169 const MPI_Comm communicator)
170 {
171 // get rid of old matrix and generate a new one
172 const PetscErrorCode ierr = MatDestroy(&matrix);
173 AssertThrow(ierr == 0, ExcPETScError(ierr));
174
175 do_reinit(communicator, local_rows, local_columns, sparsity_pattern);
176 }
177
178
179
180 template <typename SparsityPatternType>
181 void
182 SparseMatrix::do_reinit(const MPI_Comm communicator,
183 const IndexSet &local_rows,
184 const IndexSet &local_columns,
185 const SparsityPatternType &sparsity_pattern)
186 {
187 // If the sparsity pattern's dimensions can be converted to PetscInts then
188 // the rest of the conversions will succeed
189 AssertThrowIntegerConversion(static_cast<PetscInt>(
190 sparsity_pattern.n_rows()),
191 sparsity_pattern.n_rows());
192 AssertThrowIntegerConversion(static_cast<PetscInt>(
193 sparsity_pattern.n_cols()),
194 sparsity_pattern.n_cols());
195
196 Assert(sparsity_pattern.n_rows() == local_rows.size(),
198 "SparsityPattern and IndexSet have different number of rows"));
199 Assert(
200 sparsity_pattern.n_cols() == local_columns.size(),
202 "SparsityPattern and IndexSet have different number of columns"));
203 Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
204 ExcMessage("PETSc only supports contiguous row/column ranges"));
205 Assert(local_rows.is_ascending_and_one_to_one(communicator),
206 ExcNotImplemented());
207
208 if constexpr (running_in_debug_mode())
209 {
210 // check indexsets
211 types::global_dof_index row_owners =
212 Utilities::MPI::sum(local_rows.n_elements(), communicator);
213 types::global_dof_index col_owners =
214 Utilities::MPI::sum(local_columns.n_elements(), communicator);
215 Assert(
216 row_owners == sparsity_pattern.n_rows(),
217 ExcMessage(
218 std::string(
219 "Each row has to be owned by exactly one owner (n_rows()=") +
220 std::to_string(sparsity_pattern.n_rows()) +
221 " but sum(local_rows.n_elements())=" +
222 std::to_string(row_owners) + ")"));
223 Assert(
224 col_owners == sparsity_pattern.n_cols(),
225 ExcMessage(
226 std::string(
227 "Each column has to be owned by exactly one owner (n_cols()=") +
228 std::to_string(sparsity_pattern.n_cols()) +
229 " but sum(local_columns.n_elements())=" +
230 std::to_string(col_owners) + ")"));
231 }
232
233
234 // create the matrix. We do not set row length but set the
235 // correct SparsityPattern later.
236 PetscErrorCode ierr = MatCreate(communicator, &matrix);
237 AssertThrow(ierr == 0, ExcPETScError(ierr));
238
239 ierr = MatSetSizes(matrix,
240 local_rows.n_elements(),
241 local_columns.n_elements(),
242 sparsity_pattern.n_rows(),
243 sparsity_pattern.n_cols());
244 AssertThrow(ierr == 0, ExcPETScError(ierr));
245
246 // Use MATAIJ which dispatches to SEQAIJ
247 // if the size of the communicator is 1,
248 // and to MPIAIJ otherwise.
249 ierr = MatSetType(matrix, MATAIJ);
250 AssertThrow(ierr == 0, ExcPETScError(ierr));
251
252
253 // next preset the exact given matrix
254 // entries with zeros. this doesn't avoid any
255 // memory allocations, but it at least
256 // avoids some searches later on. the
257 // key here is that we can use the
258 // matrix set routines that set an
259 // entire row at once, not a single
260 // entry at a time
261 //
262 // for the usefulness of this option
263 // read the documentation of this
264 // class.
265 // if (preset_nonzero_locations == true)
266 if (local_rows.n_elements() > 0)
267 {
268 // MatXXXAIJSetPreallocationCSR
269 // can be used to allocate the sparsity
270 // pattern of a matrix
271
272 const PetscInt local_row_start = local_rows.nth_index_in_set(0);
273 const PetscInt local_row_end =
274 local_row_start + local_rows.n_elements();
275
276
277 // first set up the column number
278 // array for the rows to be stored
279 // on the local processor. have one
280 // dummy entry at the end to make
281 // sure petsc doesn't read past the
282 // end
283 std::vector<PetscInt> rowstart_in_window(local_row_end -
284 local_row_start + 1,
285 0),
286 colnums_in_window;
287 {
288 unsigned int n_cols = 0;
289 for (PetscInt i = local_row_start; i < local_row_end; ++i)
290 {
291 const PetscInt row_length = sparsity_pattern.row_length(i);
292 rowstart_in_window[i + 1 - local_row_start] =
293 rowstart_in_window[i - local_row_start] + row_length;
294 n_cols += row_length;
295 }
296 colnums_in_window.resize(n_cols + 1, -1);
297 }
298
299 // now copy over the information
300 // from the sparsity pattern.
301 {
302 PetscInt *ptr = colnums_in_window.data();
303 for (PetscInt i = local_row_start; i < local_row_end; ++i)
304 for (typename SparsityPatternType::iterator p =
305 sparsity_pattern.begin(i);
306 p != sparsity_pattern.end(i);
307 ++p, ++ptr)
308 *ptr = p->column();
309 }
310
311
312 // then call the petsc functions
313 // that summarily allocates these
314 // entries.
315 // Here we call both specific APIs since this is how
316 // PETSc polymorphism works. If the matrix is of type MPIAIJ,
317 // the second call is dummy. If the matrix is of type SEQAIJ,
318 // the first call is dummy.
319 ierr = MatMPIAIJSetPreallocationCSR(matrix,
320 rowstart_in_window.data(),
321 colnums_in_window.data(),
322 nullptr);
323 ierr = MatSeqAIJSetPreallocationCSR(matrix,
324 rowstart_in_window.data(),
325 colnums_in_window.data(),
326 nullptr);
327 AssertThrow(ierr == 0, ExcPETScError(ierr));
328 }
329 else
330 {
331 PetscInt i = 0;
332
333 ierr = MatSeqAIJSetPreallocationCSR(matrix, &i, &i, nullptr);
334 AssertThrow(ierr == 0, ExcPETScError(ierr));
335 ierr = MatMPIAIJSetPreallocationCSR(matrix, &i, &i, nullptr);
336 AssertThrow(ierr == 0, ExcPETScError(ierr));
337 }
338 compress(VectorOperation::insert);
339
340 {
341 close_matrix(matrix);
342 set_keep_zero_rows(matrix);
343 }
344 }
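// Worked example (the row numbers and column entries below are assumed purely
// for illustration; they do not come from the file above): if this process
// owns global rows 4 and 5, and the sparsity pattern stores row 4 = {0, 4, 7}
// and row 5 = {5}, then the CSR arrays built in do_reinit() and handed to
// MatMPIAIJSetPreallocationCSR / MatSeqAIJSetPreallocationCSR would be
//
//   rowstart_in_window = {0, 3, 4};         // one entry per local row, plus end marker
//   colnums_in_window  = {0, 4, 7, 5, -1};  // global column indices, one dummy entry at the end
//
// i.e. the column indices of local row r occupy the half-open range
// [rowstart_in_window[r], rowstart_in_window[r+1]) of colnums_in_window.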
345
346
347 template <typename SparsityPatternType>
348 void
349 SparseMatrix::do_reinit(
350 const MPI_Comm communicator,
351 const SparsityPatternType &sparsity_pattern,
352 const std::vector<size_type> &local_rows_per_process,
353 const std::vector<size_type> &local_columns_per_process,
354 const unsigned int this_process,
355 const bool preset_nonzero_locations)
356 {
357 Assert(local_rows_per_process.size() == local_columns_per_process.size(),
358 ExcDimensionMismatch(local_rows_per_process.size(),
359 local_columns_per_process.size()));
360 Assert(this_process < local_rows_per_process.size(), ExcInternalError());
362 // If the sparsity pattern's dimensions can be converted to PetscInts then
363 // the rest of the conversions will succeed
364 AssertThrowIntegerConversion(static_cast<PetscInt>(
365 sparsity_pattern.n_rows()),
366 sparsity_pattern.n_rows());
367 AssertThrowIntegerConversion(static_cast<PetscInt>(
368 sparsity_pattern.n_cols()),
369 sparsity_pattern.n_cols());
370
371 // for each row that we own locally, we
372 // have to count how many of the
373 // entries in the sparsity pattern lie
374 // in the column area we have locally,
375 // and how many aren't. for this, we
376 // first have to know which areas are
377 // ours
378 size_type local_row_start = 0;
379 for (unsigned int p = 0; p < this_process; ++p)
380 local_row_start += local_rows_per_process[p];
381 const size_type local_row_end =
382 local_row_start + local_rows_per_process[this_process];
383
384 // create the matrix. We
385 // do not set row length but set the
386 // correct SparsityPattern later.
387 PetscErrorCode ierr = MatCreate(communicator, &matrix);
388 AssertThrow(ierr == 0, ExcPETScError(ierr));
389
390 ierr = MatSetSizes(matrix,
391 local_rows_per_process[this_process],
392 local_columns_per_process[this_process],
393 sparsity_pattern.n_rows(),
394 sparsity_pattern.n_cols());
395 AssertThrow(ierr == 0, ExcPETScError(ierr));
396
397 // Use MATAIJ which dispatches to SEQAIJ
398 // if the size of the communicator is 1,
399 // and to MPIAIJ otherwise.
400 ierr = MatSetType(matrix, MATAIJ);
401 AssertThrow(ierr == 0, ExcPETScError(ierr));
402
403 // next preset the exact given matrix
404 // entries with zeros, if the user
405 // requested so. this doesn't avoid any
406 // memory allocations, but it at least
407 // avoids some searches later on. the
408 // key here is that we can use the
409 // matrix set routines that set an
410 // entire row at once, not a single
411 // entry at a time
412 //
413 // for the usefulness of this option
414 // read the documentation of this
415 // class.
416 if (preset_nonzero_locations == true)
417 {
418 // MatXXXAIJSetPreallocationCSR
419 // can be used to allocate the sparsity
420 // pattern of a matrix if it is already
421 // available:
422
423 // first set up the column number
424 // array for the rows to be stored
425 // on the local processor. have one
426 // dummy entry at the end to make
427 // sure petsc doesn't read past the
428 // end
429 std::vector<PetscInt> rowstart_in_window(local_row_end -
430 local_row_start + 1,
431 0),
432 colnums_in_window;
433 {
434 size_type n_cols = 0;
435 for (size_type i = local_row_start; i < local_row_end; ++i)
436 {
437 const size_type row_length = sparsity_pattern.row_length(i);
438 const auto row_start =
439 rowstart_in_window[i - local_row_start] + row_length;
440 const auto petsc_row_start = static_cast<PetscInt>(row_start);
441 AssertIntegerConversion(petsc_row_start, row_start);
442 rowstart_in_window[i + 1 - local_row_start] = petsc_row_start;
443 n_cols += row_length;
444 }
445 colnums_in_window.resize(n_cols + 1, -1);
446 }
447
448 // now copy over the information
449 // from the sparsity pattern.
450 {
451 PetscInt *ptr = colnums_in_window.data();
452 for (size_type i = local_row_start; i < local_row_end; ++i)
453 for (typename SparsityPatternType::iterator p =
454 sparsity_pattern.begin(i);
455 p != sparsity_pattern.end(i);
456 ++p, ++ptr)
457 {
458 const auto petsc_column = static_cast<PetscInt>(p->column());
459 AssertIntegerConversion(petsc_column, p->column());
460 *ptr = petsc_column;
461 }
462 }
463
464
465 // then call the petsc function
466 // that summarily allocates these
467 // entries.
468 // Here we call both specific APIs since this is how
469 // PETSc polymorphism works. If the matrix is of type MPIAIJ,
470 // the second call is dummy. If the matrix is of type SEQAIJ,
471 // the first call is dummy.
472 ierr = MatSeqAIJSetPreallocationCSR(matrix,
473 rowstart_in_window.data(),
474 colnums_in_window.data(),
475 nullptr);
476 ierr = MatMPIAIJSetPreallocationCSR(matrix,
477 rowstart_in_window.data(),
478 colnums_in_window.data(),
479 nullptr);
480 AssertThrow(ierr == 0, ExcPETScError(ierr));
481
482 compress(VectorOperation::insert);
483 set_keep_zero_rows(matrix);
484 }
485 }
486
487 // BDDC
488 template <typename SparsityPatternType>
489 void
490 SparseMatrix::do_reinit(const MPI_Comm communicator,
491 const IndexSet &local_rows,
492 const IndexSet &local_active_rows,
493 const IndexSet &local_columns,
494 const IndexSet &local_active_columns,
495 const SparsityPatternType &sparsity_pattern)
496 {
497 // If the sparsity pattern's dimensions can be converted to PetscInts then
498 // the rest of the conversions will succeed
499 AssertThrowIntegerConversion(static_cast<PetscInt>(
500 sparsity_pattern.n_rows()),
501 sparsity_pattern.n_rows());
502 AssertThrowIntegerConversion(static_cast<PetscInt>(
503 sparsity_pattern.n_cols()),
504 sparsity_pattern.n_cols());
505
506# if DEAL_II_PETSC_VERSION_GTE(3, 10, 0)
507 Assert(sparsity_pattern.n_rows() == local_rows.size(),
509 "SparsityPattern and IndexSet have different number of rows."));
510 Assert(
511 sparsity_pattern.n_cols() == local_columns.size(),
513 "SparsityPattern and IndexSet have different number of columns"));
514 Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
515 ExcMessage("PETSc only supports contiguous row/column ranges"));
516 Assert(local_rows.is_ascending_and_one_to_one(communicator),
517 ExcNotImplemented());
518
519 if constexpr (running_in_debug_mode())
520 {
521 // check indexsets
522 const types::global_dof_index row_owners =
523 Utilities::MPI::sum(local_rows.n_elements(), communicator);
524 const types::global_dof_index col_owners =
525 Utilities::MPI::sum(local_columns.n_elements(), communicator);
526 Assert(
527 row_owners == sparsity_pattern.n_rows(),
528 ExcMessage(
529 std::string(
530 "Each row has to be owned by exactly one owner (n_rows()=") +
531 std::to_string(sparsity_pattern.n_rows()) +
532 " but sum(local_rows.n_elements())=" +
533 std::to_string(row_owners) + ")"));
534 Assert(
535 col_owners == sparsity_pattern.n_cols(),
536 ExcMessage(
537 std::string(
538 "Each column has to be owned by exactly one owner (n_cols()=") +
539 std::to_string(sparsity_pattern.n_cols()) +
540 " but sum(local_columns.n_elements())=" +
541 std::to_string(col_owners) + ")"));
542 }
543 PetscErrorCode ierr;
544
545 // create the local to global mappings as arrays.
546 const IndexSet::size_type n_local_active_rows =
547 local_active_rows.n_elements();
548 const IndexSet::size_type n_local_active_cols =
549 local_active_columns.n_elements();
550 std::vector<PetscInt> idx_glob_row(n_local_active_rows);
551 std::vector<PetscInt> idx_glob_col(n_local_active_cols);
552 for (IndexSet::size_type k = 0; k < n_local_active_rows; ++k)
553 {
554 idx_glob_row[k] = local_active_rows.nth_index_in_set(k);
555 }
556 for (IndexSet::size_type k = 0; k < n_local_active_cols; ++k)
557 {
558 idx_glob_col[k] = local_active_columns.nth_index_in_set(k);
559 }
560
561
562 IS is_glob_row, is_glob_col;
563 // Create row index set
564 ISLocalToGlobalMapping l2gmap_row;
565 ierr = ISCreateGeneral(communicator,
566 n_local_active_rows,
567 idx_glob_row.data(),
568 PETSC_COPY_VALUES,
569 &is_glob_row);
570 AssertThrow(ierr == 0, ExcPETScError(ierr));
571 ierr = ISLocalToGlobalMappingCreateIS(is_glob_row, &l2gmap_row);
572 AssertThrow(ierr == 0, ExcPETScError(ierr));
573 ierr = ISDestroy(&is_glob_row);
574 AssertThrow(ierr == 0, ExcPETScError(ierr));
575 ierr =
576 ISLocalToGlobalMappingViewFromOptions(l2gmap_row, nullptr, "-view_map");
577 AssertThrow(ierr == 0, ExcPETScError(ierr));
578
579 // Create column index set
580 ISLocalToGlobalMapping l2gmap_col;
581 ierr = ISCreateGeneral(communicator,
582 n_local_active_cols,
583 idx_glob_col.data(),
584 PETSC_COPY_VALUES,
585 &is_glob_col);
586 AssertThrow(ierr == 0, ExcPETScError(ierr));
587 ierr = ISLocalToGlobalMappingCreateIS(is_glob_col, &l2gmap_col);
588 AssertThrow(ierr == 0, ExcPETScError(ierr));
589 ierr = ISDestroy(&is_glob_col);
590 AssertThrow(ierr == 0, ExcPETScError(ierr));
591 ierr =
592 ISLocalToGlobalMappingViewFromOptions(l2gmap_col, nullptr, "-view_map");
593 AssertThrow(ierr == 0, ExcPETScError(ierr));
594
595 // create the matrix with the IS constructor.
596 ierr = MatCreateIS(communicator,
597 1,
598 local_rows.n_elements(),
599 local_columns.n_elements(),
600 sparsity_pattern.n_rows(),
601 sparsity_pattern.n_cols(),
602 l2gmap_row,
603 l2gmap_col,
604 &matrix);
605 AssertThrow(ierr == 0, ExcPETScError(ierr));
606 ierr = ISLocalToGlobalMappingDestroy(&l2gmap_row);
607 AssertThrow(ierr == 0, ExcPETScError(ierr));
608 ierr = ISLocalToGlobalMappingDestroy(&l2gmap_col);
609 AssertThrow(ierr == 0, ExcPETScError(ierr));
610
611 // next preset the exact given matrix
612 // entries with zeros. This doesn't avoid any
613 // memory allocations, but it at least
614 // avoids some searches later on. the
615 // key here is that we can use the
616 // matrix set routines that set an
617 // entire row at once, not a single
618 // entry at a time.
619 //
620 // for the usefulness of this option
621 // read the documentation of this
622 // class.
623
624 Mat local_matrix; // In the MATIS case, we use the local matrix instead
625 ierr = MatISGetLocalMat(matrix, &local_matrix);
626 AssertThrow(ierr == 0, ExcPETScError(ierr));
627 ierr = MatSetType(local_matrix,
628 MATSEQAIJ); // SEQ as it is local! TODO: Allow for
629 // OpenMP parallelization in local node.
630 AssertThrow(ierr == 0, ExcPETScError(ierr));
631 if (local_rows.n_elements() > 0)
632 {
633 // MatSEQAIJSetPreallocationCSR
634 // can be used to allocate the sparsity
635 // pattern of a matrix. Local matrices start from 0 (MATIS).
636 const PetscInt local_row_start = 0;
637 const PetscInt local_row_end = local_active_rows.n_elements();
638
639 // first set up the column number
640 // array for the rows to be stored
641 // on the local processor.
642 std::vector<PetscInt> rowstart_in_window(local_row_end -
643 local_row_start + 1,
644 0),
645 colnums_in_window;
646 unsigned int global_row_index = 0;
647 {
648 unsigned int n_cols = 0;
649 unsigned int global_row_index = 0;
650 for (PetscInt i = local_row_start; i < local_row_end; ++i)
651 {
652 global_row_index = local_active_rows.nth_index_in_set(i);
653 const PetscInt row_length =
654 sparsity_pattern.row_length(global_row_index);
655 rowstart_in_window[i + 1 - local_row_start] =
656 rowstart_in_window[i - local_row_start] + row_length;
657 n_cols += row_length;
658 }
659 colnums_in_window.resize(n_cols + 1, -1);
660 }
661
662
663 // now copy over the information
664 // from the sparsity pattern. For this we first invert the column
665 // index set.
666 std::map<unsigned int, unsigned int> loc_act_cols_inv;
667 for (unsigned int i = 0; i < local_active_columns.n_elements(); ++i)
668 {
669 loc_act_cols_inv[local_active_columns.nth_index_in_set(i)] = i;
670 }
671
672 {
673 PetscInt *ptr = colnums_in_window.data();
674 for (PetscInt i = local_row_start; i < local_row_end; ++i)
675 {
676 global_row_index = local_active_rows.nth_index_in_set(i);
677 for (typename SparsityPatternType::iterator p =
678 sparsity_pattern.begin(global_row_index);
679 p != sparsity_pattern.end(global_row_index);
680 ++p, ++ptr)
681 *ptr = loc_act_cols_inv[p->column()];
682 }
683 }
684
685 // then call the petsc function
686 // that summarily allocates these
687 // entries:
688 ierr = MatSeqAIJSetPreallocationCSR(local_matrix,
689 rowstart_in_window.data(),
690 colnums_in_window.data(),
691 nullptr);
692 AssertThrow(ierr == 0, ExcPETScError(ierr));
693 }
694 else
695 {
696 PetscInt i = 0;
697 ierr = MatSeqAIJSetPreallocationCSR(local_matrix, &i, &i, nullptr);
698 AssertThrow(ierr == 0, ExcPETScError(ierr));
699 }
700 compress(VectorOperation::insert);
701
702 {
703 close_matrix(local_matrix);
704 set_keep_zero_rows(local_matrix);
705 }
706 ierr = MatISRestoreLocalMat(matrix, &local_matrix);
707 AssertThrow(ierr == 0, ExcPETScError(ierr));
708# else
709 {
710 // Use this to avoid unused variables warning
711 (void)communicator;
712 (void)local_rows;
713 (void)local_active_rows;
714 (void)local_columns;
715 (void)local_active_columns;
716 (void)sparsity_pattern;
717 AssertThrow(false,
719 "BDDC preconditioner requires PETSc 3.10.0 or newer"));
720 }
721# endif
722 }
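// Worked example (the index sets below are assumed for illustration): with
// local_active_rows = local_active_columns = {2, 5, 7} on this process, the
// mapping arrays built above are
//
//   idx_glob_row = idx_glob_col = {2, 5, 7};   // local index k -> global index
//
// so local row/column 0 of local_matrix corresponds to global row/column 2,
// and loc_act_cols_inv is the inverse column map, e.g. loc_act_cols_inv[5] == 1.
// MatCreateIS() attaches these local-to-global mappings, so entries written
// into local_matrix end up at the corresponding global positions of the MATIS
// matrix.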
723
724# ifndef DOXYGEN
725 // explicit instantiations
726 //
727 template SparseMatrix::SparseMatrix(const MPI_Comm,
728 const SparsityPattern &,
729 const std::vector<size_type> &,
730 const std::vector<size_type> &,
731 const unsigned int,
732 const bool);
733 template SparseMatrix::SparseMatrix(const MPI_Comm,
734 const DynamicSparsityPattern &,
735 const std::vector<size_type> &,
736 const std::vector<size_type> &,
737 const unsigned int,
738 const bool);
739
740 template void
741 SparseMatrix::reinit(const MPI_Comm,
742 const SparsityPattern &,
743 const std::vector<size_type> &,
744 const std::vector<size_type> &,
745 const unsigned int,
746 const bool);
747 template void
748 SparseMatrix::reinit(const MPI_Comm,
749 const DynamicSparsityPattern &,
750 const std::vector<size_type> &,
751 const std::vector<size_type> &,
752 const unsigned int,
753 const bool);
754
755 template void
756 SparseMatrix::reinit(const IndexSet &,
757 const SparsityPattern &,
758 const MPI_Comm);
759
760 template void
761 SparseMatrix::reinit(const IndexSet &,
762 const IndexSet &,
763 const SparsityPattern &,
764 const MPI_Comm);
765
766 template void
767 SparseMatrix::reinit(const IndexSet &,
768 const DynamicSparsityPattern &,
769 const MPI_Comm);
770
771 template void
772 SparseMatrix::reinit(const IndexSet &,
773 const IndexSet &,
774 const DynamicSparsityPattern &,
775 const MPI_Comm);
776
777 template void
778 SparseMatrix::do_reinit(const MPI_Comm,
779 const SparsityPattern &,
780 const std::vector<size_type> &,
781 const std::vector<size_type> &,
782 const unsigned int,
783 const bool);
784 template void
785 SparseMatrix::do_reinit(const MPI_Comm,
786 const DynamicSparsityPattern &,
787 const std::vector<size_type> &,
788 const std::vector<size_type> &,
789 const unsigned int,
790 const bool);
791
792 template void
793 SparseMatrix::do_reinit(const MPI_Comm,
794 const IndexSet &,
795 const IndexSet &,
796 const SparsityPattern &);
797
798 template void
799 SparseMatrix::do_reinit(const MPI_Comm,
800 const IndexSet &,
801 const IndexSet &,
802 const DynamicSparsityPattern &);
803
804 template void
805 SparseMatrix::reinit(const IndexSet &,
806 const IndexSet &,
807 const IndexSet &,
808 const IndexSet &,
809 const SparsityPattern &,
810 const MPI_Comm);
811 template void
812 SparseMatrix::reinit(const IndexSet &,
813 const IndexSet &,
814 const IndexSet &,
815 const IndexSet &,
816 const DynamicSparsityPattern &,
817 const MPI_Comm);
818
819 template void
820 SparseMatrix::do_reinit(const MPI_Comm,
821 const IndexSet &,
822 const IndexSet &,
823 const IndexSet &,
824 const IndexSet &,
825 const SparsityPattern &);
826 template void
827 SparseMatrix::do_reinit(const MPI_Comm,
828 const IndexSet &,
829 const IndexSet &,
830 const IndexSet &,
831 const IndexSet &,
832 const DynamicSparsityPattern &);
833# endif
834
835
836 PetscScalar
837 SparseMatrix::matrix_norm_square(const Vector &v) const
838 {
839 Vector tmp(v);
840 vmult(tmp, v);
841 // note, that v*tmp returns sum_i conjugate(v)_i * tmp_i
842 return v * tmp;
843 }
844
845 PetscScalar
846 SparseMatrix::matrix_scalar_product(const Vector &u, const Vector &v) const
847 {
848 Vector tmp(v);
849 vmult(tmp, v);
850 // note, that v*tmp returns sum_i conjugate(v)_i * tmp_i
851 return u * tmp;
852 }
853
854 IndexSet
855 SparseMatrix::locally_owned_domain_indices() const
856 {
857 PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
858 PetscErrorCode ierr;
859
860 ierr = MatGetSize(matrix, &n_rows, &n_cols);
861 AssertThrow(ierr == 0, ExcPETScError(ierr));
862
863 ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
864 AssertThrow(ierr == 0, ExcPETScError(ierr));
865
866 ierr = MatGetOwnershipRangeColumn(matrix, &min, &max);
867 AssertThrow(ierr == 0, ExcPETScError(ierr));
868
869 Assert(n_loc_cols == max - min,
871 "PETSc is requiring non contiguous memory allocation."));
872
873 IndexSet indices(n_cols);
874 indices.add_range(min, max);
875 indices.compress();
876
877 return indices;
878 }
879
880 IndexSet
881 SparseMatrix::locally_owned_range_indices() const
882 {
883 PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
884 PetscErrorCode ierr;
885
886 ierr = MatGetSize(matrix, &n_rows, &n_cols);
887 AssertThrow(ierr == 0, ExcPETScError(ierr));
888
889 ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
890 AssertThrow(ierr == 0, ExcPETScError(ierr));
891
892 ierr = MatGetOwnershipRange(matrix, &min, &max);
893 AssertThrow(ierr == 0, ExcPETScError(ierr));
894
895 Assert(n_loc_rows == max - min,
897 "PETSc is requiring non contiguous memory allocation."));
898
899 IndexSet indices(n_rows);
900 indices.add_range(min, max);
901 indices.compress();
902
903 return indices;
904 }
905
906 void
907 SparseMatrix::mmult(SparseMatrix &C,
908 const SparseMatrix &B,
909 const MPI::Vector &V) const
910 {
911 // Simply forward to the protected member function of the base class
912 // that takes abstract matrix and vector arguments (to which the compiler
913 // automatically casts the arguments).
914 MatrixBase::mmult(C, B, V);
915 }
916
917 void
918 SparseMatrix::Tmmult(SparseMatrix &C,
919 const SparseMatrix &B,
920 const MPI::Vector &V) const
921 {
922 // Simply forward to the protected member function of the base class
923 // that takes abstract matrix and vector arguments (to which the compiler
924 // automatically casts the arguments).
925 MatrixBase::Tmmult(C, B, V);
926 }
927
928 } // namespace MPI
929} // namespace PETScWrappers
930
931
932DEAL_II_NAMESPACE_CLOSE
933
934#endif // DEAL_II_WITH_PETSC
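The sketch below shows how the reinit() variants defined above are typically driven from user code. It is a minimal, assumed example rather than anything taken from this file: the function name fill_diagonal_matrix, the variable locally_owned_rows, and the diagonal-only pattern are invented for illustration, and it presumes a PETSc-enabled build of deal.II with MPI already initialized and a contiguous, ascending ownership of rows per process (as asserted by do_reinit() above).

#include <deal.II/base/index_set.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_sparse_matrix.h>

// Assemble a diagonal PETSc matrix over the locally owned rows of each process.
void fill_diagonal_matrix(const MPI_Comm          communicator,
                          const dealii::IndexSet &locally_owned_rows)
{
  using namespace dealii;

  // Describe the couplings first; here each row couples only with itself.
  DynamicSparsityPattern dsp(locally_owned_rows.size(),
                             locally_owned_rows.size());
  for (const auto row : locally_owned_rows)
    dsp.add(row, row);

  // Let the square-matrix reinit() variant preallocate the PETSc matrix.
  PETScWrappers::MPI::SparseMatrix matrix;
  matrix.reinit(locally_owned_rows, dsp, communicator);

  // Write the entries and finalize the assembly.
  for (const auto row : locally_owned_rows)
    matrix.set(row, row, 1.0);
  matrix.compress(VectorOperation::insert);
}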