petsc_parallel_sparse_matrix.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2004 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/lac/petsc_sparse_matrix.h>

#ifdef DEAL_II_WITH_PETSC

#  include <deal.II/base/mpi.h>

#  include <deal.II/lac/dynamic_sparsity_pattern.h>
#  include <deal.II/lac/exceptions.h>
#  include <deal.II/lac/petsc_compatibility.h>
#  include <deal.II/lac/petsc_vector.h>
#  include <deal.II/lac/sparsity_pattern.h>

DEAL_II_NAMESPACE_OPEN

namespace PETScWrappers
{
  namespace MPI
  {
    SparseMatrix::SparseMatrix()
    {
      // just like for vectors: since we
      // create an empty matrix, we can as
      // well make it sequential
      const int m = 0, n = 0, n_nonzero_per_row = 0;
      const PetscErrorCode ierr = MatCreateSeqAIJ(
        PETSC_COMM_SELF, m, n, n_nonzero_per_row, nullptr, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }


    SparseMatrix::SparseMatrix(const Mat &A)
      : MatrixBase(A)
    {}


    SparseMatrix::~SparseMatrix()
    {
      PetscErrorCode ierr = MatDestroy(&matrix);
      (void)ierr;
      AssertNothrow(ierr == 0, ExcPETScError(ierr));
    }


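    // This constructor builds a matrix distributed across the processes of
    // the given communicator directly from a sparsity pattern and a
    // per-process distribution of rows and columns; the actual work is done
    // in do_reinit() further down in this file.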
    template <typename SparsityPatternType>
    SparseMatrix::SparseMatrix(
      const MPI_Comm                communicator,
      const SparsityPatternType    &sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      do_reinit(communicator,
                sparsity_pattern,
                local_rows_per_process,
                local_columns_per_process,
                this_process,
                preset_nonzero_locations);
    }


    void
    SparseMatrix::reinit(const SparseMatrix &other)
    {
      if (&other == this)
        return;

      PetscErrorCode ierr = MatDestroy(&matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatDuplicate(other.matrix, MAT_DO_NOT_COPY_VALUES, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }


    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(const IndexSet            &local_rows,
                         const IndexSet            &local_active_rows,
                         const IndexSet            &local_columns,
                         const IndexSet            &local_active_columns,
                         const SparsityPatternType &sparsity_pattern,
                         const MPI_Comm             communicator)
    {
      // get rid of old matrix and generate a new one
      const PetscErrorCode ierr = MatDestroy(&matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      do_reinit(communicator,
                local_rows,
                local_active_rows,
                local_columns,
                local_active_columns,
                sparsity_pattern);
    }


    SparseMatrix &
    SparseMatrix::operator=(const value_type d)
    {
      MatrixBase::operator=(d);
      return *this;
    }


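    // copy_from() copies the entries via MatCopy() with SAME_NONZERO_PATTERN
    // and therefore assumes that *this and `other` have already been
    // initialized with the same sparsity layout.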
    void
    SparseMatrix::copy_from(const SparseMatrix &other)
    {
      if (&other == this)
        return;

      const PetscErrorCode ierr =
        MatCopy(other.matrix, matrix, SAME_NONZERO_PATTERN);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }


    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(
      const MPI_Comm                communicator,
      const SparsityPatternType    &sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      // get rid of old matrix and generate a new one
      const PetscErrorCode ierr = MatDestroy(&matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));


      do_reinit(communicator,
                sparsity_pattern,
                local_rows_per_process,
                local_columns_per_process,
                this_process,
                preset_nonzero_locations);
    }


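    // A typical way to call the IndexSet based reinit() overloads below is
    // (sketch only; `locally_owned_dofs`, `dsp`, and `mpi_communicator` are
    // assumed to be provided by the caller, e.g. from a DoFHandler and
    // DoFTools::make_sparsity_pattern()):
    //
    //   PETScWrappers::MPI::SparseMatrix system_matrix;
    //   system_matrix.reinit(locally_owned_dofs,
    //                        locally_owned_dofs,
    //                        dsp,
    //                        mpi_communicator);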
    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(const IndexSet            &local_rows,
                         const SparsityPatternType &sparsity_pattern,
                         const MPI_Comm             communicator)
    {
      do_reinit(communicator, local_rows, local_rows, sparsity_pattern);
    }

    template <typename SparsityPatternType>
    void
    SparseMatrix::reinit(const IndexSet            &local_rows,
                         const IndexSet            &local_columns,
                         const SparsityPatternType &sparsity_pattern,
                         const MPI_Comm             communicator)
    {
      // get rid of old matrix and generate a new one
      const PetscErrorCode ierr = MatDestroy(&matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      do_reinit(communicator, local_rows, local_columns, sparsity_pattern);
    }


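    // do_reinit() below performs the actual setup: it creates the PETSc Mat,
    // sets its local and global sizes and its type (MATAIJ), and then hands
    // the sparsity pattern to PETSc in CSR form so that all nonzero entries
    // are preallocated in one sweep.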
    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(const MPI_Comm             communicator,
                            const IndexSet            &local_rows,
                            const IndexSet            &local_columns,
                            const SparsityPatternType &sparsity_pattern)
    {
      // If the sparsity pattern's dimensions can be converted to PetscInts
      // then the rest of the conversions will succeed
      AssertThrowIntegerConversion(static_cast<PetscInt>(
                                     sparsity_pattern.n_rows()),
                                   sparsity_pattern.n_rows());
      AssertThrowIntegerConversion(static_cast<PetscInt>(
                                     sparsity_pattern.n_cols()),
                                   sparsity_pattern.n_cols());

      Assert(sparsity_pattern.n_rows() == local_rows.size(),
             ExcMessage(
               "SparsityPattern and IndexSet have different number of rows"));
      Assert(
        sparsity_pattern.n_cols() == local_columns.size(),
        ExcMessage(
          "SparsityPattern and IndexSet have different number of columns"));
      Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
             ExcMessage("PETSc only supports contiguous row/column ranges"));
      Assert(local_rows.is_ascending_and_one_to_one(communicator),
             ExcNotImplemented());

#  ifdef DEBUG
      {
        // check indexsets
        types::global_dof_index row_owners =
          Utilities::MPI::sum(local_rows.n_elements(), communicator);
        types::global_dof_index col_owners =
          Utilities::MPI::sum(local_columns.n_elements(), communicator);
        Assert(row_owners == sparsity_pattern.n_rows(),
               ExcMessage(
                 std::string(
                   "Each row has to be owned by exactly one owner (n_rows()=") +
                 std::to_string(sparsity_pattern.n_rows()) +
                 " but sum(local_rows.n_elements())=" +
                 std::to_string(row_owners) + ")"));
        Assert(
          col_owners == sparsity_pattern.n_cols(),
          ExcMessage(
            std::string(
              "Each column has to be owned by exactly one owner (n_cols()=") +
            std::to_string(sparsity_pattern.n_cols()) +
            " but sum(local_columns.n_elements())=" +
            std::to_string(col_owners) + ")"));
      }
#  endif


      // create the matrix. We do not set row length but set the
      // correct SparsityPattern later.
      PetscErrorCode ierr = MatCreate(communicator, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetSizes(matrix,
                         local_rows.n_elements(),
                         local_columns.n_elements(),
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols());
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // Use MATAIJ which dispatches to SEQAIJ
      // if the size of the communicator is 1,
      // and to MPIAIJ otherwise.
      ierr = MatSetType(matrix, MATAIJ);
      AssertThrow(ierr == 0, ExcPETScError(ierr));


      // next preset the exact given matrix
      // entries with zeros. this doesn't avoid any
      // memory allocations, but it at least
      // avoids some searches later on. the
      // key here is that we can use the
      // matrix set routines that set an
      // entire row at once, not a single
      // entry at a time
      //
      // for the usefulness of this option
      // read the documentation of this
      // class.
      // if (preset_nonzero_locations == true)
      if (local_rows.n_elements() > 0)
        {
          // MatXXXAIJSetPreallocationCSR
          // can be used to allocate the sparsity
          // pattern of a matrix

          const PetscInt local_row_start = local_rows.nth_index_in_set(0);
          const PetscInt local_row_end =
            local_row_start + local_rows.n_elements();


          // first set up the column number
          // array for the rows to be stored
          // on the local processor. have one
          // dummy entry at the end to make
          // sure petsc doesn't read past the
          // end
          std::vector<PetscInt> rowstart_in_window(local_row_end -
                                                     local_row_start + 1,
                                                   0),
            colnums_in_window;
          {
            unsigned int n_cols = 0;
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              {
                const PetscInt row_length = sparsity_pattern.row_length(i);
                rowstart_in_window[i + 1 - local_row_start] =
                  rowstart_in_window[i - local_row_start] + row_length;
                n_cols += row_length;
              }
            colnums_in_window.resize(n_cols + 1, -1);
          }
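
          // As an illustration of the CSR layout built here: if this process
          // owns rows 4..6 of the global matrix and those rows have 3, 2, and
          // 4 nonzero entries, then rowstart_in_window = {0, 3, 5, 9} and
          // colnums_in_window holds the 9 corresponding global column indices
          // in row-by-row order (plus the trailing dummy entry).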

          // now copy over the information
          // from the sparsity pattern.
          {
            PetscInt *ptr = colnums_in_window.data();
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              for (typename SparsityPatternType::iterator p =
                     sparsity_pattern.begin(i);
                   p != sparsity_pattern.end(i);
                   ++p, ++ptr)
                *ptr = p->column();
          }


          // then call the petsc functions
          // that summarily allocate these
          // entries.
          // Here we both call the specific API since this is how
          // PETSc polymorphism works. If the matrix is of type MPIAIJ,
          // the second call is dummy. If the matrix is of type SEQAIJ,
          // the first call is dummy.
          ierr = MatMPIAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          ierr = MatSeqAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      else
        {
          PetscInt i = 0;

          ierr = MatSeqAIJSetPreallocationCSR(matrix, &i, &i, nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
          ierr = MatMPIAIJSetPreallocationCSR(matrix, &i, &i, nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      compress(VectorOperation::insert);

      {
        close_matrix(matrix);
        set_keep_zero_rows(matrix);
      }
    }


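    // The following do_reinit() variant performs the same kind of CSR-based
    // preallocation as the one above, but the parallel layout is described by
    // how many rows and columns each process owns instead of by IndexSet
    // objects.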
    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(
      const MPI_Comm                communicator,
      const SparsityPatternType    &sparsity_pattern,
      const std::vector<size_type> &local_rows_per_process,
      const std::vector<size_type> &local_columns_per_process,
      const unsigned int            this_process,
      const bool                    preset_nonzero_locations)
    {
      Assert(local_rows_per_process.size() == local_columns_per_process.size(),
             ExcDimensionMismatch(local_rows_per_process.size(),
                                  local_columns_per_process.size()));
      Assert(this_process < local_rows_per_process.size(), ExcInternalError());

      // If the sparsity pattern's dimensions can be converted to PetscInts
      // then the rest of the conversions will succeed
      AssertThrowIntegerConversion(static_cast<PetscInt>(
                                     sparsity_pattern.n_rows()),
                                   sparsity_pattern.n_rows());
      AssertThrowIntegerConversion(static_cast<PetscInt>(
                                     sparsity_pattern.n_cols()),
                                   sparsity_pattern.n_cols());

      // for each row that we own locally, we
      // have to count how many of the
      // entries in the sparsity pattern lie
      // in the column area we have locally,
      // and how many aren't. for this, we
      // first have to know which areas are
      // ours
      size_type local_row_start = 0;
      for (unsigned int p = 0; p < this_process; ++p)
        local_row_start += local_rows_per_process[p];
      const size_type local_row_end =
        local_row_start + local_rows_per_process[this_process];

      // create the matrix. We
      // do not set row length but set the
      // correct SparsityPattern later.
      PetscErrorCode ierr = MatCreate(communicator, &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatSetSizes(matrix,
                         local_rows_per_process[this_process],
                         local_columns_per_process[this_process],
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols());
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // Use MATAIJ which dispatches to SEQAIJ
      // if the size of the communicator is 1,
      // and to MPIAIJ otherwise.
      ierr = MatSetType(matrix, MATAIJ);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // next preset the exact given matrix
      // entries with zeros, if the user
      // requested so. this doesn't avoid any
      // memory allocations, but it at least
      // avoids some searches later on. the
      // key here is that we can use the
      // matrix set routines that set an
      // entire row at once, not a single
      // entry at a time
      //
      // for the usefulness of this option
      // read the documentation of this
      // class.
      if (preset_nonzero_locations == true)
        {
          // MatXXXAIJSetPreallocationCSR
          // can be used to allocate the sparsity
          // pattern of a matrix if it is already
          // available:

          // first set up the column number
          // array for the rows to be stored
          // on the local processor. have one
          // dummy entry at the end to make
          // sure petsc doesn't read past the
          // end
          std::vector<PetscInt> rowstart_in_window(local_row_end -
                                                     local_row_start + 1,
                                                   0),
            colnums_in_window;
          {
            size_type n_cols = 0;
            for (size_type i = local_row_start; i < local_row_end; ++i)
              {
                const size_type row_length = sparsity_pattern.row_length(i);
                const auto row_start =
                  rowstart_in_window[i - local_row_start] + row_length;
                const auto petsc_row_start = static_cast<PetscInt>(row_start);
                AssertIntegerConversion(petsc_row_start, row_start);
                rowstart_in_window[i + 1 - local_row_start] = petsc_row_start;
                n_cols += row_length;
              }
            colnums_in_window.resize(n_cols + 1, -1);
          }

          // now copy over the information
          // from the sparsity pattern.
          {
            PetscInt *ptr = colnums_in_window.data();
            for (size_type i = local_row_start; i < local_row_end; ++i)
              for (typename SparsityPatternType::iterator p =
                     sparsity_pattern.begin(i);
                   p != sparsity_pattern.end(i);
                   ++p, ++ptr)
                {
                  const auto petsc_column = static_cast<PetscInt>(p->column());
                  AssertIntegerConversion(petsc_column, p->column());
                  *ptr = petsc_column;
                }
          }


          // then call the petsc function
          // that summarily allocates these
          // entries.
          // Here we both call the specific API since this is how
          // PETSc polymorphism works. If the matrix is of type MPIAIJ,
          // the second call is dummy. If the matrix is of type SEQAIJ,
          // the first call is dummy.
          ierr = MatSeqAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          ierr = MatMPIAIJSetPreallocationCSR(matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));

          close_matrix(matrix);
          set_keep_zero_rows(matrix);
        }
    }

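    // The following variant is used for the BDDC preconditioner, which needs
    // a PETSc matrix of type MATIS. local_rows/local_columns describe the
    // locally owned part of the global numbering, while local_active_rows/
    // local_active_columns describe the index sets for which the local
    // subdomain matrix is built (typically the owned indices plus those
    // coupling to neighboring subdomains).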
    // BDDC
    template <typename SparsityPatternType>
    void
    SparseMatrix::do_reinit(const MPI_Comm             communicator,
                            const IndexSet            &local_rows,
                            const IndexSet            &local_active_rows,
                            const IndexSet            &local_columns,
                            const IndexSet            &local_active_columns,
                            const SparsityPatternType &sparsity_pattern)
    {
      // If the sparsity pattern's dimensions can be converted to PetscInts
      // then the rest of the conversions will succeed
      AssertThrowIntegerConversion(static_cast<PetscInt>(
                                     sparsity_pattern.n_rows()),
                                   sparsity_pattern.n_rows());
      AssertThrowIntegerConversion(static_cast<PetscInt>(
                                     sparsity_pattern.n_cols()),
                                   sparsity_pattern.n_cols());

#  if DEAL_II_PETSC_VERSION_GTE(3, 10, 0)
      Assert(sparsity_pattern.n_rows() == local_rows.size(),
             ExcMessage(
               "SparsityPattern and IndexSet have different number of rows."));
      Assert(
        sparsity_pattern.n_cols() == local_columns.size(),
        ExcMessage(
          "SparsityPattern and IndexSet have different number of columns"));
      Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
             ExcMessage("PETSc only supports contiguous row/column ranges"));
      Assert(local_rows.is_ascending_and_one_to_one(communicator),
             ExcNotImplemented());

#    ifdef DEBUG
      {
        // check indexsets
        const types::global_dof_index row_owners =
          Utilities::MPI::sum(local_rows.n_elements(), communicator);
        const types::global_dof_index col_owners =
          Utilities::MPI::sum(local_columns.n_elements(), communicator);
        Assert(row_owners == sparsity_pattern.n_rows(),
               ExcMessage(
                 std::string(
                   "Each row has to be owned by exactly one owner (n_rows()=") +
                 std::to_string(sparsity_pattern.n_rows()) +
                 " but sum(local_rows.n_elements())=" +
                 std::to_string(row_owners) + ")"));
        Assert(
          col_owners == sparsity_pattern.n_cols(),
          ExcMessage(
            std::string(
              "Each column has to be owned by exactly one owner (n_cols()=") +
            std::to_string(sparsity_pattern.n_cols()) +
            " but sum(local_columns.n_elements())=" +
            std::to_string(col_owners) + ")"));
      }
#    endif
      PetscErrorCode ierr;

      // create the local to global mappings as arrays.
      const IndexSet::size_type n_local_active_rows =
        local_active_rows.n_elements();
      const IndexSet::size_type n_local_active_cols =
        local_active_columns.n_elements();
      std::vector<PetscInt> idx_glob_row(n_local_active_rows);
      std::vector<PetscInt> idx_glob_col(n_local_active_cols);
      for (IndexSet::size_type k = 0; k < n_local_active_rows; ++k)
        {
          idx_glob_row[k] = local_active_rows.nth_index_in_set(k);
        }
      for (IndexSet::size_type k = 0; k < n_local_active_cols; ++k)
        {
          idx_glob_col[k] = local_active_columns.nth_index_in_set(k);
        }


      IS is_glob_row, is_glob_col;
      // Create row index set
      ISLocalToGlobalMapping l2gmap_row;
      ierr = ISCreateGeneral(communicator,
                             n_local_active_rows,
                             idx_glob_row.data(),
                             PETSC_COPY_VALUES,
                             &is_glob_row);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = ISLocalToGlobalMappingCreateIS(is_glob_row, &l2gmap_row);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = ISDestroy(&is_glob_row);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr =
        ISLocalToGlobalMappingViewFromOptions(l2gmap_row, nullptr, "-view_map");
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // Create column index set
      ISLocalToGlobalMapping l2gmap_col;
      ierr = ISCreateGeneral(communicator,
                             n_local_active_cols,
                             idx_glob_col.data(),
                             PETSC_COPY_VALUES,
                             &is_glob_col);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = ISLocalToGlobalMappingCreateIS(is_glob_col, &l2gmap_col);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = ISDestroy(&is_glob_col);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr =
        ISLocalToGlobalMappingViewFromOptions(l2gmap_col, nullptr, "-view_map");
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // create the matrix with the IS constructor.
      ierr = MatCreateIS(communicator,
                         1,
                         local_rows.n_elements(),
                         local_columns.n_elements(),
                         sparsity_pattern.n_rows(),
                         sparsity_pattern.n_cols(),
                         l2gmap_row,
                         l2gmap_col,
                         &matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = ISLocalToGlobalMappingDestroy(&l2gmap_row);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = ISLocalToGlobalMappingDestroy(&l2gmap_col);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // next preset the exact given matrix
      // entries with zeros. This doesn't avoid any
      // memory allocations, but it at least
      // avoids some searches later on. the
      // key here is that we can use the
      // matrix set routines that set an
      // entire row at once, not a single
      // entry at a time.
      //
      // for the usefulness of this option
      // read the documentation of this
      // class.

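      // In the MATIS format, every process stores only its local (subdomain)
      // matrix; the l2gmap_row/l2gmap_col mappings created above tell PETSc
      // how the local rows and columns relate to the global numbering. We
      // therefore preallocate the *local* matrix below, using local indices.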
      Mat local_matrix; // In the MATIS case, we use the local matrix instead
      ierr = MatISGetLocalMat(matrix, &local_matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      ierr = MatSetType(local_matrix,
                        MATSEQAIJ); // SEQ as it is local! TODO: Allow for
                                    // OpenMP parallelization in local node.
      AssertThrow(ierr == 0, ExcPETScError(ierr));
      if (local_rows.n_elements() > 0)
        {
          // MatSEQAIJSetPreallocationCSR
          // can be used to allocate the sparsity
          // pattern of a matrix. Local matrices start from 0 (MATIS).
          const PetscInt local_row_start = 0;
          const PetscInt local_row_end = local_active_rows.n_elements();

          // first set up the column number
          // array for the rows to be stored
          // on the local processor.
          std::vector<PetscInt> rowstart_in_window(local_row_end -
                                                     local_row_start + 1,
                                                   0),
            colnums_in_window;
          unsigned int global_row_index = 0;
          {
            unsigned int n_cols = 0;
            unsigned int global_row_index = 0;
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              {
                global_row_index = local_active_rows.nth_index_in_set(i);
                const PetscInt row_length =
                  sparsity_pattern.row_length(global_row_index);
                rowstart_in_window[i + 1 - local_row_start] =
                  rowstart_in_window[i - local_row_start] + row_length;
                n_cols += row_length;
              }
            colnums_in_window.resize(n_cols + 1, -1);
          }


          // now copy over the information
          // from the sparsity pattern. For this we first invert the column
          // index set.
          std::map<unsigned int, unsigned int> loc_act_cols_inv;
          for (unsigned int i = 0; i < local_active_columns.n_elements(); ++i)
            {
              loc_act_cols_inv[local_active_columns.nth_index_in_set(i)] = i;
            }

          {
            PetscInt *ptr = colnums_in_window.data();
            for (PetscInt i = local_row_start; i < local_row_end; ++i)
              {
                global_row_index = local_active_rows.nth_index_in_set(i);
                for (typename SparsityPatternType::iterator p =
                       sparsity_pattern.begin(global_row_index);
                     p != sparsity_pattern.end(global_row_index);
                     ++p, ++ptr)
                  *ptr = loc_act_cols_inv[p->column()];
              }
          }

          // then call the petsc function
          // that summarily allocates these
          // entries:
          ierr = MatSeqAIJSetPreallocationCSR(local_matrix,
                                              rowstart_in_window.data(),
                                              colnums_in_window.data(),
                                              nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      else
        {
          PetscInt i = 0;
          ierr = MatSeqAIJSetPreallocationCSR(local_matrix, &i, &i, nullptr);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      compress(VectorOperation::insert);

      {
        close_matrix(local_matrix);
        set_keep_zero_rows(local_matrix);
      }
      ierr = MatISRestoreLocalMat(matrix, &local_matrix);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
#  else
      {
        // Use this to avoid unused variables warning
        (void)communicator;
        (void)local_rows;
        (void)local_active_rows;
        (void)local_columns;
        (void)local_active_columns;
        (void)sparsity_pattern;
        AssertThrow(false,
                    ExcMessage(
                      "BDDC preconditioner requires PETSc 3.10.0 or newer"));
      }
#  endif
    }

#  ifndef DOXYGEN
    // explicit instantiations
    //
    template SparseMatrix::SparseMatrix(const MPI_Comm,
                                        const SparsityPattern &,
                                        const std::vector<size_type> &,
                                        const std::vector<size_type> &,
                                        const unsigned int,
                                        const bool);
    template SparseMatrix::SparseMatrix(const MPI_Comm,
                                        const DynamicSparsityPattern &,
                                        const std::vector<size_type> &,
                                        const std::vector<size_type> &,
                                        const unsigned int,
                                        const bool);

    template void
    SparseMatrix::reinit(const MPI_Comm,
                         const SparsityPattern &,
                         const std::vector<size_type> &,
                         const std::vector<size_type> &,
                         const unsigned int,
                         const bool);
    template void
    SparseMatrix::reinit(const MPI_Comm,
                         const DynamicSparsityPattern &,
                         const std::vector<size_type> &,
                         const std::vector<size_type> &,
                         const unsigned int,
                         const bool);

    template void
    SparseMatrix::reinit(const IndexSet &,
                         const SparsityPattern &,
                         const MPI_Comm);

    template void
    SparseMatrix::reinit(const IndexSet &,
                         const IndexSet &,
                         const SparsityPattern &,
                         const MPI_Comm);

    template void
    SparseMatrix::reinit(const IndexSet &,
                         const DynamicSparsityPattern &,
                         const MPI_Comm);

    template void
    SparseMatrix::reinit(const IndexSet &,
                         const IndexSet &,
                         const DynamicSparsityPattern &,
                         const MPI_Comm);

    template void
    SparseMatrix::do_reinit(const MPI_Comm,
                            const SparsityPattern &,
                            const std::vector<size_type> &,
                            const std::vector<size_type> &,
                            const unsigned int,
                            const bool);
    template void
    SparseMatrix::do_reinit(const MPI_Comm,
                            const DynamicSparsityPattern &,
                            const std::vector<size_type> &,
                            const std::vector<size_type> &,
                            const unsigned int,
                            const bool);

    template void
    SparseMatrix::do_reinit(const MPI_Comm,
                            const IndexSet &,
                            const IndexSet &,
                            const SparsityPattern &);

    template void
    SparseMatrix::do_reinit(const MPI_Comm,
                            const IndexSet &,
                            const IndexSet &,
                            const DynamicSparsityPattern &);

    template void
    SparseMatrix::reinit(const IndexSet &,
                         const IndexSet &,
                         const IndexSet &,
                         const IndexSet &,
                         const SparsityPattern &,
                         const MPI_Comm);
    template void
    SparseMatrix::reinit(const IndexSet &,
                         const IndexSet &,
                         const IndexSet &,
                         const IndexSet &,
                         const DynamicSparsityPattern &,
                         const MPI_Comm);

    template void
    SparseMatrix::do_reinit(const MPI_Comm,
                            const IndexSet &,
                            const IndexSet &,
                            const IndexSet &,
                            const IndexSet &,
                            const SparsityPattern &);
    template void
    SparseMatrix::do_reinit(const MPI_Comm,
                            const IndexSet &,
                            const IndexSet &,
                            const IndexSet &,
                            const IndexSet &,
                            const DynamicSparsityPattern &);
#  endif


    PetscScalar
    SparseMatrix::matrix_norm_square(const Vector &v) const
    {
      Vector tmp(v);
      vmult(tmp, v);
      // note, that v*tmp returns  sum_i conjugate(v)_i * tmp_i
      return v * tmp;
    }

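    // Analogously to matrix_norm_square() above (which computes v^H A v),
    // matrix_scalar_product() computes u^H A v for two given vectors.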
    PetscScalar
    SparseMatrix::matrix_scalar_product(const Vector &u, const Vector &v) const
    {
      Vector tmp(v);
      vmult(tmp, v);
      // note, that u*tmp returns  sum_i conjugate(u)_i * tmp_i
      return u * tmp;
    }

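    // The "domain" of the matrix, viewed as a linear operator, is the space
    // of vectors it can be applied to, so locally_owned_domain_indices()
    // reports the locally owned *columns*, while locally_owned_range_indices()
    // further down reports the locally owned *rows*.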
    IndexSet
    SparseMatrix::locally_owned_domain_indices() const
    {
      PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
      PetscErrorCode ierr;

      ierr = MatGetSize(matrix, &n_rows, &n_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetOwnershipRangeColumn(matrix, &min, &max);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(n_loc_cols == max - min,
             ExcMessage(
               "PETSc is requiring non contiguous memory allocation."));

      IndexSet indices(n_cols);
      indices.add_range(min, max);
      indices.compress();

      return indices;
    }

    IndexSet
    SparseMatrix::locally_owned_range_indices() const
    {
      PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
      PetscErrorCode ierr;

      ierr = MatGetSize(matrix, &n_rows, &n_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = MatGetOwnershipRange(matrix, &min, &max);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(n_loc_rows == max - min,
             ExcMessage(
               "PETSc is requiring non contiguous memory allocation."));

      IndexSet indices(n_rows);
      indices.add_range(min, max);
      indices.compress();

      return indices;
    }

    void
    SparseMatrix::mmult(SparseMatrix       &C,
                        const SparseMatrix &B,
                        const MPI::Vector  &V) const
    {
      // Simply forward to the protected member function of the base class
      // that takes abstract matrix and vector arguments (to which the compiler
      // automatically casts the arguments).
      MatrixBase::mmult(C, B, V);
    }

    void
    SparseMatrix::Tmmult(SparseMatrix       &C,
                         const SparseMatrix &B,
                         const MPI::Vector  &V) const
    {
      // Simply forward to the protected member function of the base class
      // that takes abstract matrix and vector arguments (to which the compiler
      // automatically casts the arguments).
      MatrixBase::Tmmult(C, B, V);
    }

  } // namespace MPI
} // namespace PETScWrappers


DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_PETSC