trilinos_tpetra_sparsity_pattern.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/config.h>

#ifdef DEAL_II_TRILINOS_WITH_TPETRA

#  include <deal.II/base/mpi.h>
#  include <deal.II/base/trilinos_utilities.h>

#  include <deal.II/lac/dynamic_sparsity_pattern.h>
#  include <deal.II/lac/sparsity_pattern.h>
#  include <deal.II/lac/trilinos_tpetra_sparsity_pattern.h>

#  include <limits>

DEAL_II_NAMESPACE_OPEN
namespace LinearAlgebra
{
  namespace TpetraWrappers
  {
    namespace SparsityPatternIterators
    {
      template <typename MemorySpace>
      void
      Accessor<MemorySpace>::visit_present_row()
      {
        // if we are asked to visit the past-the-end line, then simply
        // release all our caches and go on with life
        if (static_cast<std::size_t>(this->a_row) ==
            sparsity_pattern->n_rows())
          {
            colnum_cache.reset();
            return;
          }

        // otherwise first flush Trilinos caches if necessary
        if (!sparsity_pattern->is_compressed())
          sparsity_pattern->compress();

        colnum_cache = std::make_shared<
          std::vector<dealii::types::signed_global_dof_index>>(
          sparsity_pattern->row_length(this->a_row));

        if (colnum_cache->size() > 0)
          {
            // get a representation of the present row
            std::size_t ncols;
            typename TpetraTypes::GraphType<
              MemorySpace>::nonconst_global_inds_host_view_type
              column_indices_view(colnum_cache->data(), colnum_cache->size());

            sparsity_pattern->graph->getGlobalRowCopy(this->a_row,
                                                      column_indices_view,
                                                      ncols);
            AssertThrow(ncols == colnum_cache->size(), ExcInternalError());
          }
      }
    } // namespace SparsityPatternIterators
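
    // The accessor above backs the usual deal.II sparsity-pattern iterators.
    // A usage sketch (illustrative only; it assumes an already filled and
    // compressed pattern `sp`, and the standard begin(row)/end(row)
    // iterator interface of deal.II sparsity patterns):
    //
    // @code
    //   for (auto entry = sp.begin(0); entry != sp.end(0); ++entry)
    //     std::cout << '(' << entry->row() << ',' << entry->column() << ")\n";
    // @endcode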


    // The constructor is actually the only point where we have to check
    // whether we build a serial or a parallel Trilinos matrix. Actually, it
    // does not even matter how many threads there are, but only whether we
    // use an MPI compiler or a standard compiler. So, even one thread on a
    // configuration with MPI will still get a parallel interface. (See the
    // usage sketch right after this constructor.)
    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern()
    {
      column_space_map = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::MapType<MemorySpace>>(
        TrilinosWrappers::types::int_type(0),
        TrilinosWrappers::types::int_type(0),
        Utilities::Trilinos::tpetra_comm_self());
      graph = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::GraphType<MemorySpace>>(column_space_map,
                                             column_space_map,
                                             0);
      graph->fillComplete();
    }
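
    // A usage sketch for the constructors in this file (illustrative only;
    // `locally_owned_dofs` and `mpi_communicator` are assumed to come from
    // the surrounding application code):
    //
    // @code
    //   // Serial pattern: 100x100 with at most 5 entries per row.
    //   LinearAlgebra::TpetraWrappers::SparsityPattern<dealii::MemorySpace::Host>
    //     serial_pattern(100, 100, 5);
    //
    //   // Distributed pattern: rows partitioned by a one-to-one IndexSet.
    //   LinearAlgebra::TpetraWrappers::SparsityPattern<dealii::MemorySpace::Host>
    //     parallel_pattern(locally_owned_dofs, mpi_communicator, 5);
    // @endcode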


    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const size_type m,
      const size_type n,
      const size_type n_entries_per_row)
    {
      reinit(m, n, n_entries_per_row);
    }



    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const size_type               m,
      const size_type               n,
      const std::vector<size_type> &n_entries_per_row)
    {
      reinit(m, n, n_entries_per_row);
    }



    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      SparsityPattern<MemorySpace> &&other) noexcept
      : SparsityPatternBase(std::move(other))
      , column_space_map(std::move(other.column_space_map))
      , graph(std::move(other.graph))
      , nonlocal_graph(std::move(other.nonlocal_graph))
    {}



    // The copy constructor only works for empty sparsity patterns.
    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const SparsityPattern<MemorySpace> &input_sparsity)
      : SparsityPatternBase(input_sparsity)
      , column_space_map(Utilities::Trilinos::internal::make_rcp<
                         TpetraTypes::MapType<MemorySpace>>(
          0,
          0,
          Utilities::Trilinos::tpetra_comm_self()))
      , graph(Utilities::Trilinos::internal::make_rcp<
              TpetraTypes::GraphType<MemorySpace>>(column_space_map,
                                                   column_space_map,
                                                   0))
    {
      (void)input_sparsity;
      Assert(input_sparsity.n_rows() == 0,
             ExcMessage(
               "Copy constructor only works for empty sparsity patterns."));
    }


    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet &parallel_partitioning,
      const MPI_Comm  communicator,
      const size_type n_entries_per_row)
    {
      reinit(parallel_partitioning,
             parallel_partitioning,
             communicator,
             n_entries_per_row);
    }



    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet               &parallel_partitioning,
      const MPI_Comm                communicator,
      const std::vector<size_type> &n_entries_per_row)
    {
      reinit(parallel_partitioning,
             parallel_partitioning,
             communicator,
             n_entries_per_row);
    }



    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet &row_parallel_partitioning,
      const IndexSet &col_parallel_partitioning,
      const MPI_Comm  communicator,
      const size_type n_entries_per_row)
    {
      reinit(row_parallel_partitioning,
             col_parallel_partitioning,
             communicator,
             n_entries_per_row);
    }



    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet               &row_parallel_partitioning,
      const IndexSet               &col_parallel_partitioning,
      const MPI_Comm                communicator,
      const std::vector<size_type> &n_entries_per_row)
    {
      reinit(row_parallel_partitioning,
             col_parallel_partitioning,
             communicator,
             n_entries_per_row);
    }



    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet &row_parallel_partitioning,
      const IndexSet &col_parallel_partitioning,
      const IndexSet &writable_rows,
      const MPI_Comm  communicator,
      const size_type n_max_entries_per_row)
    {
      reinit(row_parallel_partitioning,
             col_parallel_partitioning,
             writable_rows,
             communicator,
             n_max_entries_per_row);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(const size_type m,
                                         const size_type n,
                                         const size_type n_entries_per_row)
    {
      reinit(complete_index_set(m),
             complete_index_set(n),
             MPI_COMM_SELF,
             n_entries_per_row);
    }



    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const size_type               m,
      const size_type               n,
      const std::vector<size_type> &n_entries_per_row)
    {
      reinit(complete_index_set(m),
             complete_index_set(n),
             MPI_COMM_SELF,
             n_entries_per_row);
    }

    namespace SparsityPatternImpl
    {
      template <typename MemorySpace>
      using size_type = typename SparsityPattern<MemorySpace>::size_type;

      template <typename MemorySpace>
      void
      reinit_sp(
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &row_map,
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &col_map,
        const size_type<MemorySpace>                        n_entries_per_row,
        Teuchos::RCP<TpetraTypes::MapType<MemorySpace>>    &column_space_map,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>>  &graph,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>>  &nonlocal_graph)
      {
        Assert(row_map->isOneToOne(),
               ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
                          "the maps of different processors."));
        Assert(col_map->isOneToOne(),
               ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
                          "the maps of different processors."));

        nonlocal_graph.reset();
        graph.reset();
        column_space_map = col_map;

        // For more than one processor, we need to specify only the row map
        // first and let the matrix entries decide about the column map (which
        // says which columns are present in the matrix, not to be confused
        // with the col_map that tells how the domain dofs of the matrix will
        // be distributed). For only one processor, we can directly assign the
        // columns as well. If we use a recent Trilinos version, we can also
        // require building a non-local graph which gives us thread-safe
        // initialization.
        graph = Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::GraphType<MemorySpace>>(row_map, n_entries_per_row);
      }


      template <typename MemorySpace>
      void
      reinit_sp(
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &row_map,
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &col_map,
        const std::vector<size_type<MemorySpace>>          &n_entries_per_row,
        Teuchos::RCP<TpetraTypes::MapType<MemorySpace>>    &column_space_map,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>>  &graph,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>>  &nonlocal_graph)
      {
        Assert(row_map->isOneToOne(),
               ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
                          "the maps of different processors."));
        Assert(col_map->isOneToOne(),
               ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
                          "the maps of different processors."));

        // release memory before reallocation
        nonlocal_graph.reset();
        graph.reset();
        AssertDimension(n_entries_per_row.size(),
                        row_map->getGlobalNumElements());

        column_space_map = col_map;

        // Translate the vector of row lengths into one that only stores
        // those entries that relate to the locally stored rows of the matrix:
        Kokkos::DualView<size_t *, typename MemorySpace::kokkos_space>
          local_entries_per_row("local_entries_per_row",
                                row_map->getMaxGlobalIndex() -
                                  row_map->getMinGlobalIndex() + 1);

        auto local_entries_per_row_host =
          local_entries_per_row
            .template view<Kokkos::DefaultHostExecutionSpace>();

        std::uint64_t total_size = 0;
        for (unsigned int i = 0; i < local_entries_per_row.extent(0); ++i)
          {
            local_entries_per_row_host(i) =
              n_entries_per_row[row_map->getMinGlobalIndex() + i];
            total_size += local_entries_per_row_host[i];
          }
        local_entries_per_row
          .template modify<Kokkos::DefaultHostExecutionSpace>();
        local_entries_per_row
          .template sync<typename MemorySpace::kokkos_space>();

        Assert(
          total_size < static_cast<std::uint64_t>(
                         std::numeric_limits<
                           dealii::types::signed_global_dof_index>::max()),
          ExcMessage(
            "You are requesting to store more elements than the global "
            "ordinal type allows."));

        graph = Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::GraphType<MemorySpace>>(row_map,
                                               col_map,
                                               local_entries_per_row);
      }
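
      // The DualView above follows the standard Kokkos idiom: fill the host
      // view, mark the host copy as modified, then sync to the memory space
      // in which the graph lives. A minimal self-contained sketch of that
      // idiom (illustrative only, independent of the function above):
      //
      // @code
      //   Kokkos::DualView<size_t *> dv("dv", 10);
      //   auto host = dv.view_host();
      //   for (size_t i = 0; i < dv.extent(0); ++i)
      //     host(i) = i;        // fill on the host
      //   dv.modify_host();     // the host copy is now the newest one
      //   dv.sync_device();     // mirror it to device memory
      // @endcode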


      template <typename SparsityPatternType, typename MemorySpace>
      void
      reinit_sp(
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &row_map,
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &col_map,
        const SparsityPatternType                          &sp,
        [[maybe_unused]] const bool                         exchange_data,
        Teuchos::RCP<TpetraTypes::MapType<MemorySpace>>    &column_space_map,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>>  &graph,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>>  &nonlocal_graph)
      {
        nonlocal_graph.reset();
        graph.reset();

        AssertDimension(sp.n_rows(), row_map->getGlobalNumElements());
        AssertDimension(sp.n_cols(), col_map->getGlobalNumElements());

        column_space_map = col_map;

        Assert(row_map->isContiguous() == true,
               ExcMessage(
                 "This function only works if the row map is contiguous."));

        const size_type<MemorySpace> first_row = row_map->getMinGlobalIndex(),
                                     last_row =
                                       row_map->getMaxGlobalIndex() + 1;
        Teuchos::Array<size_t> n_entries_per_row(last_row - first_row);

        for (size_type<MemorySpace> row = first_row; row < last_row; ++row)
          n_entries_per_row[row - first_row] = sp.row_length(row);

        AssertThrow(
          std::accumulate(n_entries_per_row.begin(),
                          n_entries_per_row.end(),
                          std::uint64_t(0)) <
            static_cast<std::uint64_t>(std::numeric_limits<int>::max()),
          ExcMessage(
            "The TrilinosWrappers use Tpetra internally, and "
            "Trilinos/Tpetra was compiled with 'local ordinal = int'. "
            "Therefore, 'signed int' is used to represent local indices, "
            "and only 2,147,483,647 nonzero matrix entries can be stored "
            "on a single process, but you are requesting more than "
            "that. Either use more MPI processes or recompile Trilinos "
            "with 'local ordinal = long long'."));

        if (row_map->getComm()->getSize() > 1)
          graph = Utilities::Trilinos::internal::make_rcp<
            TpetraTypes::GraphType<MemorySpace>>(row_map, n_entries_per_row);
        else
          graph = Utilities::Trilinos::internal::make_rcp<
            TpetraTypes::GraphType<MemorySpace>>(row_map,
                                                 col_map,
                                                 n_entries_per_row);

        AssertDimension(sp.n_rows(), graph->getGlobalNumRows());

        std::vector<TrilinosWrappers::types::int_type> row_indices;

        for (size_type<MemorySpace> row = first_row; row < last_row; ++row)
          {
            const TrilinosWrappers::types::int_type row_length =
              sp.row_length(row);
            if (row_length == 0)
              continue;

            row_indices.resize(row_length, -1);
            {
              typename SparsityPatternType::iterator p = sp.begin(row);
              // avoid incrementing p over the end of the current row because
              // it is slow for DynamicSparsityPattern in parallel
              for (int col = 0; col < row_length;)
                {
                  row_indices[col++] = p->column();
                  if (col < row_length)
                    ++p;
                }
            }
            graph->insertGlobalIndices(row, row_length, row_indices.data());
          }

        graph->globalAssemble();
      }
    } // namespace SparsityPatternImpl


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(const IndexSet &parallel_partitioning,
                                         const MPI_Comm  communicator,
                                         const size_type n_entries_per_row)
    {
      SparsityPatternBase::resize(parallel_partitioning.size(),
                                  parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
        parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(
        map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet               &parallel_partitioning,
      const MPI_Comm                communicator,
      const std::vector<size_type> &n_entries_per_row)
    {
      SparsityPatternBase::resize(parallel_partitioning.size(),
                                  parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
        parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(
        map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
    }



    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet &row_parallel_partitioning,
      const IndexSet &col_parallel_partitioning,
      const MPI_Comm  communicator,
      const size_type n_entries_per_row)
    {
      SparsityPatternBase::resize(row_parallel_partitioning.size(),
                                  col_parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
        row_parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
        col_parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
                                                  col_map,
                                                  n_entries_per_row,
                                                  column_space_map,
                                                  graph,
                                                  nonlocal_graph);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet               &row_parallel_partitioning,
      const IndexSet               &col_parallel_partitioning,
      const MPI_Comm                communicator,
      const std::vector<size_type> &n_entries_per_row)
    {
      SparsityPatternBase::resize(row_parallel_partitioning.size(),
                                  col_parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
        row_parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
        col_parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
                                                  col_map,
                                                  n_entries_per_row,
                                                  column_space_map,
                                                  graph,
                                                  nonlocal_graph);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet &row_parallel_partitioning,
      const IndexSet &col_parallel_partitioning,
      const IndexSet &writable_rows,
      const MPI_Comm  communicator,
      const size_type n_entries_per_row)
    {
      SparsityPatternBase::resize(row_parallel_partitioning.size(),
                                  col_parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
        row_parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
        col_parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
                                                  col_map,
                                                  n_entries_per_row,
                                                  column_space_map,
                                                  graph,
                                                  nonlocal_graph);

      IndexSet nonlocal_partitioner = writable_rows;
      AssertDimension(nonlocal_partitioner.size(),
                      row_parallel_partitioning.size());
      if constexpr (running_in_debug_mode())
        {
          IndexSet tmp = writable_rows & row_parallel_partitioning;
          Assert(tmp == row_parallel_partitioning,
                 ExcMessage(
                   "The set of writable rows passed to this method does not "
                   "contain the locally owned rows, which is not allowed."));
        }
      nonlocal_partitioner.subtract_set(row_parallel_partitioning);
      if (Utilities::MPI::n_mpi_processes(communicator) > 1)
        {
          Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> nonlocal_map =
            nonlocal_partitioner
              .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
                communicator, true);
          nonlocal_graph = Utilities::Trilinos::internal::make_rcp<
            TpetraTypes::GraphType<MemorySpace>>(nonlocal_map, col_map, 0);
        }
      else
        Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError());
    }
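
    // Usage sketch (illustrative only; `owned`, `relevant`, and `comm` are
    // assumed to be provided by the application): the writable-rows variant
    // is useful when hanging-node or boundary constraints make a process
    // write into rows it does not own:
    //
    // @code
    //   IndexSet writable = relevant; // must contain all locally owned rows
    //   LinearAlgebra::TpetraWrappers::SparsityPattern<dealii::MemorySpace::Host> sp;
    //   sp.reinit(owned, owned, writable, comm, 10);
    // @endcode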


    template <typename MemorySpace>
    template <typename SparsityPatternType>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet            &row_parallel_partitioning,
      const IndexSet            &col_parallel_partitioning,
      const SparsityPatternType &nontrilinos_sparsity_pattern,
      const MPI_Comm             communicator,
      const bool                 exchange_data)
    {
      SparsityPatternBase::resize(row_parallel_partitioning.size(),
                                  col_parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
        row_parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
        col_parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
        row_map,
        col_map,
        nontrilinos_sparsity_pattern,
        exchange_data,
        column_space_map,
        graph,
        nonlocal_graph);
    }


    template <typename MemorySpace>
    template <typename SparsityPatternType>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet            &parallel_partitioning,
      const SparsityPatternType &nontrilinos_sparsity_pattern,
      const MPI_Comm             communicator,
      const bool                 exchange_data)
    {
      AssertDimension(nontrilinos_sparsity_pattern.n_rows(),
                      parallel_partitioning.size());
      AssertDimension(nontrilinos_sparsity_pattern.n_cols(),
                      parallel_partitioning.size());
      SparsityPatternBase::resize(parallel_partitioning.size(),
                                  parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
        parallel_partitioning
          .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
        map,
        map,
        nontrilinos_sparsity_pattern,
        exchange_data,
        column_space_map,
        graph,
        nonlocal_graph);
    }


    template <typename MemorySpace>
    SparsityPattern<MemorySpace> &
    SparsityPattern<MemorySpace>::operator=(
      const SparsityPattern<MemorySpace> &)
    {
      DEAL_II_NOT_IMPLEMENTED();
      return *this;
    }



    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::copy_from(
      const SparsityPattern<MemorySpace> &sp)
    {
      SparsityPatternBase::resize(sp.n_rows(), sp.n_cols());
      column_space_map = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::MapType<MemorySpace>>(*sp.column_space_map);
      graph = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::GraphType<MemorySpace>>(*sp.graph);

      if (sp.nonlocal_graph.get() != nullptr)
        nonlocal_graph = Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::GraphType<MemorySpace>>(*sp.nonlocal_graph);
      else
        nonlocal_graph.reset();
    }



    template <typename MemorySpace>
    template <typename SparsityPatternType>
    void
    SparsityPattern<MemorySpace>::copy_from(const SparsityPatternType &sp)
    {
      SparsityPatternBase::resize(sp.n_rows(), sp.n_cols());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> rows =
        Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::MapType<MemorySpace>>(
          TrilinosWrappers::types::int_type(sp.n_rows()),
          0,
          Utilities::Trilinos::tpetra_comm_self());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> columns =
        Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::MapType<MemorySpace>>(
          TrilinosWrappers::types::int_type(sp.n_cols()),
          0,
          Utilities::Trilinos::tpetra_comm_self());

      SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
        rows, columns, sp, false, column_space_map, graph, nonlocal_graph);
    }
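
    // Usage sketch (illustrative only; `n_dofs` and `dof_handler` are
    // assumed given): a pattern is typically assembled in a
    // dealii::DynamicSparsityPattern first and then copied over:
    //
    // @code
    //   DynamicSparsityPattern dsp(n_dofs, n_dofs);
    //   DoFTools::make_sparsity_pattern(dof_handler, dsp);
    //   LinearAlgebra::TpetraWrappers::SparsityPattern<dealii::MemorySpace::Host> sp;
    //   sp.copy_from(dsp);
    //   sp.compress();
    // @endcode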


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::clear()
    {
      SparsityPatternBase::resize(0, 0);
      // When we clear the matrix, reset
      // the pointer and generate an
      // empty sparsity pattern.
      column_space_map = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::MapType<MemorySpace>>(
        TrilinosWrappers::types::int_type(0),
        TrilinosWrappers::types::int_type(0),
        Utilities::Trilinos::tpetra_comm_self());
      graph = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::GraphType<MemorySpace>>(column_space_map,
                                             column_space_map,
                                             0);
      graph->fillComplete();

      nonlocal_graph.reset();
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::compress()
    {
      Assert(column_space_map.get(), ExcInternalError());
      if (nonlocal_graph.get() != nullptr)
        {
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
          if (nonlocal_graph->getRowMap()->getLocalNumElements() > 0 &&
              column_space_map->getGlobalNumElements() > 0)
#  else
          if (nonlocal_graph->getRowMap()->getNodeNumElements() > 0 &&
              column_space_map->getGlobalNumElements() > 0)
#  endif
            {
              // Insert dummy element at (row, column) that corresponds to row
              // 0 in local index counting.
              const TrilinosWrappers::types::int_type row =
                nonlocal_graph->getRowMap()->getGlobalElement(0);
              TrilinosWrappers::types::int_type column = 0;

              // in case we have a square sparsity pattern, add the entry on
              // the diagonal
              if (column_space_map->getGlobalNumElements() ==
                  graph->getRangeMap()->getGlobalNumElements())
                column = row;
              // if not, take a column index that we have ourselves since we
              // know for sure it is there (and it will not create spurious
              // messages to many ranks like putting index 0 on many
              // processors)
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
              else if (column_space_map->getLocalNumElements() > 0)
#  else
              else if (column_space_map->getNodeNumElements() > 0)
#  endif
                column = column_space_map->getGlobalElement(0);
              nonlocal_graph->insertGlobalIndices(row, 1, &column);
            }
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
          Assert(nonlocal_graph->getRowMap()->getLocalNumElements() == 0 ||
                   column_space_map->getGlobalNumElements() == 0,
                 ExcInternalError());
#  else
          Assert(nonlocal_graph->getRowMap()->getNodeNumElements() == 0 ||
                   column_space_map->getGlobalNumElements() == 0,
                 ExcInternalError());
#  endif

          nonlocal_graph->fillComplete(column_space_map, graph->getRowMap());
        }
      graph->fillComplete(column_space_map, graph->getRowMap());

      // Check consistency between the sizes set at the beginning and what
      // Trilinos stores:
      using namespace deal_II_exceptions::internals;
      AssertDimension(n_rows(), graph->getGlobalNumRows());
      AssertDimension(n_cols(), graph->getGlobalNumCols());
    }
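
    // The typical lifecycle, for reference (illustrative only; `owned`,
    // `comm`, and the helper `columns_of()` are assumptions): reinit(),
    // add entries, then compress() exactly once before the pattern is used:
    //
    // @code
    //   LinearAlgebra::TpetraWrappers::SparsityPattern<dealii::MemorySpace::Host> sp;
    //   sp.reinit(owned, comm, 5);
    //   for (const auto row : owned)
    //     sp.add_row_entries(row, columns_of(row));
    //   sp.compress();
    // @endcode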


    template <typename MemorySpace>
    bool
    SparsityPattern<MemorySpace>::row_is_stored_locally(
      const size_type i) const
    {
      return graph->getRowMap()->getLocalElement(i) !=
             Teuchos::OrdinalTraits<int>::invalid();
    }



    template <typename MemorySpace>
    bool
    SparsityPattern<MemorySpace>::exists(const size_type i,
                                         const size_type j) const
    {
      if (!row_is_stored_locally(i))
        return false;

      // Extract local indices in the matrix.
      const auto trilinos_i = graph->getRowMap()->getLocalElement(i);
      const auto trilinos_j = graph->getColMap()->getLocalElement(j);

      typename TpetraTypes::GraphType<MemorySpace>::local_inds_host_view_type
        col_indices;

      // Generate the view.
      graph->getLocalRowView(trilinos_i, col_indices);

      // Search the index
      const size_type local_col_index =
        std::find(col_indices.data(),
                  col_indices.data() + col_indices.size(),
                  trilinos_j) -
        col_indices.data();

      return static_cast<std::size_t>(local_col_index) != col_indices.size();
    }



    template <typename MemorySpace>
    typename SparsityPattern<MemorySpace>::size_type
    SparsityPattern<MemorySpace>::bandwidth() const
    {
      size_type local_b = 0;
      for (int i = 0; i < static_cast<int>(local_size()); ++i)
        {
#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
          typename TpetraTypes::GraphType<
            MemorySpace>::local_inds_host_view_type indices;
#  else
          Teuchos::ArrayView<const int> indices;
#  endif

          graph->getLocalRowView(i, indices);
          const auto num_entries = indices.size();
          for (unsigned int j = 0; j < static_cast<unsigned int>(num_entries);
               ++j)
            {
              if (static_cast<size_type>(std::abs(i - indices[j])) > local_b)
                local_b = std::abs(i - indices[j]);
            }
        }

      // Take the maximum over all processes.
      const size_type global_b =
        Utilities::MPI::max(local_b,
                            Utilities::Trilinos::teuchos_comm_to_mpi_comm(
                              graph->getComm()));
      return static_cast<size_type>(global_b);
    }


    template <typename MemorySpace>
    unsigned int
    SparsityPattern<MemorySpace>::local_size() const
    {
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
      return graph->getLocalNumRows();
#  else
      return graph->getNodeNumRows();
#  endif
    }



    template <typename MemorySpace>
    std::pair<typename SparsityPattern<MemorySpace>::size_type,
              typename SparsityPattern<MemorySpace>::size_type>
    SparsityPattern<MemorySpace>::local_range() const
    {
      const size_type begin = graph->getRowMap()->getMinGlobalIndex();
      const size_type end   = graph->getRowMap()->getMaxGlobalIndex() + 1;

      return {begin, end};
    }



    template <typename MemorySpace>
    std::uint64_t
    SparsityPattern<MemorySpace>::n_nonzero_elements() const
    {
      return graph->getGlobalNumEntries();
    }



    template <typename MemorySpace>
    unsigned int
    SparsityPattern<MemorySpace>::max_entries_per_row() const
    {
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
      return graph->getLocalMaxNumRowEntries();
#  else
      return graph->getNodeMaxNumRowEntries();
#  endif
    }



    template <typename MemorySpace>
    typename SparsityPattern<MemorySpace>::size_type
    SparsityPattern<MemorySpace>::row_length(const size_type row) const
    {
      Assert(row < static_cast<size_type>(n_rows()), ExcInternalError());

      // Get a representation of where the present row is located on
      // the current processor
      const auto local_row = graph->getRowMap()->getLocalElement(row);

      // On the processor that owns this row, we'll have a non-negative
      // value for `local_row` and can ask for the length of the row.
      if (local_row >= 0)
        return graph->getNumEntriesInLocalRow(local_row);
      else
        return static_cast<size_type>(-1);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::add_row_entries(
      const dealii::types::global_dof_index                  &row,
      const ArrayView<const dealii::types::global_dof_index> &columns,
      const bool indices_are_sorted)
    {
      add_entries(row, columns.begin(), columns.end(), indices_are_sorted);
    }



    template <typename MemorySpace>
    Teuchos::RCP<const typename TpetraTypes::MapType<MemorySpace>>
    SparsityPattern<MemorySpace>::domain_partitioner() const
    {
      return graph->getDomainMap();
    }



    template <typename MemorySpace>
    Teuchos::RCP<const typename TpetraTypes::MapType<MemorySpace>>
    SparsityPattern<MemorySpace>::range_partitioner() const
    {
      return graph->getRangeMap();
    }



    template <typename MemorySpace>
    MPI_Comm
    SparsityPattern<MemorySpace>::get_mpi_communicator() const
    {
      return Utilities::Trilinos::teuchos_comm_to_mpi_comm(
        graph->getRangeMap()->getComm());
    }



    template <typename MemorySpace>
    Teuchos::RCP<const Teuchos::Comm<int>>
    SparsityPattern<MemorySpace>::get_teuchos_mpi_communicator() const
    {
      return graph->getRangeMap()->getComm();
    }


    // As of now, no particularly neat
    // output is generated in case of
    // multiple processors.
    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::print(
      std::ostream &out,
      const bool    write_extended_trilinos_info) const
    {
      if (write_extended_trilinos_info)
        out << *graph;
      else
        {
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
          for (unsigned int i = 0; i < graph->getLocalNumRows(); ++i)
#  else
          for (unsigned int i = 0; i < graph->getNodeNumRows(); ++i)
#  endif
            {
#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
              typename TpetraTypes::GraphType<
                MemorySpace>::local_inds_host_view_type indices;
#  else
              Teuchos::ArrayView<const int> indices;
#  endif
              graph->getLocalRowView(i, indices);
              const int num_entries = indices.size();
              for (int j = 0; j < num_entries; ++j)
                out << "(" << graph->getRowMap()->getGlobalElement(i) << ","
                    << graph->getColMap()->getGlobalElement(indices[j]) << ") "
                    << std::endl;
            }
        }

      AssertThrow(out.fail() == false, ExcIO());
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::print_gnuplot(std::ostream &out) const
    {
      Assert(graph->isFillComplete() == true, ExcInternalError());

      for (unsigned int row = 0; row < local_size(); ++row)
        {
#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
          typename TpetraTypes::GraphType<
            MemorySpace>::local_inds_host_view_type indices;
#  else
          Teuchos::ArrayView<const int> indices;
#  endif
          graph->getLocalRowView(row, indices);
          const int num_entries = indices.size();

          Assert(num_entries >= 0, ExcInternalError());
          // avoid sign comparison warning
          const dealii::types::signed_global_dof_index num_entries_ =
            num_entries;
          for (dealii::types::signed_global_dof_index j = 0; j < num_entries_;
               ++j)
            // while matrix entries are usually
            // written (i,j), with i vertical and
            // j horizontal, gnuplot output is
            // x-y, that is we have to exchange
            // the order of output
            out << static_cast<int>(
                     graph->getColMap()->getGlobalElement(indices[j]))
                << " "
                << -static_cast<int>(graph->getRowMap()->getGlobalElement(row))
                << std::endl;
        }

      AssertThrow(out.fail() == false, ExcIO());
    }
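
    // Usage sketch (illustrative only; `sp` is a compressed pattern assumed
    // given): write the pattern to a file and plot it with gnuplot via
    // `plot "pattern.gpl" using 1:2 with points`:
    //
    // @code
    //   std::ofstream out("pattern.gpl");
    //   sp.print_gnuplot(out);
    // @endcode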

    // TODO: Implement!
    template <typename MemorySpace>
    std::size_t
    SparsityPattern<MemorySpace>::memory_consumption() const
    {
      DEAL_II_NOT_IMPLEMENTED();
      return 0;
    }


#  ifndef DOXYGEN
    // explicit instantiations
    template class SparsityPattern<dealii::MemorySpace::Host>;

    template void
    SparsityPattern<dealii::MemorySpace::Host>::copy_from(
      const dealii::SparsityPattern &);
    template void
    SparsityPattern<dealii::MemorySpace::Host>::copy_from(
      const dealii::DynamicSparsityPattern &);

    template void
    SparsityPattern<dealii::MemorySpace::Host>::reinit(
      const IndexSet &,
      const dealii::SparsityPattern &,
      const MPI_Comm,
      bool);
    template void
    SparsityPattern<dealii::MemorySpace::Host>::reinit(
      const IndexSet &,
      const dealii::DynamicSparsityPattern &,
      const MPI_Comm,
      bool);


    template void
    SparsityPattern<dealii::MemorySpace::Host>::reinit(
      const IndexSet &,
      const IndexSet &,
      const dealii::SparsityPattern &,
      const MPI_Comm,
      bool);
    template void
    SparsityPattern<dealii::MemorySpace::Host>::reinit(
      const IndexSet &,
      const IndexSet &,
      const dealii::DynamicSparsityPattern &,
      const MPI_Comm,
      bool);


    template class SparsityPattern<dealii::MemorySpace::Default>;

    template void
    SparsityPattern<dealii::MemorySpace::Default>::copy_from(
      const dealii::SparsityPattern &);
    template void
    SparsityPattern<dealii::MemorySpace::Default>::copy_from(
      const dealii::DynamicSparsityPattern &);

    template void
    SparsityPattern<dealii::MemorySpace::Default>::reinit(
      const IndexSet &,
      const dealii::SparsityPattern &,
      const MPI_Comm,
      bool);
    template void
    SparsityPattern<dealii::MemorySpace::Default>::reinit(
      const IndexSet &,
      const dealii::DynamicSparsityPattern &,
      const MPI_Comm,
      bool);


    template void
    SparsityPattern<dealii::MemorySpace::Default>::reinit(
      const IndexSet &,
      const IndexSet &,
      const dealii::SparsityPattern &,
      const MPI_Comm,
      bool);
    template void
    SparsityPattern<dealii::MemorySpace::Default>::reinit(
      const IndexSet &,
      const IndexSet &,
      const dealii::DynamicSparsityPattern &,
      const MPI_Comm,
      bool);

#  endif

  } // namespace TpetraWrappers

} // namespace LinearAlgebra

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_TRILINOS_WITH_TPETRA