Reference documentation for deal.II version GIT relicensing-1062-gc06da148b8 2024-07-15 19:20:02+00:00
trilinos_tpetra_sparsity_pattern.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#include <deal.II/base/config.h>
16
17#ifdef DEAL_II_TRILINOS_WITH_TPETRA
18
19# include <deal.II/base/mpi.h>
21
26
27# include <limits>
28
29 DEAL_II_NAMESPACE_OPEN
30
31namespace LinearAlgebra
32{
33
34 namespace TpetraWrappers
35 {
36 namespace SparsityPatternIterators
37 {
38 template <typename MemorySpace>
39 void
40 Accessor<MemorySpace>::visit_present_row()
41 {
42 // if we are asked to visit the past-the-end line, then simply
43 // release all our caches and go on with life
44 if (static_cast<size_t>(this->a_row) == sparsity_pattern->n_rows())
45 {
46 colnum_cache.reset();
47 return;
48 }
49
50 // otherwise first flush Trilinos caches if necessary
51 if (!sparsity_pattern->is_compressed())
52 sparsity_pattern->compress();
53
54 colnum_cache =
55 std::make_shared<std::vector<dealii::types::signed_global_dof_index>>(
56 sparsity_pattern->row_length(this->a_row));
57
58 if (colnum_cache->size() > 0)
59 {
60 // get a representation of the present row
61 std::size_t ncols;
62# if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
63 typename TpetraTypes::GraphType<
64 MemorySpace>::nonconst_global_inds_host_view_type
65 column_indices_view(colnum_cache->data(), colnum_cache->size());
66# else
67 Teuchos::ArrayView<dealii::types::signed_global_dof_index>
68 column_indices_view(colnum_cache->data(), colnum_cache->size());
69# endif
70 sparsity_pattern->graph->getGlobalRowCopy(this->a_row,
71 column_indices_view,
72 ncols);
73 AssertThrow(ncols == colnum_cache->size(), ExcInternalError());
74 }
75 }
76 } // namespace SparsityPatternIterators
77
78
79 // The constructor is the only point where we have to check whether we
80 // build a serial or a parallel Trilinos sparsity pattern. It does not even
81 // matter how many threads there are, but only whether we use an MPI
82 // configuration or not. So, even one thread on a configuration with MPI
83 // will still get a parallel interface.
84 template <typename MemorySpace>
85 SparsityPattern<MemorySpace>::SparsityPattern()
86 {
87 column_space_map = Utilities::Trilinos::internal::make_rcp<
88 TpetraTypes::MapType<MemorySpace>>(
89 0,
90 0,
91 Utilities::Trilinos::tpetra_comm_self());
92 graph = Utilities::Trilinos::internal::make_rcp<
93 TpetraTypes::GraphType<MemorySpace>>(column_space_map,
94 column_space_map,
95 0);
96 graph->fillComplete();
97 }
98
99
100
101 template <typename MemorySpace>
102 SparsityPattern<MemorySpace>::SparsityPattern(
103 const size_type m,
104 const size_type n,
105 const size_type n_entries_per_row)
106 {
107 reinit(m, n, n_entries_per_row);
108 }
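// --- Usage sketch (illustrative, not part of this file) ---------------------
// The constructors above are what calling code typically uses. As the comment
// before them explains, serial vs. parallel is decided only by the IndexSet
// and communicator that are passed in; `locally_owned_dofs` and
// `mpi_communicator` are assumed to be provided by the caller.
void example_construct_patterns(const IndexSet &locally_owned_dofs,
                                const MPI_Comm  mpi_communicator)
{
  // Serial pattern: a single process holds all 100x100 rows and columns.
  SparsityPattern<MemorySpace::Host> serial_pattern(100, 100, 5);

  // Parallel pattern: rows are distributed according to locally_owned_dofs.
  SparsityPattern<MemorySpace::Host> parallel_pattern(locally_owned_dofs,
                                                      mpi_communicator,
                                                      5);
}
// -----------------------------------------------------------------------------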
109
110
111
112 template <typename MemorySpace>
114 const size_type m,
115 const size_type n,
116 const std::vector<size_type> &n_entries_per_row)
117 {
118 reinit(m, n, n_entries_per_row);
119 }
120
121
122
123 template <typename MemorySpace>
124 SparsityPattern<MemorySpace>::SparsityPattern(
125 SparsityPattern<MemorySpace> &&other) noexcept
126 : SparsityPatternBase(std::move(other))
127 , column_space_map(std::move(other.column_space_map))
128 , graph(std::move(other.graph))
129 , nonlocal_graph(std::move(other.nonlocal_graph))
130 {}
131
132
133
134 // The copy constructor only works if the sparsity pattern is empty.
135 template <typename MemorySpace>
136 SparsityPattern<MemorySpace>::SparsityPattern(
137 const SparsityPattern<MemorySpace> &input_sparsity)
138 : SparsityPatternBase(input_sparsity)
139 , column_space_map(Utilities::Trilinos::internal::make_rcp<
140 TpetraTypes::MapType<MemorySpace>>(
141 0,
142 0,
143 Utilities::Trilinos::tpetra_comm_self()))
144 , graph(Utilities::Trilinos::internal::make_rcp<
145 TpetraTypes::GraphType<MemorySpace>>(column_space_map,
146 column_space_map,
147 0))
148 {
149 (void)input_sparsity;
150 Assert(input_sparsity.n_rows() == 0,
152 "Copy constructor only works for empty sparsity patterns."));
153 }
154
155
156
157 template <typename MemorySpace>
158 SparsityPattern<MemorySpace>::SparsityPattern(
159 const IndexSet &parallel_partitioning,
160 const MPI_Comm communicator,
161 const size_type n_entries_per_row)
162 {
163 reinit(parallel_partitioning,
164 parallel_partitioning,
165 communicator,
166 n_entries_per_row);
167 }
168
169
170
171 template <typename MemorySpace>
172 SparsityPattern<MemorySpace>::SparsityPattern(
173 const IndexSet &parallel_partitioning,
174 const MPI_Comm communicator,
175 const std::vector<size_type> &n_entries_per_row)
176 {
177 reinit(parallel_partitioning,
178 parallel_partitioning,
179 communicator,
180 n_entries_per_row);
181 }
182
183
184
185 template <typename MemorySpace>
186 SparsityPattern<MemorySpace>::SparsityPattern(
187 const IndexSet &row_parallel_partitioning,
188 const IndexSet &col_parallel_partitioning,
189 const MPI_Comm communicator,
190 const size_type n_entries_per_row)
191 {
192 reinit(row_parallel_partitioning,
193 col_parallel_partitioning,
194 communicator,
195 n_entries_per_row);
196 }
197
198
199
200 template <typename MemorySpace>
201 SparsityPattern<MemorySpace>::SparsityPattern(
202 const IndexSet &row_parallel_partitioning,
203 const IndexSet &col_parallel_partitioning,
204 const MPI_Comm communicator,
205 const std::vector<size_type> &n_entries_per_row)
206 {
207 reinit(row_parallel_partitioning,
208 col_parallel_partitioning,
209 communicator,
210 n_entries_per_row);
211 }
212
213
214
215 template <typename MemorySpace>
216 SparsityPattern<MemorySpace>::SparsityPattern(
217 const IndexSet &row_parallel_partitioning,
218 const IndexSet &col_parallel_partitioning,
219 const IndexSet &writable_rows,
220 const MPI_Comm communicator,
221 const size_type n_max_entries_per_row)
222 {
223 reinit(row_parallel_partitioning,
224 col_parallel_partitioning,
225 writable_rows,
226 communicator,
227 n_max_entries_per_row);
228 }
229
230
231
232 template <typename MemorySpace>
233 void
234 SparsityPattern<MemorySpace>::reinit(const size_type m,
235 const size_type n,
236 const size_type n_entries_per_row)
237 {
238 reinit(complete_index_set(m),
239 complete_index_set(n),
240 MPI_COMM_SELF,
241 n_entries_per_row);
242 }
243
244
245
246 template <typename MemorySpace>
247 void
248 SparsityPattern<MemorySpace>::reinit(
249 const size_type m,
250 const size_type n,
251 const std::vector<size_type> &n_entries_per_row)
252 {
253 reinit(complete_index_set(m),
254 complete_index_set(n),
255 MPI_COMM_SELF,
256 n_entries_per_row);
257 }
258
259
260
261 namespace SparsityPatternImpl
262 {
263 template <typename MemorySpace>
264 using size_type = typename SparsityPattern<MemorySpace>::size_type;
265
266 template <typename MemorySpace>
267 void
268 reinit_sp(
269 const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &row_map,
270 const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &col_map,
271 const size_type<MemorySpace> n_entries_per_row,
272 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &column_space_map,
273 Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &graph,
274 Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &nonlocal_graph)
275 {
276 Assert(row_map->isOneToOne(),
277 ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
278 "the maps of different processors."));
279 Assert(col_map->isOneToOne(),
280 ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
281 "the maps of different processors."));
282
283 nonlocal_graph.reset();
284 graph.reset();
285 column_space_map = col_map;
286
287 // for more than one processor, need to specify only row map first and
288 // let the matrix entries decide about the column map (which says which
289 // columns are present in the matrix, not to be confused with the
290 // col_map that tells how the domain dofs of the matrix will be
291 // distributed). for only one processor, we can directly assign the
292 // columns as well. If we use a recent Trilinos version, we can also
293 // require building a non-local graph which gives us thread-safe
294 // initialization.
295 graph = Utilities::Trilinos::internal::make_rcp<
296 TpetraTypes::GraphType<MemorySpace>>(
297 row_map,
298 n_entries_per_row);
299 }
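// --- Usage sketch (illustrative, not part of this file) ---------------------
// The comment above distinguishes the col_map (distribution of the domain
// dofs) from the set of columns that actually occur in each row. At the user
// level this means a rectangular, distributed pattern is set up from two
// IndexSets, while the actual column structure is only fixed once entries
// have been added and compress() is called. `row_owned` and `col_owned` are
// assumed to come from the calling code.
void example_rectangular_pattern(const IndexSet &row_owned,
                                 const IndexSet &col_owned,
                                 const MPI_Comm  comm)
{
  SparsityPattern<MemorySpace::Host> pattern;
  pattern.reinit(row_owned, col_owned, comm, 5);
  // ... add entries; any global column may appear in a locally owned row ...
  pattern.compress();
}
// -----------------------------------------------------------------------------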
300
301
302
303 template <typename MemorySpace>
304 void
305 reinit_sp(
306 const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &row_map,
307 const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &col_map,
308 const std::vector<size_type<MemorySpace>> &n_entries_per_row,
309 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &column_space_map,
310 Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &graph,
311 Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &nonlocal_graph)
312 {
313 Assert(row_map->isOneToOne(),
314 ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
315 "the maps of different processors."));
316 Assert(col_map->isOneToOne(),
317 ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
318 "the maps of different processors."));
319
320 // release memory before reallocation
321 nonlocal_graph.reset();
322 graph.reset();
323 AssertDimension(n_entries_per_row.size(),
324 row_map->getGlobalNumElements());
325
326 column_space_map = col_map;
327
328 // Translate the vector of row lengths into one that only stores
329 // those entries that relate to the locally stored rows of the matrix:
330 Kokkos::DualView<size_t *, typename MemorySpace::kokkos_space>
331 local_entries_per_row("local_entries_per_row",
332 row_map->getMaxGlobalIndex() -
333 row_map->getMinGlobalIndex());
334
335 auto local_entries_per_row_host =
336 local_entries_per_row
337 .template view<Kokkos::DefaultHostExecutionSpace>();
338
339 std::uint64_t total_size = 0;
340 for (unsigned int i = 0; i < local_entries_per_row.extent(0); ++i)
341 {
342 local_entries_per_row_host(i) =
343 n_entries_per_row[row_map->getMinGlobalIndex() + i];
344 total_size += local_entries_per_row_host[i];
345 }
346 local_entries_per_row
347 .template modify<Kokkos::DefaultHostExecutionSpace>();
348 local_entries_per_row
349 .template sync<typename MemorySpace::kokkos_space>();
350
351 AssertThrow(
352 total_size < static_cast<std::uint64_t>(
353 std::numeric_limits<
354 dealii::types::signed_global_dof_index>::max()),
355 ExcMessage(
356 "You are requesting to store more elements than global ordinal type allows."));
357
358 graph = Utilities::Trilinos::internal::make_rcp<
359 TpetraTypes::GraphType<MemorySpace>>(row_map,
360 col_map,
361 local_entries_per_row);
362 }
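// --- Illustrative sketch (not part of this file) -----------------------------
// The Kokkos::DualView fill above compresses the global vector of row lengths
// to the window of locally owned rows; on the host side it is equivalent to
// the following plain loop (names are made up for the example).
void example_local_row_lengths(const std::vector<std::size_t> &global_lengths,
                               const std::size_t               first_row,
                               const std::size_t               last_row)
{
  std::vector<std::size_t> local_lengths(last_row - first_row);
  for (std::size_t row = first_row; row < last_row; ++row)
    local_lengths[row - first_row] = global_lengths[row];
}
// -----------------------------------------------------------------------------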
363
364
365
366 template <typename SparsityPatternType, typename MemorySpace>
367 void
368 reinit_sp(
369 const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &row_map,
370 const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &col_map,
371 const SparsityPatternType &sp,
372 [[maybe_unused]] const bool exchange_data,
373 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &column_space_map,
374 Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &graph,
375 Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &nonlocal_graph)
376 {
377 nonlocal_graph.reset();
378 graph.reset();
379
380 AssertDimension(sp.n_rows(), row_map->getGlobalNumElements());
381 AssertDimension(sp.n_cols(), col_map->getGlobalNumElements());
385
386 Assert(row_map->isContiguous() == true,
388 "This function only works if the row map is contiguous."));
389
390 const size_type<MemorySpace> first_row = row_map->getMinGlobalIndex(),
391 last_row =
392 row_map->getMaxGlobalIndex() + 1;
393 Teuchos::Array<size_t> n_entries_per_row(last_row - first_row);
394
395 for (size_type<MemorySpace> row = first_row; row < last_row; ++row)
396 n_entries_per_row[row - first_row] = sp.row_length(row);
397
398 AssertThrow(
399 std::accumulate(n_entries_per_row.begin(),
400 n_entries_per_row.end(),
401 std::uint64_t(0)) <
402 static_cast<std::uint64_t>(std::numeric_limits<int>::max()),
403 ExcMessage(
404 "The TrilinosWrappers use Tpetra internally, and "
405 "Trilinos/Tpetra was compiled with 'local ordinal = int'. "
406 "Therefore, 'signed int' is used to represent local indices, "
407 "and only 2,147,483,647 nonzero matrix entries can be stored "
408 "on a single process, but you are requesting more than "
409 "that. Either use more MPI processes or recompile Trilinos "
410 "with 'local ordinal = long long'."));
411
412# if DEAL_II_TRILINOS_VERSION_GTE(12, 16, 0)
413 if (row_map->getComm()->getSize() > 1)
414 graph = Utilities::Trilinos::internal::make_rcp<
415 TpetraTypes::GraphType<MemorySpace>>(row_map, n_entries_per_row);
416 else
417 graph = Utilities::Trilinos::internal::make_rcp<
418 TpetraTypes::GraphType<MemorySpace>>(row_map,
419 col_map,
420 n_entries_per_row);
421# else
422 if (row_map->getComm()->getSize() > 1)
423 graph = Utilities::Trilinos::internal::make_rcp<
424 TpetraTypes::GraphType<MemorySpace>>(
425 row_map, Teuchos::arcpFromArray(n_entries_per_row));
426 else
427 graph = Utilities::Trilinos::internal::make_rcp<
428 TpetraTypes::GraphType<MemorySpace>>(
429 row_map, col_map, Teuchos::arcpFromArray(n_entries_per_row));
430
431# endif
432
433 AssertDimension(sp.n_rows(), graph->getGlobalNumRows());
434 AssertDimension(sp.n_cols(), graph->getGlobalNumEntries());
435
436 std::vector<TrilinosWrappers::types::int_type> row_indices;
437
438 for (size_type<MemorySpace> row = first_row; row < last_row; ++row)
439 {
440 const TrilinosWrappers::types::int_type row_length =
441 sp.row_length(row);
442 if (row_length == 0)
443 continue;
444
445 row_indices.resize(row_length, -1);
446 {
447 typename SparsityPatternType::iterator p = sp.begin(row);
448 // avoid incrementing p over the end of the current row because
449 // it is slow for DynamicSparsityPattern in parallel
450 for (int col = 0; col < row_length;)
451 {
452 row_indices[col++] = p->column();
453 if (col < row_length)
454 ++p;
455 }
456 }
457 graph->insertGlobalIndices(row, row_length, row_indices.data());
458 }
459
460 graph->globalAssemble();
461 }
462 } // namespace SparsityPatternImpl
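// --- Usage sketch (illustrative, not part of this file) ---------------------
// The reinit_sp() overload above for deal.II sparsity pattern classes is what
// runs when user code hands a DynamicSparsityPattern to reinit(); `owned` and
// `comm` are assumed to come from the calling code.
void example_from_dynamic_pattern(const IndexSet &owned, const MPI_Comm comm)
{
  DynamicSparsityPattern dsp(owned.size(), owned.size());
  for (const auto row : owned)
    dsp.add(row, row); // e.g., couple every owned row to its diagonal

  SparsityPattern<MemorySpace::Host> pattern;
  pattern.reinit(owned, dsp, comm, /*exchange_data=*/false);
  pattern.compress();
}
// -----------------------------------------------------------------------------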
463
465 template <typename MemorySpace>
466 void
467 SparsityPattern<MemorySpace>::reinit(const IndexSet &parallel_partitioning,
468 const MPI_Comm communicator,
469 const size_type n_entries_per_row)
470 {
471 SparsityPatternBase::resize(parallel_partitioning.size(),
472 parallel_partitioning.size());
473 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
474 parallel_partitioning.make_tpetra_map_rcp(communicator, false);
475 SparsityPatternImpl::reinit_sp<MemorySpace>(
476 map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
477 }
478
479
480
481 template <typename MemorySpace>
482 void
483 SparsityPattern<MemorySpace>::reinit(
484 const IndexSet &parallel_partitioning,
485 const MPI_Comm communicator,
486 const std::vector<size_type> &n_entries_per_row)
487 {
488 SparsityPatternBase::resize(parallel_partitioning.size(),
489 parallel_partitioning.size());
490 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
491 parallel_partitioning.make_tpetra_map_rcp(communicator, false);
492 SparsityPatternImpl::reinit_sp<MemorySpace>(
493 map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
494 }
495
496
497
498 template <typename MemorySpace>
499 void
500 SparsityPattern<MemorySpace>::reinit(
501 const IndexSet &row_parallel_partitioning,
502 const IndexSet &col_parallel_partitioning,
503 const MPI_Comm communicator,
504 const size_type n_entries_per_row)
505 {
506 SparsityPatternBase::resize(row_parallel_partitioning.size(),
507 col_parallel_partitioning.size());
508 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
509 row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
510 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
511 col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
512 SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
513 col_map,
514 n_entries_per_row,
515 column_space_map,
516 graph,
517 nonlocal_graph);
518 }
519
520
521
522 template <typename MemorySpace>
523 void
524 SparsityPattern<MemorySpace>::reinit(
525 const IndexSet &row_parallel_partitioning,
526 const IndexSet &col_parallel_partitioning,
527 const MPI_Comm communicator,
528 const std::vector<size_type> &n_entries_per_row)
529 {
530 SparsityPatternBase::resize(row_parallel_partitioning.size(),
531 col_parallel_partitioning.size());
532 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
533 row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
534 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
535 col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
536 SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
537 col_map,
538 n_entries_per_row,
539 column_space_map,
540 graph,
541 nonlocal_graph);
542 }
543
544
545
546 template <typename MemorySpace>
547 void
548 SparsityPattern<MemorySpace>::reinit(
549 const IndexSet &row_parallel_partitioning,
550 const IndexSet &col_parallel_partitioning,
551 const IndexSet &writable_rows,
552 const MPI_Comm communicator,
553 const size_type n_entries_per_row)
554 {
555 SparsityPatternBase::resize(row_parallel_partitioning.size(),
556 col_parallel_partitioning.size());
557 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
558 row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
559 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
560 col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
561 SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
562 col_map,
563 n_entries_per_row,
564 column_space_map,
565 graph,
566 nonlocal_graph);
567
568 IndexSet nonlocal_partitioner = writable_rows;
569 AssertDimension(nonlocal_partitioner.size(),
570 row_parallel_partitioning.size());
571# ifdef DEBUG
572 {
573 IndexSet tmp = writable_rows & row_parallel_partitioning;
574 Assert(tmp == row_parallel_partitioning,
576 "The set of writable rows passed to this method does not "
577 "contain the locally owned rows, which is not allowed."));
578 }
579# endif
580 nonlocal_partitioner.subtract_set(row_parallel_partitioning);
581 if (Utilities::MPI::n_mpi_processes(communicator) > 1)
582 {
583 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> nonlocal_map =
584 nonlocal_partitioner.make_tpetra_map_rcp(communicator, true);
585 nonlocal_graph = Utilities::Trilinos::internal::make_rcp<
586 TpetraTypes::GraphType<MemorySpace>>(nonlocal_map, col_map, 0);
587 }
588 else
589 Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError());
590 }
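// --- Usage sketch (illustrative, not part of this file) ---------------------
// The writable-rows variant above is meant for assembly with constraints,
// where a process also needs to write into rows it does not own. `owned` and
// `writable` are assumed IndexSets from the calling code, with `writable`
// containing at least the locally owned rows (this is asserted above).
void example_writable_rows(const IndexSet &owned,
                           const IndexSet &writable,
                           const MPI_Comm  comm)
{
  SparsityPattern<MemorySpace::Host> pattern;
  pattern.reinit(owned, owned, writable, comm, 5);
  // ... add entries, including into rows in `writable` but not in `owned` ...
  pattern.compress();
}
// -----------------------------------------------------------------------------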
591
592
593
594 template <typename MemorySpace>
595 template <typename SparsityPatternType>
596 void
597 SparsityPattern<MemorySpace>::reinit(
598 const IndexSet &row_parallel_partitioning,
599 const IndexSet &col_parallel_partitioning,
600 const SparsityPatternType &nontrilinos_sparsity_pattern,
601 const MPI_Comm communicator,
602 const bool exchange_data)
603 {
604 SparsityPatternBase::resize(row_parallel_partitioning.size(),
605 col_parallel_partitioning.size());
606 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
607 row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
608 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
609 col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
610 SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
611 row_map,
612 col_map,
613 nontrilinos_sparsity_pattern,
614 exchange_data,
615 column_space_map,
616 graph,
617 nonlocal_graph);
618 }
619
620
621
622 template <typename MemorySpace>
623 template <typename SparsityPatternType>
624 void
625 SparsityPattern<MemorySpace>::reinit(
626 const IndexSet &parallel_partitioning,
627 const SparsityPatternType &nontrilinos_sparsity_pattern,
628 const MPI_Comm communicator,
629 const bool exchange_data)
630 {
631 AssertDimension(nontrilinos_sparsity_pattern.n_rows(),
632 parallel_partitioning.size());
633 AssertDimension(nontrilinos_sparsity_pattern.n_cols(),
634 parallel_partitioning.size());
635 SparsityPatternBase::resize(parallel_partitioning.size(),
636 parallel_partitioning.size());
637 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
638 parallel_partitioning.make_tpetra_map_rcp(communicator, false);
639 SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
640 map,
641 map,
642 nontrilinos_sparsity_pattern,
643 exchange_data,
644 column_space_map,
645 graph,
646 nonlocal_graph);
647 }
648
649
650
651 template <typename MemorySpace>
659
660
662 template <typename MemorySpace>
663 void
679
680
681
682 template <typename MemorySpace>
683 template <typename SparsityPatternType>
684 void
685 SparsityPattern<MemorySpace>::copy_from(const SparsityPatternType &sp)
686 {
687 SparsityPatternBase::resize(sp.n_rows(), sp.n_cols());
688 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> rows =
692 0,
694 Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> columns =
698 0,
700
701 SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
702 rows, columns, sp, false, column_space_map, graph, nonlocal_graph);
703 }
704
706
707 template <typename MemorySpace>
708 void
709 SparsityPattern<MemorySpace>::clear()
710 {
712 // When we clear the matrix, reset
713 // the pointer and generate an
714 // empty sparsity pattern.
715 column_space_map = Utilities::Trilinos::internal::make_rcp<
716 TpetraTypes::MapType<MemorySpace>>(
717 0,
718 0,
719 Utilities::Trilinos::tpetra_comm_self());
720 graph = Utilities::Trilinos::internal::make_rcp<
721 TpetraTypes::GraphType<MemorySpace>>(column_space_map,
722 column_space_map,
723 0);
724 graph->fillComplete();
725
726 nonlocal_graph.reset();
727 }
728
730
731 template <typename MemorySpace>
732 void
733 SparsityPattern<MemorySpace>::compress()
734 {
735 Assert(column_space_map.get(), ExcInternalError());
736 if (nonlocal_graph.get() != nullptr)
737 {
738# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
739 if (nonlocal_graph->getRowMap()->getLocalNumElements() > 0 &&
740 column_space_map->getGlobalNumElements() > 0)
741# else
742 if (nonlocal_graph->getRowMap()->getNodeNumElements() > 0 &&
743 column_space_map->getGlobalNumElements() > 0)
744# endif
745 {
746 // Insert dummy element at (row, column) that corresponds to row 0
747 // in local index counting.
748 dealii::types::signed_global_dof_index row =
749 nonlocal_graph->getRowMap()->getGlobalElement(0);
750 dealii::types::signed_global_dof_index column = 0;
751
752 // in case we have a square sparsity pattern, add the entry on the
753 // diagonal
754 if (column_space_map->getGlobalNumElements() ==
755 graph->getRangeMap()->getGlobalNumElements())
756 column = row;
757 // if not, take a column index that we have ourselves since we
758 // know for sure it is there (and it will not create spurious
759 // messages to many ranks like putting index 0 on many
760 // processors)
761# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
762 else if (column_space_map->getLocalNumElements() > 0)
763# else
764 else if (column_space_map->getNodeNumElements() > 0)
765# endif
766 column = column_space_map->getGlobalElement(0);
767 nonlocal_graph->insertGlobalIndices(row, 1, &column);
768 }
769# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
770 Assert(nonlocal_graph->getRowMap()->getLocalNumElements() == 0 ||
771 column_space_map->getGlobalNumElements() == 0,
772 ExcInternalError());
773# else
774 Assert(nonlocal_graph->getRowMap()->getNodeNumElements() == 0 ||
775 column_space_map->getGlobalNumElements() == 0,
776 ExcInternalError());
777# endif
778
779 nonlocal_graph->fillComplete(column_space_map, graph->getRowMap());
780 }
781 graph->fillComplete(column_space_map, graph->getRowMap());
782
783 // Check consistency between the sizes set at the beginning and what
784 // Trilinos stores:
785 using namespace deal_II_exceptions::internals;
786 AssertDimension(n_rows(), graph->getGlobalNumRows());
787 AssertDimension(n_cols(), graph->getGlobalNumCols());
788 }
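// --- Usage sketch (illustrative, not part of this file) ---------------------
// compress() has to be called once all entries have been added and before the
// pattern is used, e.g. to initialize a matrix; it triggers fillComplete() on
// the underlying Tpetra graph. `owned` and `comm` are assumed to come from
// the calling code.
void example_fill_and_compress(const IndexSet &owned, const MPI_Comm comm)
{
  SparsityPattern<MemorySpace::Host> pattern(owned, comm, 1);
  std::vector<dealii::types::global_dof_index> cols(1);
  for (const auto row : owned)
    {
      cols[0] = row; // a diagonal-only pattern, just for illustration
      pattern.add_row_entries(row, make_array_view(cols));
    }
  pattern.compress();
}
// -----------------------------------------------------------------------------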
789
790
792 template <typename MemorySpace>
793 bool
794 SparsityPattern<MemorySpace>::row_is_stored_locally(const size_type i) const
795 {
796 return graph->getRowMap()->getLocalElement(i) !=
797 Teuchos::OrdinalTraits<int>::invalid();
798 }
799
800
801
802 template <typename MemorySpace>
803 bool
804 SparsityPattern<MemorySpace>::exists(const size_type i,
805 const size_type j) const
806 {
807# if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
808 if (!row_is_stored_locally(i))
809 return false;
810
811 // Extract local indices in the matrix.
812 const auto trilinos_i = graph->getRowMap()->getLocalElement(i);
813 const auto trilinos_j = graph->getColMap()->getLocalElement(j);
814
815 typename TpetraTypes::GraphType<MemorySpace>::local_inds_host_view_type
816 col_indices;
817
818 // Generate the view.
819 graph->getLocalRowView(trilinos_i, col_indices);
820
821 // Search the index
822 const size_type local_col_index =
823 std::find(col_indices.data(),
824 col_indices.data() + col_indices.size(),
825 trilinos_j) -
826 col_indices.data();
828 return static_cast<size_t>(local_col_index) != col_indices.size();
829# else
830 (void)i;
831 (void)j;
832 DEAL_II_NOT_IMPLEMENTED();
833 return false;
834# endif
835 }
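// --- Usage sketch (illustrative, not part of this file) ---------------------
// exists() can only give a positive answer for rows that are stored on the
// calling process; for rows owned elsewhere it simply returns false, as the
// early return above shows.
void example_query_entry(const SparsityPattern<MemorySpace::Host> &pattern,
                         const dealii::types::global_dof_index     i,
                         const dealii::types::global_dof_index     j)
{
  if (pattern.exists(i, j))
    {
      // (i,j) is part of the pattern and row i is stored locally
    }
}
// -----------------------------------------------------------------------------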
836
837
838
839 template <typename MemorySpace>
840 typename SparsityPattern<MemorySpace>::size_type
841 SparsityPattern<MemorySpace>::bandwidth() const
842 {
843 size_type local_b = 0;
844 for (int i = 0; i < static_cast<int>(local_size()); ++i)
845 {
846# if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
847 typename TpetraTypes::GraphType<
848 MemorySpace>::local_inds_host_view_type indices;
849# else
850 Teuchos::ArrayView<const int> indices;
851# endif
852
853 graph->getLocalRowView(i, indices);
854 const auto num_entries = indices.size();
855 for (unsigned int j = 0; j < static_cast<unsigned int>(num_entries);
856 ++j)
857 {
858 if (static_cast<size_type>(std::abs(i - indices[j])) > local_b)
859 local_b = std::abs(i - indices[j]);
860 }
861 }
862
863 const size_type global_b =
864 Utilities::MPI::max(local_b,
865 Utilities::Trilinos::teuchos_comm_to_mpi_comm(
866 graph->getComm()));
867 return static_cast<size_type>(global_b);
868 }
869
870
871
872 template <typename MemorySpace>
873 unsigned int
874 SparsityPattern<MemorySpace>::local_size() const
875 {
876# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
877 return graph->getLocalNumRows();
878# else
879 return graph->getNodeNumRows();
880# endif
881 }
882
883
884
885 template <typename MemorySpace>
886 std::pair<typename SparsityPattern<MemorySpace>::size_type,
887 typename SparsityPattern<MemorySpace>::size_type>
888 SparsityPattern<MemorySpace>::local_range() const
889 {
890 const size_type begin = graph->getRowMap()->getMinGlobalIndex();
891 const size_type end = graph->getRowMap()->getMaxGlobalIndex() + 1;
892
893 return {begin, end};
894 }
895
896
897
898 template <typename MemorySpace>
899 std::uint64_t
900 SparsityPattern<MemorySpace>::n_nonzero_elements() const
901 {
902 return graph->getGlobalNumEntries();
903 }
904
905
906
907 template <typename MemorySpace>
908 unsigned int
909 SparsityPattern<MemorySpace>::max_entries_per_row() const
910 {
911# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
912 return graph->getLocalMaxNumRowEntries();
913# else
914 return graph->getNodeMaxNumRowEntries();
915# endif
916 }
917
918
920 template <typename MemorySpace>
921 typename SparsityPattern<MemorySpace>::size_type
922 SparsityPattern<MemorySpace>::row_length(const size_type row) const
923 {
924 Assert(row < (size_type)n_rows(), ExcInternalError());
925
926 // Get a representation of where the present row is located on
927 // the current processor
929 graph->getRowMap()->getLocalElement(row);
930
931 // On the processor who owns this row, we'll have a non-negative
932 // value for `local_row` and can ask for the length of the row.
933 if (local_row >= 0)
934 return graph->getNumEntriesInLocalRow(local_row);
935 else
936 return static_cast<size_type>(-1);
937 }
938
939
940
941 template <typename MemorySpace>
942 void
943 SparsityPattern<MemorySpace>::add_row_entries(
944 const dealii::types::global_dof_index &row,
945 const ArrayView<const dealii::types::global_dof_index> &columns,
946 const bool indices_are_sorted)
947 {
948 add_entries(row, columns.begin(), columns.end(), indices_are_sorted);
949 }
950
951
952
953 template <typename MemorySpace>
954 Teuchos::RCP<const typename TpetraTypes::MapType<MemorySpace>>
955 SparsityPattern<MemorySpace>::domain_partitioner() const
956 {
957 return graph->getDomainMap();
958 }
959
960
961
962 template <typename MemorySpace>
963 Teuchos::RCP<const typename TpetraTypes::MapType<MemorySpace>>
964 SparsityPattern<MemorySpace>::range_partitioner() const
965 {
966 return graph->getRangeMap();
967 }
968
969
970
971 template <typename MemorySpace>
972 MPI_Comm
973 SparsityPattern<MemorySpace>::get_mpi_communicator() const
974 {
975 return Utilities::Trilinos::teuchos_comm_to_mpi_comm(
976 graph->getRangeMap()->getComm());
977 }
978
979
980
981 template <typename MemorySpace>
982 Teuchos::RCP<const Teuchos::Comm<int>>
983 SparsityPattern<MemorySpace>::get_teuchos_mpi_communicator() const
984 {
985 return graph->getRangeMap()->getComm();
986 }
987
988
989
990 // As of now, no particularly neat
991 // output is generated in case of
992 // multiple processors.
993 template <typename MemorySpace>
994 void
995 SparsityPattern<MemorySpace>::print(
996 std::ostream &out,
997 const bool write_extended_trilinos_info) const
998 {
999 if (write_extended_trilinos_info)
1000 out << *graph;
1001 else
1002 {
1003# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
1004 for (unsigned int i = 0; i < graph->getLocalNumRows(); ++i)
1005# else
1006 for (unsigned int i = 0; i < graph->getNodeNumRows(); ++i)
1007# endif
1008 {
1009# if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
1010 typename TpetraTypes::GraphType<
1011 MemorySpace>::local_inds_host_view_type indices;
1012# else
1013 Teuchos::ArrayView<const int> indices;
1014# endif
1015 graph->getLocalRowView(i, indices);
1016 int num_entries = indices.size();
1017 for (int j = 0; j < num_entries; ++j)
1018 out << "(" << graph->getRowMap()->getGlobalElement(i) << ","
1019 << graph->getColMap()->getGlobalElement(indices[j]) << ") "
1020 << std::endl;
1021 }
1022 }
1023
1024 AssertThrow(out.fail() == false, ExcIO());
1025 }
1026
1027
1028
1029 template <typename MemorySpace>
1030 void
1031 SparsityPattern<MemorySpace>::print_gnuplot(std::ostream &out) const
1032 {
1033 Assert(graph->isFillComplete() == true, ExcInternalError());
1034
1035 for (unsigned int row = 0; row < local_size(); ++row)
1036 {
1037# if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
1038 typename TpetraTypes::GraphType<
1039 MemorySpace>::local_inds_host_view_type indices;
1040# else
1041 Teuchos::ArrayView<const int> indices;
1042# endif
1043 graph->getLocalRowView(row, indices);
1044 int num_entries = indices.size();
1045
1046 Assert(num_entries >= 0, ExcInternalError());
1047 // avoid sign comparison warning
1048 const dealii::types::signed_global_dof_index num_entries_ =
1049 num_entries;
1050 for (dealii::types::signed_global_dof_index j = 0; j < num_entries_;
1051 ++j)
1052 // while matrix entries are usually
1053 // written (i,j), with i vertical and
1054 // j horizontal, gnuplot output is
1055 // x-y, that is we have to exchange
1056 // the order of output
1057 out << static_cast<int>(
1058 graph->getColMap()->getGlobalElement(indices[j]))
1059 << " "
1060 << -static_cast<int>(graph->getRowMap()->getGlobalElement(row))
1061 << std::endl;
1062 }
1063
1064 AssertThrow(out.fail() == false, ExcIO());
1065 }
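// --- Usage sketch (illustrative, not part of this file) ---------------------
// Each nonzero (i,j) is written as the point (j,-i), so the plot has the same
// orientation as the matrix. Assuming <fstream> is available in the calling
// code, the pattern can be visualized like this:
void example_plot_pattern(const SparsityPattern<MemorySpace::Host> &pattern)
{
  std::ofstream out("sparsity_pattern.gnuplot");
  pattern.print_gnuplot(out);
  // then, inside gnuplot:
  //   plot "sparsity_pattern.gnuplot" using 1:2 with points
}
// -----------------------------------------------------------------------------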
1066
1067 // TODO: Implement!
1068 template <typename MemorySpace>
1069 std::size_t
1070 SparsityPattern<MemorySpace>::memory_consumption() const
1071 {
1072 DEAL_II_NOT_IMPLEMENTED();
1073 return 0;
1074 }
1075
1076
1077# ifndef DOXYGEN
1078 // explicit instantiations
1079 template class SparsityPattern<dealii::MemorySpace::Host>;
1080
1081 template void
1082 SparsityPattern<dealii::MemorySpace::Host>::copy_from(
1083 const dealii::SparsityPattern &);
1084 template void
1085 SparsityPattern<dealii::MemorySpace::Host>::copy_from(
1086 const dealii::DynamicSparsityPattern &);
1087
1088 template void
1089 SparsityPattern<dealii::MemorySpace::Host>::reinit(
1090 const IndexSet &,
1091 const dealii::SparsityPattern &,
1092 const MPI_Comm,
1093 bool);
1094 template void
1095 SparsityPattern<dealii::MemorySpace::Host>::reinit(
1096 const IndexSet &,
1097 const dealii::DynamicSparsityPattern &,
1098 const MPI_Comm,
1099 bool);
1100
1101
1102 template void
1103 SparsityPattern<dealii::MemorySpace::Host>::reinit(
1104 const IndexSet &,
1105 const IndexSet &,
1106 const dealii::SparsityPattern &,
1107 const MPI_Comm,
1108 bool);
1109 template void
1110 SparsityPattern<dealii::MemorySpace::Host>::reinit(
1111 const IndexSet &,
1112 const IndexSet &,
1113 const dealii::DynamicSparsityPattern &,
1114 const MPI_Comm,
1115 bool);
1116# endif
1117
1118 } // namespace TpetraWrappers
1119
1120} // namespace LinearAlgebra
1121
1122 DEAL_II_NAMESPACE_CLOSE
1123
1124#endif // DEAL_II_TRILINOS_WITH_TPETRA