#ifdef DEAL_II_WITH_TRILINOS

#  include <boost/container/small_vector.hpp>

#  ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
#    include <EpetraExt_MatrixMatrix.h>
#  endif
#  include <Epetra_Export.h>
#  include <Teuchos_RCP.hpp>
#  include <ml_epetra_utils.h>
#  include <ml_struct.h>
    template <typename VectorType>
    typename VectorType::value_type *

    template <typename VectorType>
    const typename VectorType::value_type *

    template <typename VectorType>
    typename VectorType::value_type *

    template <typename VectorType>
    const typename VectorType::value_type *
#  ifdef DEAL_II_WITH_MPI
      return V.trilinos_vector()[0];

      return V.trilinos_vector()[0];

      return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();

      return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
#    ifdef DEAL_II_TRILINOS_WITH_TPETRA
    template <typename Number>
      return V.trilinos_vector().getDataNonConst().get();

    template <typename Number>
      return V.trilinos_vector().getData().get();

    template <typename Number>
      return V.trilinos_vector().getDataNonConst().get() +
             V.trilinos_vector().getLocalLength();

    template <typename Number>
      return V.trilinos_vector().getData().get() +
             V.trilinos_vector().getLocalLength();
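    // The fragments above are small internal helpers that return raw pointers
    // to the first and one-past-the-last locally owned element of the
    // supported vector types: deal.II vectors via their own begin()/end(),
    // Epetra vectors via trilinos_vector()[0] and MyLength(), and Tpetra
    // vectors via getData()/getDataNonConst() and getLocalLength(). The
    // matrix-vector product code further down uses them to treat all vector
    // classes uniformly.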
        std::make_shared<std::vector<TrilinosScalar>>(matrix->n());
        new Epetra_FECrsMatrix(View, *column_space_map, *column_space_map, 0))
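  // The constructors that follow all share one pattern: build an Epetra_Map
  // describing the column space (a serial map, or one obtained from an
  // IndexSet through make_trilinos_map()), then allocate the underlying
  // Epetra_FECrsMatrix either with a fixed number of entries per row or with
  // a per-row vector of entry counts. The reinterpret_cast/const_cast pair is
  // only there to adapt deal.II's std::vector<unsigned int> to the int*
  // signature that the Epetra constructor expects.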
                             const unsigned int n_max_entries_per_row)
      matrix(new Epetra_FECrsMatrix(
        n_max_entries_per_row,

                             const std::vector<unsigned int> &n_entries_per_row)
    , matrix(new Epetra_FECrsMatrix(
        reinterpret_cast<int *>(
          const_cast<unsigned int *>(n_entries_per_row.data())),
                             const unsigned int n_max_entries_per_row)
    : column_space_map(new Epetra_Map(
        parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(Copy,
                                    n_max_entries_per_row,

                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(new Epetra_Map(
        parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(Copy,
                                    reinterpret_cast<int *>(
                                      const_cast<unsigned int *>(
                                        n_entries_per_row.data())),
                             const IndexSet &col_parallel_partitioning,
    : column_space_map(new Epetra_Map(
        col_parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(
        row_parallel_partitioning.make_trilinos_map(communicator, false),
        n_max_entries_per_row,

                             const IndexSet &col_parallel_partitioning,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(new Epetra_Map(
        col_parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(
        row_parallel_partitioning.make_trilinos_map(communicator, false),
        reinterpret_cast<int *>(
          const_cast<unsigned int *>(n_entries_per_row.data())),
    : column_space_map(new Epetra_Map(sparsity_pattern.domain_partitioner()))
    , matrix(
        new Epetra_FECrsMatrix(Copy,
                               sparsity_pattern.trilinos_sparsity_pattern(),

             "The Trilinos sparsity pattern has not been compressed."));
    : column_space_map(std::move(other.column_space_map))
    , matrix(std::move(other.matrix))
    , nonlocal_matrix(std::move(other.nonlocal_matrix))
    , nonlocal_matrix_exporter(std::move(other.nonlocal_matrix_exporter))
    , last_action(other.last_action)
    , compressed(other.compressed)
  {
    other.last_action = Zero;
    other.compressed  = false;
  }
    bool needs_deep_copy =

    if (!needs_deep_copy)

        const int row_local = matrix->RowMap().LID(

        int  n_entries, rhs_n_entries;
        int *index_ptr, *rhs_index_ptr;
        int  ierr = rhs.matrix->ExtractMyRowView(row_local,

        ierr = matrix->ExtractMyRowView(row_local,

        if (n_entries != rhs_n_entries ||
            std::memcmp(static_cast<void *>(index_ptr),
                        static_cast<void *>(rhs_index_ptr),
                        sizeof(int) * n_entries) != 0)
          needs_deep_copy = true;

        for (int i = 0; i < n_entries; ++i)
          value_ptr[i] = rhs_value_ptr[i];

    matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);

      std_cxx14::make_unique<Epetra_CrsMatrix>(Copy,
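  // Copying from another matrix first checks, row by row, whether the local
  // sparsity (number of entries and column indices, compared with memcmp)
  // agrees between the two matrices. If it does, only the values are copied
  // in place; otherwise the Epetra_FECrsMatrix (and, if present, the nonlocal
  // part) is rebuilt as a deep copy of the right-hand side.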
    template <typename SparsityPatternType>
    reinit_matrix(const IndexSet &                     row_parallel_partitioning,
                  const IndexSet &                     column_parallel_partitioning,
                  const SparsityPatternType &          sparsity_pattern,
                  const bool                           exchange_data,
                  std::unique_ptr<Epetra_Map> &        column_space_map,
                  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
                  std::unique_ptr<Epetra_CrsMatrix> &  nonlocal_matrix,
                  std::unique_ptr<Epetra_Export> &     nonlocal_matrix_exporter)

      nonlocal_matrix.reset();
      nonlocal_matrix_exporter.reset();

      column_space_map = std_cxx14::make_unique<Epetra_Map>(

      if (column_space_map->Comm().MyPID() == 0)
          row_parallel_partitioning.size());
          column_parallel_partitioning.size());

      Epetra_Map row_space_map =

          trilinos_sparsity.reinit(row_parallel_partitioning,
                                   column_parallel_partitioning,

          matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(
            Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);

      std::vector<int> n_entries_per_row(last_row - first_row);

      for (size_type row = first_row; row < last_row; ++row)
        n_entries_per_row[row - first_row] = sparsity_pattern.row_length(row);
      std::unique_ptr<Epetra_CrsGraph> graph;
      if (row_space_map.Comm().NumProc() > 1)
        graph = std_cxx14::make_unique<Epetra_CrsGraph>(
          Copy, row_space_map, n_entries_per_row.data(), true);
          std_cxx14::make_unique<Epetra_CrsGraph>(Copy,
                                                  n_entries_per_row.data(),

      std::vector<TrilinosWrappers::types::int_type> row_indices;

      for (size_type row = first_row; row < last_row; ++row)
          const int row_length = sparsity_pattern.row_length(row);
          row_indices.resize(row_length, -1);

          typename SparsityPatternType::iterator p =
            sparsity_pattern.begin(row);
          for (size_type col = 0; p != sparsity_pattern.end(row); ++p, ++col)
            row_indices[col] = p->column();

          graph->Epetra_CrsGraph::InsertGlobalIndices(row,

      graph->FillComplete(*column_space_map, row_space_map);
      graph->OptimizeStorage();

      matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
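      // This overload builds an Epetra_CrsGraph from the given sparsity
      // pattern by inserting the column indices row by row, calls
      // FillComplete() and OptimizeStorage() on the graph, and only then
      // creates the Epetra_FECrsMatrix on top of it, so the matrix starts out
      // with a fixed, compressed sparsity layout.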
    class Epetra_CrsGraphMod : public Epetra_CrsGraph
    {
    public:
      Epetra_CrsGraphMod(const Epetra_Map &row_map,
                         const int *       n_entries_per_row)
        : Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
      {}

      void
      SetIndicesAreGlobal()
      {
        this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
      }
    };
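    // Epetra_CrsGraphMod exists to expose Epetra_CrsGraph's protected
    // SetIndicesAreGlobal() call: the graph holding the off-processor (ghost)
    // rows below is filled with global indices, and the flag has to be forced
    // on every processor before FillComplete(), even on processors that never
    // inserted an index themselves.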
    reinit_matrix(const IndexSet &                     row_parallel_partitioning,
                  const IndexSet &                     column_parallel_partitioning,
                  const bool                           exchange_data,
                  std::unique_ptr<Epetra_Map> &        column_space_map,
                  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
                  std::unique_ptr<Epetra_CrsMatrix> &  nonlocal_matrix,
                  std::unique_ptr<Epetra_Export> &     nonlocal_matrix_exporter)

      nonlocal_matrix.reset();
      nonlocal_matrix_exporter.reset();

      column_space_map = std_cxx14::make_unique<Epetra_Map>(
          row_parallel_partitioning.size());
          column_parallel_partitioning.size());

      Epetra_Map row_space_map =

      if (relevant_rows.size() == 0)
          relevant_rows.set_size(
          relevant_rows.add_range(
      relevant_rows.compress();

      Assert(relevant_rows.n_elements() >=
               static_cast<unsigned int>(row_space_map.NumMyElements()),
             ExcMessage(
               "Locally relevant rows of sparsity pattern must contain "
               "all locally owned rows"));
      const bool have_ghost_rows = [&]() {
        std::vector<::dealii::types::global_dof_index> indices;
        relevant_rows.fill_index_vector(indices);
        Epetra_Map relevant_map(
          row_space_map.Comm());
        return !relevant_map.SameAs(row_space_map);
      }();

      const unsigned int n_rows = relevant_rows.n_elements();
      std::vector<TrilinosWrappers::types::int_type> ghost_rows;
      std::vector<int> n_entries_per_row(row_space_map.NumMyElements());
      std::vector<int> n_entries_per_ghost_row;

      for (unsigned int i = 0, own = 0; i < n_rows; ++i)
            relevant_rows.nth_index_in_set(i);
          if (row_space_map.MyGID(global_row))
            n_entries_per_row[own++] = sparsity_pattern.row_length(global_row);
          else if (sparsity_pattern.row_length(global_row) > 0)
              ghost_rows.push_back(global_row);
              n_entries_per_ghost_row.push_back(

      Epetra_Map off_processor_map(-1,
                                   (ghost_rows.size() > 0) ?
                                     (ghost_rows.data()) :
                                   row_space_map.Comm());

      std::unique_ptr<Epetra_CrsGraph>    graph;
      std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
      if (row_space_map.Comm().NumProc() > 1)
          graph = std_cxx14::make_unique<Epetra_CrsGraph>(
            (n_entries_per_row.size() > 0) ? (n_entries_per_row.data()) :
            exchange_data ? false : true);
          if (have_ghost_rows == true)
            nonlocal_graph = std_cxx14::make_unique<Epetra_CrsGraphMod>(
              off_processor_map, n_entries_per_ghost_row.data());

        graph = std_cxx14::make_unique<Epetra_CrsGraph>(
          (n_entries_per_row.size() > 0) ? (n_entries_per_row.data()) : nullptr,
      std::vector<TrilinosWrappers::types::int_type> row_indices;

      for (unsigned int i = 0; i < n_rows; ++i)
            relevant_rows.nth_index_in_set(i);
          const int row_length = sparsity_pattern.row_length(global_row);
          row_indices.resize(row_length, -1);

          for (int col = 0; col < row_length; ++col)
            row_indices[col] = sparsity_pattern.column_number(global_row, col);

          if (row_space_map.MyGID(global_row))
            graph->InsertGlobalIndices(global_row,
            nonlocal_graph->InsertGlobalIndices(global_row,

      if (nonlocal_graph.get() != nullptr)
          nonlocal_graph->SetIndicesAreGlobal();
          Assert(nonlocal_graph->IndicesAreGlobal() == true,
          nonlocal_graph->FillComplete(*column_space_map, row_space_map);
          nonlocal_graph->OptimizeStorage();

          Epetra_Export exporter(nonlocal_graph->RowMap(), row_space_map);
          int ierr = graph->Export(*nonlocal_graph, exporter, Add);

            std_cxx14::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);

      graph->FillComplete(*column_space_map, row_space_map);
      graph->OptimizeStorage();

      matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
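      // In the parallel case this overload keeps two graphs: the main graph
      // for locally owned rows and a nonlocal Epetra_CrsGraphMod for rows
      // that appear in the sparsity pattern but are owned by another process.
      // Once both are filled, the nonlocal part is shipped to its owners via
      // an Epetra_Export and merged into the main graph before the
      // Epetra_FECrsMatrix is created from it.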
  template <typename SparsityPatternType>

  template <typename SparsityPatternType>
  inline typename std::enable_if<
    !std::is_same<SparsityPatternType,

                       const IndexSet &           col_parallel_partitioning,
                       const SparsityPatternType &sparsity_pattern,
                       const bool                 exchange_data)
    reinit_matrix(row_parallel_partitioning,
                  col_parallel_partitioning,

    matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(

    if (this == &sparse_matrix)

    matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(
  template <typename number>
    const IndexSet &                      row_parallel_partitioning,
    const IndexSet &                      col_parallel_partitioning,
    const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
    const double                          drop_tolerance,
    const bool                            copy_values,
    const ::dealii::SparsityPattern *     use_this_sparsity)

    if (copy_values == false)

    if (use_this_sparsity == nullptr)
      reinit(row_parallel_partitioning,
             col_parallel_partitioning,
             dealii_sparse_matrix.get_sparsity_pattern(),
      reinit(row_parallel_partitioning,
             col_parallel_partitioning,

    const size_type n_rows = dealii_sparse_matrix.m();

    const ::dealii::SparsityPattern &sparsity_pattern =
      (use_this_sparsity != nullptr) ?
        dealii_sparse_matrix.get_sparsity_pattern();

    if (matrix.get() == nullptr || m() != n_rows ||
      reinit(row_parallel_partitioning,
             col_parallel_partitioning,

    std::vector<size_type>      row_indices(maximum_row_length);
    std::vector<TrilinosScalar> values(maximum_row_length);

    for (size_type row = 0; row < n_rows; ++row)
      if (row_parallel_partitioning.is_element(row) == true)
            sparsity_pattern.begin(row);
          typename ::dealii::SparseMatrix<number>::const_iterator it =
            dealii_sparse_matrix.begin(row);

          if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
              if (std::fabs(it->value()) > drop_tolerance)
                  values[col]        = it->value();
                  row_indices[col++] = it->column();

          while (it != dealii_sparse_matrix.end(row) &&
                 select_index != sparsity_pattern.end(row))
              while (select_index->column() < it->column() &&
                     select_index != sparsity_pattern.end(row))
              while (it->column() < select_index->column() &&
                     it != dealii_sparse_matrix.end(row))
              if (it == dealii_sparse_matrix.end(row))
              if (std::fabs(it->value()) > drop_tolerance)
                  values[col]        = it->value();
                  row_indices[col++] = it->column();

              reinterpret_cast<size_type *>(row_indices.data()),
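  // This reinit() overload copies a deal.II SparseMatrix<number>: it first
  // sets up the Trilinos sparsity (from the matrix's own pattern or from
  // use_this_sparsity), then walks each locally owned row, drops entries
  // whose magnitude does not exceed drop_tolerance, and passes the surviving
  // column/value pairs to set(). The branch for square patterns handles the
  // diagonal entry, which deal.II sparse matrices store first in each row;
  // afterwards the two iterators are advanced in lockstep so that only
  // entries present in both the source matrix and the chosen pattern are
  // copied.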
  template <typename number>
                       const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
                       const double                          drop_tolerance,
                       const bool                            copy_values,
                       const ::dealii::SparsityPattern *     use_this_sparsity)
           dealii_sparse_matrix,

                       const bool copy_values)
    Assert(input_matrix.Filled() == true,
           ExcMessage("Input CrsMatrix has not called FillComplete()!"));

      std_cxx14::make_unique<Epetra_Map>(input_matrix.DomainMap());

    const Epetra_CrsGraph *graph = &input_matrix.Graph();

    matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);

    if (copy_values == true)
        const size_type my_nonzeros = input_matrix.NumMyNonzeros();
        std::memcpy(values, in_values, my_nonzeros * sizeof(TrilinosScalar));
1054 "compress() can only be called with VectorOperation add, insert, or unknown"));
1062 ExcMessage(
"Operation and argument to compress() do not match"));
1088 ierr =
matrix->OptimizeStorage();
      std_cxx14::make_unique<Epetra_Map>(0,

        matrix->ExtractMyRowView(local_row, num_entries, values, col_indices);

        const std::ptrdiff_t diag_index =
          std::find(col_indices, col_indices + num_entries, local_row) -

          if (diag_index != j || new_diag_value == 0)

        if (diag_index != num_entries &&
            std::fabs(values[diag_index]) == 0.0 && new_diag_value != 0.0)
          values[diag_index] = new_diag_value;

    for (const auto row : rows)
    if (trilinos_i == -1)

        int nnz_present = matrix->NumMyEntries(trilinos_i);

        int ierr = matrix->ExtractMyRowView(trilinos_i,

        Assert(nnz_present == nnz_extracted,

        const std::ptrdiff_t local_col_index =
          std::find(col_indices, col_indices + nnz_present, trilinos_j) -

        if (local_col_index == nnz_present)

        value = values[local_col_index];
    if ((trilinos_i == -1) || (trilinos_j == -1))

        int nnz_present = matrix->NumMyEntries(trilinos_i);

        int ierr = matrix->ExtractMyRowView(trilinos_i,

        Assert(nnz_present == nnz_extracted,

        const std::ptrdiff_t local_col_index =
          std::find(col_indices, col_indices + nnz_present, trilinos_j) -

        if (local_col_index == nnz_present)

        value = values[local_col_index];
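  // operator() and el() share the same lookup: translate the global (i,j)
  // pair to local Trilinos indices, extract a view of the local row, and
  // search for the column. They differ in how a miss is handled: operator()
  // insists that the entry exists locally, while el() returns zero for
  // entries that are not stored or not locally available.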
        int ierr = matrix->NumMyRowEntries(local_row, ncols);

    return static_cast<unsigned int>(ncols);
                    const std::vector<size_type> &col_indices,
                    const bool                    elide_zero_values)
    Assert(row_indices.size() == values.m(),
    Assert(col_indices.size() == values.n(),

    for (size_type i = 0; i < row_indices.size(); ++i)

                    const std::vector<size_type> &     col_indices,
                    const std::vector<TrilinosScalar> &values,
                    const bool                         elide_zero_values)
    Assert(col_indices.size() == values.size(),
                    const bool elide_zero_values)

    boost::container::small_vector<TrilinosScalar, 200> local_value_array(
    boost::container::small_vector<TrilinosWrappers::types::int_type, 200>
      local_index_array(n_cols);

    if (elide_zero_values == false)
        col_value_ptr = values;

        col_index_ptr = local_index_array.data();
        col_value_ptr = local_value_array.data();
            const double value = values[j];
                local_index_array[n_columns] = col_indices[j];
                local_value_array[n_columns] = value;

    if (matrix->RowMap().MyGID(
        if (matrix->Filled() == false)
            ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
              row, static_cast<int>(n_columns), col_value_ptr, col_index_ptr);
            ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row,
        if (matrix->Filled() == false)
            ierr = matrix->InsertGlobalValues(1,
                                              Epetra_FECrsMatrix::ROW_MAJOR);
            ierr = matrix->ReplaceGlobalValues(1,
                                               Epetra_FECrsMatrix::ROW_MAJOR);
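  // set() distinguishes two cases. For locally owned rows it calls the
  // Epetra_CrsMatrix base-class routines directly: InsertGlobalValues while
  // the matrix is not yet Filled(), ReplaceGlobalValues afterwards. For rows
  // owned by another process it goes through the Epetra_FECrsMatrix
  // interface, which queues the data for exchange during compress(). With
  // elide_zero_values set, zero entries are filtered into the small_vector
  // scratch arrays first so they never enter the sparsity pattern.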
                    const bool elide_zero_values)
    Assert(indices.size() == values.m(),

    for (size_type i = 0; i < indices.size(); ++i)

                    const std::vector<size_type> &col_indices,
                    const bool                    elide_zero_values)
    Assert(row_indices.size() == values.m(),
    Assert(col_indices.size() == values.n(),

    for (size_type i = 0; i < row_indices.size(); ++i)

                    const std::vector<size_type> &     col_indices,
                    const std::vector<TrilinosScalar> &values,
                    const bool                         elide_zero_values)
    Assert(col_indices.size() == values.size(),
                    const bool elide_zero_values,

    boost::container::small_vector<TrilinosScalar, 100> local_value_array(
    boost::container::small_vector<TrilinosWrappers::types::int_type, 100>
      local_index_array(n_cols);

    if (elide_zero_values == false)
        col_value_ptr = values;

        col_index_ptr = local_index_array.data();
        col_value_ptr = local_value_array.data();
            const double value = values[j];
                local_index_array[n_columns] = col_indices[j];
                local_value_array[n_columns] = value;

    if (matrix->RowMap().MyGID(
        ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row,

               ExcMessage("Attempted to write into off-processor matrix row "
                          "that has not been specified as being writable upon "

        ierr = matrix->SumIntoGlobalValues(1,
                                           Epetra_FECrsMatrix::ROW_MAJOR);

        std::cout << "------------------------------------------" << std::endl;
        std::cout << "Got error " << ierr << " in row " << row << " of proc "
                  << matrix->RowMap().Comm().MyPID()
                  << " when trying to add the columns:" << std::endl;
          std::cout << col_index_ptr[i] << " ";
        std::cout << std::endl << std::endl;
        std::cout << "Matrix row "
                  << (matrix->RowMap().MyGID(
                  << " has the following indices:" << std::endl;
        std::vector<TrilinosWrappers::types::int_type> indices;
        const Epetra_CrsGraph *graph =

        indices.resize(graph->NumGlobalIndices(row));
        graph->ExtractGlobalRowCopy(row,
          std::cout << indices[i] << " ";
        std::cout << std::endl << std::endl;
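  // add() mirrors set() but uses SumIntoGlobalValues, so existing entries are
  // accumulated rather than overwritten, and the entry must already exist in
  // the sparsity pattern. If Trilinos reports an error, the block above
  // prints the offending row, the columns that were being added, and the
  // columns the row actually contains, which makes mismatched sparsity
  // patterns much easier to diagnose.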
    const int ierr = matrix->PutScalar(d);

           ExcMessage("Can only add matrices with same distribution of rows"));
           ExcMessage("Addition of matrices only allowed if matrices are "
                      "filled, i.e., compress() has been called"));
    const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());

        const int row_local = matrix->RowMap().LID(

        int  n_entries, rhs_n_entries;
        int *index_ptr, *rhs_index_ptr;
        int  ierr = rhs.matrix->ExtractMyRowView(row_local,

        matrix->ExtractMyRowView(row_local, n_entries, value_ptr, index_ptr);

        bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
        if (!expensive_checks)
          expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
                                         static_cast<void *>(rhs_index_ptr),
                                         sizeof(int) * n_entries) != 0;

        if (!expensive_checks)
          for (int i = 0; i < n_entries; ++i)
            value_ptr[i] += rhs_value_ptr[i] * factor;

        if (expensive_checks)
            for (int i = 0; i < rhs_n_entries; ++i)
                if (rhs_value_ptr[i] == 0.)

                int local_col = matrix->ColMap().LID(rhs_global_col);
                          index_ptr + n_entries,
                Assert(local_index != index_ptr + n_entries &&
                         *local_index == local_col,
                       ExcMessage(
                         "Adding the entries from the other matrix "
                         "failed, because the sparsity pattern "
                         "of that matrix includes more elements than the "
                         "calling matrix, which is not allowed."));
                value_ptr[local_index - index_ptr] += factor * rhs_value_ptr[i];
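  // add(factor, rhs) has a fast path and a slow path: if the two matrices use
  // the same column map and a row has identical index arrays (checked with a
  // cheap memcmp), the values are accumulated directly; otherwise every rhs
  // entry is looked up individually in the calling matrix's row, and the
  // assertion fires if rhs contains an entry the calling matrix does not.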
    if (!matrix->UseTranspose())
        ierr = matrix->SetUseTranspose(true);
        ierr = matrix->SetUseTranspose(false);

    const int ierr = matrix->Scale(a);

    const int ierr = matrix->Scale(factor);

    return matrix->NormOne();

    return matrix->NormInf();

    return matrix->NormFrobenius();
    namespace SparseMatrixImplementation
      template <typename VectorType>
               ExcMessage("Column map of matrix does not fit with vector map!"));
               ExcMessage("Row map of matrix does not fit with vector map!"));

  template <typename VectorType>
  typename std::enable_if<

    Epetra_MultiVector tril_dst(
    Epetra_MultiVector tril_src(View,

    const int ierr = matrix->Multiply(false, tril_src, tril_dst);
  template <typename VectorType>
  typename std::enable_if<

  template <typename VectorType>
  typename std::enable_if<

    Epetra_MultiVector tril_dst(
    Epetra_MultiVector tril_src(View,

    const int ierr = matrix->Multiply(true, tril_src, tril_dst);

  template <typename VectorType>
  typename std::enable_if<

  template <typename VectorType>

    tmp_vector.reinit(dst, true);
    vmult(tmp_vector, src);

  template <typename VectorType>

    tmp_vector.reinit(dst, true);

    temp_vector.reinit(v, true);
    vmult(temp_vector, v);
    return temp_vector * v;

    temp_vector.reinit(v, true);
    vmult(temp_vector, v);
    return u * temp_vector;
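  // vmult()/Tvmult() wrap the source and destination vectors in
  // Epetra_MultiVector views of their local storage (using the begin()/end()
  // helpers from the top of the file) and call Epetra's Multiply() with the
  // transpose flag set accordingly. vmult_add()/Tvmult_add() go through a
  // temporary vector, and matrix_norm_square()/matrix_scalar_product() are
  // implemented as a vmult() followed by a dot product.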
                     const bool transpose_left)
      const bool use_vector = (V.size() == inputright.m() ? true : false);
      if (transpose_left == false)
          Assert(inputleft.n() == inputright.m(),
                 ExcMessage("Parallel partitioning of A and B does not fit."));
          Assert(inputleft.m() == inputright.m(),
                 ExcMessage("Parallel partitioning of A and B does not fit."));

      Teuchos::RCP<Epetra_CrsMatrix> mod_B;
      if (use_vector == false)
          mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>(
          mod_B = Teuchos::rcp(
                 ExcMessage("Parallel distribution of matrix B and vector V "
                            "does not match."));

          for (int i = 0; i < local_N; ++i)
              double *new_data, *B_data;
              mod_B->ExtractMyRowView(i, N_entries, new_data);
              double value = V.trilinos_vector()[0][i];
                new_data[j] = value * B_data[j];

#  ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
      const_cast<Epetra_CrsMatrix &>(
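      // The mmult()/Tmmult() helper optionally scales the rows of B by the
      // entries of the vector V (building the temporary copy mod_B when
      // needed) and then delegates the actual sparse matrix-matrix product to
      // EpetraExt::MatrixMatrix, which is only available when deal.II was
      // configured with DEAL_II_TRILINOS_WITH_EPETRAEXT.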
                      const bool print_detailed_trilinos_information) const
    if (print_detailed_trilinos_information == true)

        for (int i = 0; i < matrix->NumMyRows(); ++i)
            matrix->ExtractMyRowView(i, num_entries, values, indices);
                  << ") " << values[j] << std::endl;

      sizeof(*this) + sizeof(*matrix) + sizeof(*matrix->Graph().DataPtr());
      matrix->NumMyNonzeros() +
#  ifdef DEAL_II_WITH_MPI
    const Epetra_MpiComm *mpi_comm =
      dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
    return mpi_comm->Comm();

    return MPI_COMM_SELF;

#  ifndef DEAL_II_WITH_MPI
      make_serial_Epetra_map(const IndexSet &serial_partitioning)
          Epetra_SerialComm());
    namespace LinearOperatorImplementation
        : use_transpose(false)
#  ifdef DEAL_II_WITH_MPI
          communicator(MPI_COMM_SELF)
        , domain_map(IndexSet().make_trilinos_map(communicator.Comm()))
        , range_map(IndexSet().make_trilinos_map(communicator.Comm()))

               ExcMessage("Uninitialized TrilinosPayload::vmult called "
                          "(Default constructor)"));

               ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
                          "(Default constructor)"));

               ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
                          "(Default constructor)"));

               ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
                          "(Default constructor)"));
        : use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
#  ifdef DEAL_II_WITH_MPI
          communicator(matrix_exemplar.get_mpi_communicator())
        , domain_map(
            matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
              communicator.Comm()))
        , range_map(
            matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
              communicator.Comm()))
          domain_map(internal::make_serial_Epetra_map(
            matrix_exemplar.locally_owned_domain_indices()))
        , range_map(internal::make_serial_Epetra_map(
            matrix_exemplar.locally_owned_range_indices()))

                                 const Domain &tril_src) {
          Assert(&tril_src != &tril_dst,
            matrix.trilinos_matrix(),
            matrix.trilinos_matrix().UseTranspose());

          const int ierr = matrix.trilinos_matrix().Apply(tril_src, tril_dst);

                                  const Range &tril_src) {
          Assert(&tril_src != &tril_dst,
            matrix.trilinos_matrix(),
            !matrix.trilinos_matrix().UseTranspose());

          Epetra_CrsMatrix &tril_mtrx_non_const =
            const_cast<Epetra_CrsMatrix &>(matrix.trilinos_matrix());
          tril_mtrx_non_const.SetUseTranspose(
            !matrix.trilinos_matrix().UseTranspose());
          const int ierr = matrix.trilinos_matrix().Apply(tril_src, tril_dst);
          tril_mtrx_non_const.SetUseTranspose(
            !matrix.trilinos_matrix().UseTranspose());

                 ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
                            "(Matrix constructor with matrix exemplar)"));

                 ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
                            "(Matrix constructor with matrix exemplar)"));
        : use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
#  ifdef DEAL_II_WITH_MPI
          communicator(matrix_exemplar.get_mpi_communicator())
        , domain_map(
            matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
              communicator.Comm()))
        , range_map(
            matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
              communicator.Comm()))
          domain_map(internal::make_serial_Epetra_map(
            matrix_exemplar.locally_owned_domain_indices()))
        , range_map(internal::make_serial_Epetra_map(
            matrix_exemplar.locally_owned_range_indices()))

        vmult = [&matrix_exemplar, &preconditioner](Range &       tril_dst,
                                                    const Domain &tril_src) {
          Assert(&tril_src != &tril_dst,

        Tvmult = [&matrix_exemplar, &preconditioner](Domain &     tril_dst,
                                                     const Range &tril_src) {
          Assert(&tril_src != &tril_dst,

                                                     const Range &tril_src) {
          Assert(&tril_src != &tril_dst,

                      &preconditioner](Range &       tril_dst,
                                       const Domain &tril_src) {
          Assert(&tril_src != &tril_dst,
            preconditioner_exemplar.trilinos_operator().UseTranspose())
#  ifdef DEAL_II_WITH_MPI
          communicator(preconditioner_exemplar.get_mpi_communicator())
        , domain_map(preconditioner_exemplar.locally_owned_domain_indices()
                       .make_trilinos_map(communicator.Comm()))
        , range_map(preconditioner_exemplar.locally_owned_range_indices()
                      .make_trilinos_map(communicator.Comm()))
          domain_map(internal::make_serial_Epetra_map(
            preconditioner_exemplar.locally_owned_domain_indices()))
        , range_map(internal::make_serial_Epetra_map(
            preconditioner_exemplar.locally_owned_range_indices()))

        vmult = [&preconditioner_exemplar,
                 &preconditioner](Range &tril_dst, const Domain &tril_src) {
          Assert(&tril_src != &tril_dst,

        Tvmult = [&preconditioner_exemplar,
                  &preconditioner](Domain &tril_dst, const Range &tril_src) {
          Assert(&tril_src != &tril_dst,

                  &preconditioner](Domain &tril_dst, const Range &tril_src) {
          Assert(&tril_src != &tril_dst,

                  &preconditioner](Range &tril_dst, const Domain &tril_src) {
          Assert(&tril_src != &tril_dst,
        : vmult(payload.vmult)
        , Tvmult(payload.Tvmult)
        , inv_vmult(payload.inv_vmult)
        , inv_Tvmult(payload.inv_Tvmult)
        , use_transpose(payload.use_transpose)
        , communicator(payload.communicator)
        , domain_map(payload.domain_map)
        , range_map(payload.range_map)

        : use_transpose(false)
          communicator(first_op.communicator)
        , domain_map(second_op.domain_map)
        , range_map(first_op.range_map)

          tril_dst = tril_src;
          tril_dst = tril_src;
          tril_dst = tril_src;
          tril_dst = tril_src;

          const int ierr = tril_dst.PutScalar(0.0);
          const int ierr = tril_dst.PutScalar(0.0);
                 ExcMessage("Cannot compute inverse of null operator"));
          const int ierr = tril_dst.PutScalar(0.0);
                 ExcMessage("Cannot compute inverse of null operator"));
          const int ierr = tril_dst.PutScalar(0.0);

#  ifdef DEAL_II_WITH_MPI
        return MPI_COMM_SELF;

        return "TrilinosPayload";
2948 "Operators are set to work on incompatible IndexSets."));
2952 "Operators are set to work on incompatible IndexSets."));
2957 return_op.
vmult = [first_op, second_op](Range & tril_dst,
2958 const Domain &tril_src) {
2966 i->reinit(
IndexSet(first_op_init_map),
2971 const size_type i_local_size = i->end() - i->begin();
2975 (void)second_op_init_map;
2976 Intermediate tril_int(
View,
2984 second_op.
Apply(tril_src, tril_int);
2985 first_op.
Apply(tril_src, tril_dst);
2986 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2990 return_op.
Tvmult = [first_op, second_op](Domain & tril_dst,
2991 const Range &tril_src) {
3006 i->reinit(
IndexSet(first_op_init_map),
3011 const size_type i_local_size = i->end() - i->begin();
3015 (void)second_op_init_map;
3016 Intermediate tril_int(
View,
3024 second_op.
Apply(tril_src, tril_int);
3025 first_op.
Apply(tril_src, tril_dst);
3026 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3034 return_op.
inv_vmult = [first_op, second_op](Domain & tril_dst,
3035 const Range &tril_src) {
3043 i->reinit(
IndexSet(first_op_init_map),
3048 const size_type i_local_size = i->end() - i->begin();
3052 (void)second_op_init_map;
3053 Intermediate tril_int(
View,
3063 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3067 return_op.
inv_Tvmult = [first_op, second_op](Range & tril_dst,
3068 const Domain &tril_src) {
3083 i->reinit(
IndexSet(first_op_init_map),
3088 const size_type i_local_size = i->end() - i->begin();
3092 (void)second_op_init_map;
3093 Intermediate tril_int(
View,
3103 const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
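      // operator+(first_op, second_op) builds a payload whose apply functions
      // evaluate both operands on the same source vector: the second operand
      // is applied into an intermediate Epetra_MultiVector (a View of a
      // temporary vector sized from the payload maps), the first operand
      // writes into the destination, and the two results are summed with
      // tril_dst.Update(1.0, tril_int, 1.0).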
3127 "Operators are set to work on incompatible IndexSets."));
3132 return_op.
vmult = [first_op, second_op](Range & tril_dst,
3133 const Domain &tril_src) {
3141 i->reinit(
IndexSet(first_op_init_map),
3146 const size_type i_local_size = i->end() - i->begin();
3150 (void)second_op_init_map;
3151 Intermediate tril_int(
View,
3159 second_op.
Apply(tril_src, tril_int);
3160 first_op.
Apply(tril_int, tril_dst);
3163 return_op.
Tvmult = [first_op, second_op](Domain & tril_dst,
3164 const Range &tril_src) {
3179 i->reinit(
IndexSet(first_op_init_map),
3184 const size_type i_local_size = i->end() - i->begin();
3188 (void)second_op_init_map;
3189 Intermediate tril_int(
View,
3196 first_op.
Apply(tril_src, tril_int);
3197 second_op.
Apply(tril_int, tril_dst);
3204 return_op.
inv_vmult = [first_op, second_op](Domain & tril_dst,
3205 const Range &tril_src) {
3213 i->reinit(
IndexSet(first_op_init_map),
3218 const size_type i_local_size = i->end() - i->begin();
3222 (void)second_op_init_map;
3223 Intermediate tril_int(
View,
3235 return_op.
inv_Tvmult = [first_op, second_op](Range & tril_dst,
3236 const Domain &tril_src) {
3251 i->reinit(
IndexSet(first_op_init_map),
3256 const size_type i_local_size = i->end() - i->begin();
3260 (void)second_op_init_map;
3261 Intermediate tril_int(
View,
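      // operator*(first_op, second_op) composes the payloads: vmult applies
      // second_op first and then first_op to the intermediate vector, Tvmult
      // applies them in the reverse order (as required for the transposed
      // product), and the inv_* variants chain the operands' inverse
      // applications in the correspondingly reversed order.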
3289 # include "trilinos_sparse_matrix.inst"
    const dealii::SparsityPattern &,

    const dealii::Vector<double> &) const;
    const dealii::LinearAlgebra::distributed::Vector<double> &) const;
#  ifdef DEAL_II_WITH_MPI
#    ifdef DEAL_II_TRILINOS_WITH_TPETRA
    const dealii::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
    const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;

    const dealii::Vector<double> &) const;
    const dealii::LinearAlgebra::distributed::Vector<double> &) const;
#  ifdef DEAL_II_WITH_MPI
#    ifdef DEAL_II_TRILINOS_WITH_TPETRA
    const dealii::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
    const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;

    const dealii::Vector<double> &) const;
    const dealii::LinearAlgebra::distributed::Vector<double> &) const;
#  ifdef DEAL_II_WITH_MPI
#    ifdef DEAL_II_TRILINOS_WITH_TPETRA
    const dealii::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
    const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;

    const dealii::Vector<double> &) const;
    const dealii::LinearAlgebra::distributed::Vector<double> &) const;
#  ifdef DEAL_II_WITH_MPI
#    ifdef DEAL_II_TRILINOS_WITH_TPETRA
    const dealii::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
    const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;

#endif // DEAL_II_WITH_TRILINOS