trilinos_block_vector.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2008 - 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/lac/trilinos_parallel_block_vector.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/lac/trilinos_block_sparse_matrix.h>
#  include <deal.II/lac/trilinos_index_access.h>

DEAL_II_NAMESPACE_OPEN

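// This file provides the non-inline member functions of
// TrilinosWrappers::MPI::BlockVector, a vector composed of several
// blocks of Trilinos-based MPI-parallel vectors.
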
namespace TrilinosWrappers
{
  namespace MPI
  {
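    // Assign the scalar s to all entries of the vector by forwarding to
    // the corresponding operator of the base class.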
    BlockVector &
    BlockVector::operator=(const value_type s)
    {
      BaseClass::operator=(s);
      return *this;
    }


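    // Copy assignment. Assignment is only allowed between vectors with the
    // same block structure, or to a previously empty vector that is resized
    // here; the individual blocks are then copied one by one.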
    BlockVector &
    BlockVector::operator=(const BlockVector &v)
    {
      // we only allow assignment to vectors with the same number of blocks
      // or to an empty BlockVector
      Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(),
             ExcDimensionMismatch(n_blocks(), v.n_blocks()));

      if (this->n_blocks() != v.n_blocks())
        reinit(v.n_blocks());

      for (size_type i = 0; i < this->n_blocks(); ++i)
        this->components[i] = v.block(i);

      collect_sizes();

      return *this;
    }


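    // Move assignment: simply exchange the contents with the expiring
    // argument instead of copying any data.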
    BlockVector &
    BlockVector::operator=(BlockVector &&v) noexcept
    {
      swap(v);
      return *this;
    }


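    // Reinitialize the vector from a set of index sets, one per block,
    // that describe which elements of each block are locally owned on the
    // current MPI process.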
    void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const MPI_Comm &             communicator,
                        const bool                   omit_zeroing_entries)
    {
      const size_type        no_blocks = parallel_partitioning.size();
      std::vector<size_type> block_sizes(no_blocks);

      for (size_type i = 0; i < no_blocks; ++i)
        {
          block_sizes[i] = parallel_partitioning[i].size();
        }

      this->block_indices.reinit(block_sizes);
      if (components.size() != n_blocks())
        components.resize(n_blocks());

      for (size_type i = 0; i < n_blocks(); ++i)
        components[i].reinit(parallel_partitioning[i],
                             communicator,
                             omit_zeroing_entries);

      collect_sizes();
    }


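    // Same as above, but additionally make the ghost elements listed in
    // ghost_values[i] available for read access on the current process.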
    void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const std::vector<IndexSet> &ghost_values,
                        const MPI_Comm &             communicator,
                        const bool                   vector_writable)
    {
      const size_type        no_blocks = parallel_partitioning.size();
      std::vector<size_type> block_sizes(no_blocks);

      for (size_type i = 0; i < no_blocks; ++i)
        {
          block_sizes[i] = parallel_partitioning[i].size();
        }

      this->block_indices.reinit(block_sizes);
      if (components.size() != n_blocks())
        components.resize(n_blocks());

      for (size_type i = 0; i < n_blocks(); ++i)
        components[i].reinit(parallel_partitioning[i],
                             ghost_values[i],
                             communicator,
                             vector_writable);

      collect_sizes();
    }


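    // Reinitialize the vector to have the same block structure and
    // parallel layout as v, without copying v's values.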
    void
    BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
    {
      block_indices = v.get_block_indices();
      if (components.size() != n_blocks())
        components.resize(n_blocks());

      for (size_type i = 0; i < n_blocks(); ++i)
        components[i].reinit(v.block(i), omit_zeroing_entries, false);

      collect_sizes();
    }


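    // Reinitialize to num_blocks empty blocks; the blocks need to be set
    // up individually via their own reinit() functions afterwards.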
    void
    BlockVector::reinit(const size_type num_blocks)
    {
      std::vector<size_type> block_sizes(num_blocks, 0);
      this->block_indices.reinit(block_sizes);
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (size_type i = 0; i < this->n_blocks(); ++i)
        components[i].clear();

      collect_sizes();
    }


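    // Copy the data of v into this vector, importing the nonlocal entries
    // required by the sparsity pattern of the diagonal blocks of m; see
    // Vector::import_nonlocal_data_for_fe() for details.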
    void
    BlockVector::import_nonlocal_data_for_fe(
      const TrilinosWrappers::BlockSparseMatrix &m,
      const BlockVector &                        v)
    {
      Assert(m.n_block_rows() == v.n_blocks(),
             ExcDimensionMismatch(m.n_block_rows(), v.n_blocks()));
      Assert(m.n_block_cols() == v.n_blocks(),
             ExcDimensionMismatch(m.n_block_cols(), v.n_blocks()));

      if (v.n_blocks() != n_blocks())
        {
          block_indices = v.get_block_indices();
          components.resize(v.n_blocks());
        }

      for (size_type i = 0; i < this->n_blocks(); ++i)
        components[i].import_nonlocal_data_for_fe(m.block(i, i), v.block(i));

      collect_sizes();
    }


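    // Print all blocks to the given stream, either across a single line
    // (with 'C<block>:' prefixes) or one component per section.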
    void
    BlockVector::print(std::ostream &     out,
                       const unsigned int precision,
                       const bool         scientific,
                       const bool         across) const
    {
      for (size_type i = 0; i < this->n_blocks(); ++i)
        {
          if (across)
            out << 'C' << i << ':';
          else
            out << "Component " << i << std::endl;
          this->components[i].print(out, precision, scientific, across);
        }
    }

  } /* end of namespace MPI */
} /* end of namespace TrilinosWrappers */


DEAL_II_NAMESPACE_CLOSE

#endif