deal.II version GIT relicensing-2167-g9622207b8f 2024-11-21 12:40:00+00:00
trilinos_block_vector.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2008 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/lac/trilinos_parallel_block_vector.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/lac/trilinos_block_sparse_matrix.h>
#  include <deal.II/lac/trilinos_index_access.h>


DEAL_II_NAMESPACE_OPEN

namespace TrilinosWrappers
{
  namespace MPI
  {
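    // assign the scalar value s to every element of every block; the loop
    // over the blocks is implemented in the base class (BlockVectorBase)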
    BlockVector &
    BlockVector::operator=(const value_type s)
    {
      BaseClass::operator=(s);
      return *this;
    }



    BlockVector &
    BlockVector::operator=(const BlockVector &v)
    {
      // we only allow assignment to vectors with the same number of blocks
      // or to an empty BlockVector
      Assert(this->n_blocks() == 0 || this->n_blocks() == v.n_blocks(),
             ExcDimensionMismatch(this->n_blocks(), v.n_blocks()));

      if (this->n_blocks() != v.n_blocks())
        this->block_indices = v.get_block_indices();

      this->components.resize(this->n_blocks());
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i] = v.components[i];

      this->collect_sizes();

      return *this;
    }



    BlockVector &
    BlockVector::operator=(BlockVector &&v) noexcept
    {
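      // move assignment is implemented as a swap: this vector takes over
      // v's data, and v receives the old contents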
      swap(v);
      return *this;
    }



    void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const MPI_Comm               communicator,
                        const bool                   omit_zeroing_entries)
    {
      // update the number of blocks
      this->block_indices.reinit(parallel_partitioning.size(), 0);

      // initialize each block
      this->components.resize(this->n_blocks());
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i].reinit(parallel_partitioning[i],
                                   communicator,
                                   omit_zeroing_entries);

      // update block_indices content
      this->collect_sizes();
    }



    void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const std::vector<IndexSet> &ghost_values,
                        const MPI_Comm               communicator,
                        const bool                   vector_writable)
    {
      AssertDimension(parallel_partitioning.size(), ghost_values.size());

      // update the number of blocks
      this->block_indices.reinit(parallel_partitioning.size(), 0);

      // initialize each block
      this->components.resize(this->n_blocks());
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i].reinit(parallel_partitioning[i],
                                   ghost_values[i],
                                   communicator,
                                   vector_writable);

      // update block_indices content
      this->collect_sizes();
    }



    void
    BlockVector::reinit(
      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
                &partitioners,
      const bool make_ghosted,
      const bool vector_writable)
    {
      // update the number of blocks
      this->block_indices.reinit(partitioners.size(), 0);

      // initialize each block
      this->components.resize(this->n_blocks());
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i].reinit(partitioners[i],
                                   make_ghosted,
                                   vector_writable);

      // update block_indices content
      this->collect_sizes();
    }



    void
    BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
    {
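      // adopt the block layout of v if it differs from the current one, then
      // reinitialize each block from the corresponding block of v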
      if (this->n_blocks() != v.n_blocks())
        this->block_indices = v.get_block_indices();

      this->components.resize(this->n_blocks());
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i].reinit(v.components[i],
                                   omit_zeroing_entries,
                                   false);

      this->collect_sizes();
    }



    void
    BlockVector::reinit(const size_type num_blocks)
    {
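      // set up num_blocks blocks, each of which starts out empty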
      this->block_indices.reinit(num_blocks, 0);

      this->components.resize(this->n_blocks());
      for (auto &c : this->components)
        c.clear();
    }



    void
    BlockVector::import_nonlocal_data_for_fe(
      const TrilinosWrappers::BlockSparseMatrix &m,
      const BlockVector                         &v)
    {
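      // reinitialize from v, letting each block import nonlocal data based
      // on the layout of the corresponding diagonal block of m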
      AssertDimension(m.n_block_rows(), v.n_blocks());
      AssertDimension(m.n_block_cols(), v.n_blocks());

      if (this->n_blocks() != v.n_blocks())
        this->block_indices = v.get_block_indices();

      this->components.resize(this->n_blocks());
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i].import_nonlocal_data_for_fe(m.block(i, i),
                                                        v.block(i));

      this->collect_sizes();
    }



    void
    BlockVector::print(std::ostream      &out,
                       const unsigned int precision,
                       const bool         scientific,
                       const bool         across) const
    {
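      // print each block in turn, prefixed either by a compact 'C<i>:' label
      // (across) or by a 'Component <i>' heading on its own line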
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        {
          if (across)
            out << 'C' << i << ':';
          else
            out << "Component " << i << std::endl;
          this->components[i].print(out, precision, scientific, across);
        }
    }

  } /* end of namespace MPI */
} /* end of namespace TrilinosWrappers */


DEAL_II_NAMESPACE_CLOSE

#endif