Reference documentation for deal.II version 9.3.3
trilinos_block_vector.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2008 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/lac/trilinos_parallel_block_vector.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/lac/trilinos_block_sparse_matrix.h>
#  include <deal.II/lac/trilinos_index_access.h>

DEAL_II_NAMESPACE_OPEN

namespace TrilinosWrappers
{
  namespace MPI
  {
    BlockVector &
    BlockVector::operator=(const value_type s)
    {
      BaseClass::operator=(s);
      return *this;
    }



    BlockVector &
    BlockVector::operator=(const BlockVector &v)
    {
      // we only allow assignment to vectors with the same number of blocks
      // or to an empty BlockVector
      Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(),
             ExcDimensionMismatch(n_blocks(), v.n_blocks()));

      if (this->n_blocks() != v.n_blocks())
        reinit(v.n_blocks());

      for (size_type i = 0; i < this->n_blocks(); ++i)
        this->components[i] = v.block(i);

      collect_sizes();

      return *this;
    }



    BlockVector &
    BlockVector::operator=(BlockVector &&v) noexcept
    {
      swap(v);
      return *this;
    }


    void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const MPI_Comm &             communicator,
                        const bool                   omit_zeroing_entries)
    {
      const size_type        no_blocks = parallel_partitioning.size();
      std::vector<size_type> block_sizes(no_blocks);

      for (size_type i = 0; i < no_blocks; ++i)
        {
          block_sizes[i] = parallel_partitioning[i].size();
        }

      this->block_indices.reinit(block_sizes);
      if (components.size() != n_blocks())
        components.resize(n_blocks());

      for (size_type i = 0; i < n_blocks(); ++i)
        components[i].reinit(parallel_partitioning[i],
                             communicator,
                             omit_zeroing_entries);

      collect_sizes();
    }

    void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const std::vector<IndexSet> &ghost_values,
                        const MPI_Comm &             communicator,
                        const bool                   vector_writable)
    {
      const size_type        no_blocks = parallel_partitioning.size();
      std::vector<size_type> block_sizes(no_blocks);

      for (size_type i = 0; i < no_blocks; ++i)
        {
          block_sizes[i] = parallel_partitioning[i].size();
        }

      this->block_indices.reinit(block_sizes);
      if (components.size() != n_blocks())
        components.resize(n_blocks());

      for (size_type i = 0; i < n_blocks(); ++i)
        components[i].reinit(parallel_partitioning[i],
                             ghost_values[i],
                             communicator,
                             vector_writable);

      collect_sizes();
    }


    void
    BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
    {
      block_indices = v.get_block_indices();
      if (components.size() != n_blocks())
        components.resize(n_blocks());

      for (size_type i = 0; i < n_blocks(); ++i)
        components[i].reinit(v.block(i), omit_zeroing_entries, false);

      collect_sizes();
    }


    void
    BlockVector::reinit(const size_type num_blocks)
    {
      std::vector<size_type> block_sizes(num_blocks, 0);
      this->block_indices.reinit(block_sizes);
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (size_type i = 0; i < this->n_blocks(); ++i)
        components[i].clear();

      collect_sizes();
    }


    void
    BlockVector::import_nonlocal_data_for_fe(
      const TrilinosWrappers::BlockSparseMatrix &m,
      const BlockVector &                        v)
    {
      Assert(m.n_block_rows() == v.n_blocks(),
             ExcDimensionMismatch(m.n_block_rows(), v.n_blocks()));
      Assert(m.n_block_cols() == v.n_blocks(),
             ExcDimensionMismatch(m.n_block_cols(), v.n_blocks()));

      if (v.n_blocks() != n_blocks())
        {
          block_indices = v.get_block_indices();
          components.resize(v.n_blocks());
        }

      for (size_type i = 0; i < this->n_blocks(); ++i)
        components[i].import_nonlocal_data_for_fe(m.block(i, i), v.block(i));

      collect_sizes();
    }


    void
    BlockVector::print(std::ostream &     out,
                       const unsigned int precision,
                       const bool         scientific,
                       const bool         across) const
    {
      for (size_type i = 0; i < this->n_blocks(); ++i)
        {
          if (across)
            out << 'C' << i << ':';
          else
            out << "Component " << i << std::endl;
          this->components[i].print(out, precision, scientific, across);
        }
    }

  } /* end of namespace MPI */
} /* end of namespace TrilinosWrappers */

DEAL_II_NAMESPACE_CLOSE

#endif
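
The IndexSet-based reinit() overloads above are the usual entry points for setting up these block vectors in an MPI-parallel program. The following is a minimal usage sketch, not part of trilinos_block_vector.cc: the helper name setup_vectors and the index-set vectors owned and relevant are placeholders for data the application would provide (one IndexSet per block, with relevant[i] containing the owned indices of block i plus its ghost indices), and it assumes deal.II was configured with MPI and Trilinos.

#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>

#include <vector>

void
setup_vectors(const std::vector<dealii::IndexSet> &owned,
              const std::vector<dealii::IndexSet> &relevant,
              const MPI_Comm &                     communicator)
{
  // Writable, fully distributed vector: one block per locally owned IndexSet.
  dealii::TrilinosWrappers::MPI::BlockVector solution;
  solution.reinit(owned, communicator, /*omit_zeroing_entries=*/false);

  // Ghosted vector for read access: owned plus ghost entries in each block.
  dealii::TrilinosWrappers::MPI::BlockVector ghosted_solution;
  ghosted_solution.reinit(owned,
                          relevant,
                          communicator,
                          /*vector_writable=*/false);

  // Block-wise assignment (operator= above) copies the distributed values,
  // which also updates the ghost entries of the ghosted vector.
  ghosted_solution = solution;
}

Assigning a fully distributed vector to one reinitialized with ghost entries is the usual deal.II pattern for making off-processor values available for read access.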