Reference documentation for deal.II version 9.5.0

petsc_parallel_vector.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/mpi.h>

#include <deal.II/lac/petsc_vector.h>

#ifdef DEAL_II_WITH_PETSC

#  include <algorithm>
#  include <cmath>

DEAL_II_NAMESPACE_OPEN

namespace PETScWrappers
{
  namespace MPI
  {
    Vector::Vector()
    {
      // virtual functions called in constructors and destructors never use the
      // override in a derived class
      // for clarity be explicit on which function is called
      Vector::create_vector(MPI_COMM_SELF, 0, 0);
    }



    Vector::Vector(const MPI_Comm  communicator,
                   const size_type n,
                   const size_type locally_owned_size)
    {
      Vector::create_vector(communicator, n, locally_owned_size);
    }


    Vector::Vector(const IndexSet &local,
                   const IndexSet &ghost,
                   const MPI_Comm  communicator)
    {
      Assert(local.is_ascending_and_one_to_one(communicator),
             ExcNotImplemented());

      IndexSet ghost_set = ghost;
      ghost_set.subtract_set(local);

      Vector::create_vector(communicator,
                            local.size(),
                            local.n_elements(),
                            ghost_set);
    }


    Vector::Vector(const Vector &v)
      : VectorBase()
    {
      if (v.has_ghost_elements())
        Vector::create_vector(v.get_mpi_communicator(),
                              v.size(),
                              v.locally_owned_size(),
                              v.ghost_indices);
      else
        Vector::create_vector(v.get_mpi_communicator(),
                              v.size(),
                              v.locally_owned_size());

      this->operator=(v);
    }


    Vector::Vector(const IndexSet &local, const MPI_Comm communicator)
    {
      Assert(local.is_ascending_and_one_to_one(communicator),
             ExcNotImplemented());
      Vector::create_vector(communicator, local.size(), local.n_elements());
    }


    Vector &
    Vector::operator=(const Vector &v)
    {
      // make sure left- and right-hand side of the assignment are
      // compress()'ed:
      Assert(v.last_action == VectorOperation::unknown,
             internal::VectorReference::ExcWrongMode(VectorOperation::unknown,
                                                     v.last_action));
      Assert(last_action == VectorOperation::unknown,
             internal::VectorReference::ExcWrongMode(VectorOperation::unknown,
                                                     last_action));

      // if the vectors have different sizes,
      // then first resize the present one
      if (size() != v.size())
        {
          if (v.has_ghost_elements())
            reinit(v.locally_owned_elements(),
                   v.ghost_indices,
                   v.get_mpi_communicator());
          else
            reinit(v.get_mpi_communicator(),
                   v.size(),
                   v.locally_owned_size(),
                   true);
        }

      PetscErrorCode ierr = VecCopy(v.vector, vector);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      if (has_ghost_elements())
        {
          ierr = VecGhostUpdateBegin(vector, INSERT_VALUES, SCATTER_FORWARD);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
          ierr = VecGhostUpdateEnd(vector, INSERT_VALUES, SCATTER_FORWARD);
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
      return *this;
    }


    void
    Vector::clear()
    {
      VectorBase::clear();

      create_vector(MPI_COMM_SELF, 0, 0);
    }


    void
    Vector::reinit(const MPI_Comm  communicator,
                   const size_type n,
                   const size_type local_sz,
                   const bool      omit_zeroing_entries)
    {
      // only do something if the sizes
      // mismatch (may not be true for every proc)

      int k_global, k = ((size() != n) || (locally_owned_size() != local_sz));
      {
        const int ierr =
          MPI_Allreduce(&k, &k_global, 1, MPI_INT, MPI_LOR, communicator);
        AssertThrowMPI(ierr);
      }

      if (k_global || has_ghost_elements())
        {
          // FIXME: I'd like to use this here,
          // but somehow it leads to odd errors
          // somewhere down the line in some of
          // the tests:
          //   const PetscErrorCode ierr = VecSetSizes (vector, n, n);
          //   AssertThrow (ierr == 0, ExcPETScError(ierr));

          // so let's go the slow way:

          const PetscErrorCode ierr = VecDestroy(&vector);
          AssertThrow(ierr == 0, ExcPETScError(ierr));

          create_vector(communicator, n, local_sz);
        }

      // finally clear the new vector if so
      // desired
      if (omit_zeroing_entries == false)
        *this = 0;
    }


    void
    Vector::reinit(const Vector &v, const bool omit_zeroing_entries)
    {
      if (v.has_ghost_elements())
        {
          reinit(v.locally_owned_elements(),
                 v.ghost_indices,
                 v.get_mpi_communicator());
          if (!omit_zeroing_entries)
            {
              const PetscErrorCode ierr = VecSet(vector, 0.0);
              AssertThrow(ierr == 0, ExcPETScError(ierr));
            }
        }
      else
        reinit(v.get_mpi_communicator(),
               v.size(),
               v.locally_owned_size(),
               omit_zeroing_entries);
    }


    void
    Vector::reinit(const IndexSet &local,
                   const IndexSet &ghost,
                   const MPI_Comm  comm)
    {
      const PetscErrorCode ierr = VecDestroy(&vector);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(local.is_ascending_and_one_to_one(comm), ExcNotImplemented());

      IndexSet ghost_set = ghost;
      ghost_set.subtract_set(local);

      create_vector(comm, local.size(), local.n_elements(), ghost_set);
    }

    void
    Vector::reinit(const IndexSet &local, const MPI_Comm comm)
    {
      const PetscErrorCode ierr = VecDestroy(&vector);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(local.is_ascending_and_one_to_one(comm), ExcNotImplemented());
      Assert(local.size() > 0, ExcMessage("can not create vector of size 0."));
      create_vector(comm, local.size(), local.n_elements());
    }

    void
    Vector::reinit(
      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
      const bool                                                make_ghosted)
    {
      if (make_ghosted)
        {
          Assert(partitioner->ghost_indices_initialized(),
                 ExcMessage("You asked to create a ghosted vector, but the "
                            "partitioner does not provide ghost indices."));

          this->reinit(partitioner->locally_owned_range(),
                       partitioner->ghost_indices(),
                       partitioner->get_mpi_communicator());
        }
      else
        {
          this->reinit(partitioner->locally_owned_range(),
                       partitioner->get_mpi_communicator());
        }
    }


    void
    Vector::create_vector(const MPI_Comm  communicator,
                          const size_type n,
                          const size_type locally_owned_size)
    {
      (void)n;
      AssertIndexRange(locally_owned_size, n + 1);
      ghosted = false;

      const PetscErrorCode ierr = VecCreateMPI(communicator,
                                               locally_owned_size,
                                               PETSC_DETERMINE,
                                               &vector);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(size() == n, ExcDimensionMismatch(size(), n));
    }


    void
    Vector::create_vector(const MPI_Comm  communicator,
                          const size_type n,
                          const size_type locally_owned_size,
                          const IndexSet &ghostnodes)
    {
      (void)n;
      AssertIndexRange(locally_owned_size, n + 1);
      ghosted       = true;
      ghost_indices = ghostnodes;

      const std::vector<size_type> ghostindices = ghostnodes.get_index_vector();

      const PetscInt *ptr =
        (ghostindices.size() > 0 ?
           reinterpret_cast<const PetscInt *>(ghostindices.data()) :
           nullptr);

      PetscErrorCode ierr = VecCreateGhost(communicator,
                                           locally_owned_size,
                                           PETSC_DETERMINE,
                                           ghostindices.size(),
                                           ptr,
                                           &vector);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(size() == n, ExcDimensionMismatch(size(), n));

#  ifdef DEBUG
      {
        // test ghost allocation in debug mode
        PetscInt begin, end;

        ierr = VecGetOwnershipRange(vector, &begin, &end);
        AssertThrow(ierr == 0, ExcPETScError(ierr));

        AssertDimension(locally_owned_size,
                        static_cast<size_type>(end - begin));

        Vec l;
        ierr = VecGhostGetLocalForm(vector, &l);
        AssertThrow(ierr == 0, ExcPETScError(ierr));

        PetscInt lsize;
        ierr = VecGetSize(l, &lsize);
        AssertThrow(ierr == 0, ExcPETScError(ierr));

        ierr = VecGhostRestoreLocalForm(vector, &l);
        AssertThrow(ierr == 0, ExcPETScError(ierr));

        AssertDimension(lsize,
                        end - begin +
                          static_cast<PetscInt>(ghost_indices.n_elements()));
      }
#  endif
    }


    bool
    Vector::all_zero() const
    {
      unsigned int has_nonzero = VectorBase::all_zero() ? 0 : 1;
#  ifdef DEAL_II_WITH_MPI
      // in parallel, check that the vector
      // is zero on _all_ processors.
      unsigned int num_nonzero =
        Utilities::MPI::sum(has_nonzero, this->get_mpi_communicator());
      return num_nonzero == 0;
#  else
      return has_nonzero == 0;
#  endif
    }


    void
    Vector::print(std::ostream &     out,
                  const unsigned int precision,
                  const bool         scientific,
                  const bool         across) const
    {
      AssertThrow(out.fail() == false, ExcIO());

      // get a representation of the vector and
      // loop over all the elements
      const PetscScalar *val;
      PetscInt           nlocal, istart, iend;

      PetscErrorCode ierr = VecGetArrayRead(vector, &val);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = VecGetLocalSize(vector, &nlocal);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      ierr = VecGetOwnershipRange(vector, &istart, &iend);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      // save the state of out stream
      std::ios::fmtflags old_flags     = out.flags();
      unsigned int       old_precision = out.precision(precision);

      out.precision(precision);
      if (scientific)
        out.setf(std::ios::scientific, std::ios::floatfield);
      else
        out.setf(std::ios::fixed, std::ios::floatfield);

      // let each processor produce its output in turn. this requires
      // synchronizing output between processors using a barrier --
      // which is clearly slow, but nobody is going to print a whole
      // matrix this way on a regular basis for production runs, so
      // the slowness of the barrier doesn't matter
      MPI_Comm communicator = this->get_mpi_communicator();
      for (unsigned int i = 0;
           i < Utilities::MPI::n_mpi_processes(communicator);
           i++)
        {
          const int mpi_ierr = MPI_Barrier(communicator);
          AssertThrowMPI(mpi_ierr);

          if (i == Utilities::MPI::this_mpi_process(communicator))
            {
              if (across)
                {
                  out << "[Proc" << i << " " << istart << "-" << iend - 1
                      << "]" << ' ';
                  for (PetscInt i = 0; i < nlocal; ++i)
                    out << val[i] << ' ';
                }
              else
                {
                  out << "[Proc " << i << " " << istart << "-" << iend - 1
                      << "]" << std::endl;
                  for (PetscInt i = 0; i < nlocal; ++i)
                    out << val[i] << std::endl;
                }
              out << std::endl;
            }
        }
      // reset output format
      out.flags(old_flags);
      out.precision(old_precision);

      // restore the representation of the
      // vector
      ierr = VecRestoreArrayRead(vector, &val);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      AssertThrow(out.fail() == false, ExcIO());
    }

  } // namespace MPI

} // namespace PETScWrappers

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_PETSC
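
Example of use (not part of the file above): a minimal sketch of how the constructors, compress(), and the copy operator= implemented in this file are typically combined to fill a writable distributed vector and then make the values, including ghost entries, available in a read-only ghosted vector. The function name example_usage and the concrete index layout are made up purely for illustration; error handling and the usual DoFHandler-based index sets are omitted.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/vector_operation.h>

using namespace dealii;

void example_usage(const MPI_Comm mpi_communicator)
{
  // Hypothetical layout: 100 global entries, split into contiguous
  // per-process ranges (this satisfies is_ascending_and_one_to_one()).
  const unsigned int n_procs =
    Utilities::MPI::n_mpi_processes(mpi_communicator);
  const unsigned int my_rank =
    Utilities::MPI::this_mpi_process(mpi_communicator);
  const types::global_dof_index n = 100;

  IndexSet locally_owned(n);
  locally_owned.add_range(my_rank * n / n_procs, (my_rank + 1) * n / n_procs);

  // Ghost entries: here simply the entry just before the local range, if any.
  IndexSet ghost(n);
  if (locally_owned.n_elements() > 0 && *locally_owned.begin() > 0)
    ghost.add_index(*locally_owned.begin() - 1);

  // Writable, non-ghosted vector: set owned entries, then compress().
  PETScWrappers::MPI::Vector distributed(locally_owned, mpi_communicator);
  for (const auto i : locally_owned)
    distributed(i) = static_cast<double>(i);
  distributed.compress(VectorOperation::insert);

  // Read-only ghosted vector: the assignment calls the operator= defined
  // above, which copies the owned values and then scatters them into the
  // ghost entries via VecGhostUpdateBegin/End.
  PETScWrappers::MPI::Vector ghosted(locally_owned, ghost, mpi_communicator);
  ghosted = distributed;
}

The same pattern appears in the deal.II parallel tutorial programs that use PETSc: a fully distributed vector is assembled and solved into, and a ghosted vector of this class is only ever written to through this assignment.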