petsc_vector.h
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2004 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#ifndef dealii_petsc_vector_h
#define dealii_petsc_vector_h


#include <deal.II/base/config.h>

#ifdef DEAL_II_WITH_PETSC

#  include <deal.II/base/index_set.h>
#  include <deal.II/base/partitioner.h>

#  include <deal.II/lac/petsc_vector_base.h>
#  include <deal.II/lac/vector.h>
#  include <deal.II/lac/vector_operation.h>
#  include <deal.II/lac/vector_type_traits.h>

DEAL_II_NAMESPACE_OPEN

namespace PETScWrappers
{
  namespace MPI
  {
    class Vector : public VectorBase
    {
    public:
      /** Declare the type used for container sizes. */
      using size_type = types::global_dof_index;

      /** Default constructor. Initialize the vector as empty. */
      Vector();

      /**
       * Constructor. Set the global dimension to @p n, with
       * @p locally_owned_size elements stored on the present MPI process.
       */
      explicit Vector(const MPI_Comm  communicator,
                      const size_type n,
                      const size_type locally_owned_size);

      /**
       * Copy-constructor from a deal.II vector, storing
       * @p locally_owned_size elements on the present MPI process.
       */
      template <typename Number>
      explicit Vector(const MPI_Comm                communicator,
                      const dealii::Vector<Number> &v,
                      const size_type               locally_owned_size);

      /**
       * Construct a parallel ghosted vector from IndexSets of the locally
       * owned and ghost entries.
       */
      Vector(const IndexSet &local,
             const IndexSet &ghost,
             const MPI_Comm  communicator);

      /**
       * Construct a parallel vector without ghost elements from an IndexSet
       * of the locally owned entries.
       */
      explicit Vector(const IndexSet &local, const MPI_Comm communicator);

      /** Copy constructor. */
      Vector(const Vector &v);

      /**
       * Release all memory and return to a state just like after having
       * called the default constructor.
       */
      virtual void
      clear() override;

      /** Copy the given vector, resizing the present one if necessary. */
      Vector &
      operator=(const Vector &v);

      /** Set all components of the vector to the given number @p s. */
      Vector &
      operator=(const PetscScalar s);

      /** Copy the values of a deal.II vector into this object. */
      template <typename number>
      Vector &
      operator=(const dealii::Vector<number> &v);

      using VectorBase::reinit;

      /**
       * Change the global dimension of the vector to @p N, storing
       * @p locally_owned_size elements on the present MPI process.
       */
      void
      reinit(const MPI_Comm  communicator,
             const size_type N,
             const size_type locally_owned_size,
             const bool      omit_zeroing_entries = false);

      /**
       * Change the dimension to that of vector @p v, taking over its
       * parallel partitioning.
       */
      void
      reinit(const Vector &v, const bool omit_zeroing_entries = false);

      /**
       * Reinit as a ghosted vector from IndexSets of the locally owned and
       * ghost entries.
       */
      void
      reinit(const IndexSet &local,
             const IndexSet &ghost,
             const MPI_Comm  communicator);

      /**
       * Reinit as a vector without ghost elements from an IndexSet of the
       * locally owned entries.
       */
      void
      reinit(const IndexSet &local, const MPI_Comm communicator);

      /**
       * Initialize the vector according to the parallel partitioning
       * described by @p partitioner, optionally including its ghost entries.
       */
      void
      reinit(
        const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
        const bool make_ghosted = true);

      /** Print the vector to the given output stream. */
      void
      print(std::ostream      &out,
            const unsigned int precision  = 3,
            const bool         scientific = true,
            const bool         across     = true) const;

      /**
       * Return whether the vector contains only elements with value zero.
       * This is a collective operation.
       */
      bool
      all_zero() const;

    protected:
      /**
       * Create a PETSc vector of global size @p n with
       * @p locally_owned_size elements stored on the present MPI process.
       */
      virtual void
      create_vector(const MPI_Comm  comm,
                    const size_type n,
                    const size_type locally_owned_size);



      /**
       * Create a PETSc vector as above, but additionally store the ghost
       * entries described by @p ghostnodes.
       */
      virtual void
      create_vector(const MPI_Comm  comm,
                    const size_type n,
                    const size_type locally_owned_size,
                    const IndexSet &ghostnodes);
    };


    // ------------------ template and inline functions -------------


    /**
     * Exchange the contents of the two vectors @p u and @p v without
     * copying data.
     */
    inline void
    swap(Vector &u, Vector &v) noexcept
    {
      u.swap(v);
    }
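

    // --------------------------------------------------------------------
    // Illustration only -- not part of the deal.II sources. A minimal sketch
    // of how vectors of this class are typically set up, assuming the two
    // IndexSet arguments describe the locally owned and the locally relevant
    // (owned plus ghost) entries of some distributed enumeration; the
    // function name and all argument names are placeholders.
    inline void
    example_vector_setup(const IndexSet &locally_owned,
                         const IndexSet &locally_relevant,
                         const MPI_Comm  communicator)
    {
      // A vector that stores only the locally owned entries:
      Vector owned(locally_owned, communicator);

      // A ghosted vector that additionally provides read access to the
      // ghost entries contained in locally_relevant:
      Vector ghosted(locally_owned, locally_relevant, communicator);

      // Assigning the non-ghosted vector to the ghosted one also updates
      // the ghost values across MPI processes:
      ghosted = owned;

      // Alternatively, the partitioning can be described once by a shared
      // Utilities::MPI::Partitioner and reused; this assumes the usual
      // (owned, ghost, communicator) constructor of Partitioner:
      const auto partitioner =
        std::make_shared<const Utilities::MPI::Partitioner>(locally_owned,
                                                            locally_relevant,
                                                            communicator);
      Vector from_partitioner;
      from_partitioner.reinit(partitioner, /*make_ghosted=*/true);
    }
    // --------------------------------------------------------------------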


#  ifndef DOXYGEN

    template <typename number>
    Vector::Vector(const MPI_Comm                communicator,
                   const dealii::Vector<number> &v,
                   const size_type               locally_owned_size)
    {
      Vector::create_vector(communicator, v.size(), locally_owned_size);

      *this = v;
    }



    inline Vector &
    Vector::operator=(const PetscScalar s)
    {
      VectorBase::operator=(s);

      return *this;
    }



    template <typename number>
    inline Vector &
    Vector::operator=(const dealii::Vector<number> &v)
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));

      // FIXME: the following isn't necessarily fast, but this is due to
      // the fact that PETSc doesn't offer an inlined access operator.
      //
      // if someone wants to contribute some code: to make this code
      // faster, one could either first convert all values to PetscScalar,
      // and then set them all at once using VecSetValues. This has the
      // drawback that it could take quite some memory, if the vector is
      // large, and it would in addition allocate memory on the heap, which
      // is expensive. an alternative would be to split the vector into
      // chunks of, say, 128 elements, convert a chunk at a time and set it
      // in the output vector using VecSetValues. since 128 elements is
      // small enough, this could easily be allocated on the stack (as a
      // local variable) which would make the whole thing much more
      // efficient.
      //
      // a second way to make things faster is for the special case that
      // number==PetscScalar. we could then declare a specialization of
      // this template, and omit the conversion. the problem with this is
      // that the best we can do is to use VecSetValues, but this isn't
      // very efficient either: it wants to see an array of indices, which
      // in this case a) again takes up a whole lot of memory on the heap,
      // and b) is totally dumb since its content would simply be the
      // sequence 0,1,2,3,...,n. the best of all worlds would probably be a
      // function in PETSc that would take a pointer to an array of
      // PetscScalar values and simply copy n elements verbatim into the
      // vector...
      for (size_type i = 0; i < v.size(); ++i)
        (*this)(i) = v(i);

      compress(VectorOperation::insert);

      return *this;
    }
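

    // --------------------------------------------------------------------
    // Illustration only -- not part of the deal.II sources. A minimal sketch
    // of the chunked VecSetValues strategy outlined in the FIXME comment
    // above: convert a fixed-size, stack-allocated batch of entries at a
    // time and hand each batch to PETSc in one call. The function name, the
    // chunk size of 128, and the assumption that the caller passes the raw
    // PETSc Vec are placeholders; PETSc error codes are ignored for brevity.
    // Like the element-wise loop above, the sketch uses global indices.
    template <typename number>
    void
    example_copy_in_chunks(Vec petsc_vec, const dealii::Vector<number> &v)
    {
      constexpr PetscInt chunk_size = 128;
      PetscInt           indices[chunk_size];
      PetscScalar        values[chunk_size];

      const PetscInt n = static_cast<PetscInt>(v.size());
      for (PetscInt begin = 0; begin < n; begin += chunk_size)
        {
          const PetscInt length =
            (begin + chunk_size <= n) ? chunk_size : (n - begin);

          for (PetscInt j = 0; j < length; ++j)
            {
              indices[j] = begin + j;
              values[j]  = static_cast<PetscScalar>(v(begin + j));
            }

          // One PETSc call per chunk amortizes the per-call overhead
          // without ever allocating a large temporary on the heap.
          VecSetValues(petsc_vec, length, indices, values, INSERT_VALUES);
        }

      // VecSetValues requires a matching assembly phase before the vector
      // may be read again.
      VecAssemblyBegin(petsc_vec);
      VecAssemblyEnd(petsc_vec);
    }
    // --------------------------------------------------------------------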


#  endif // DOXYGEN
  } // namespace MPI
} // namespace PETScWrappers

namespace internal
{
  namespace LinearOperatorImplementation
  {
    template <typename>
    class ReinitHelper;

    /**
     * A helper class used internally in linear_operator.h to initialize
     * range and domain vectors of type PETScWrappers::MPI::Vector.
     */
    template <>
    class ReinitHelper<PETScWrappers::MPI::Vector>
    {
    public:
      template <typename Matrix>
      static void
      reinit_range_vector(const Matrix               &matrix,
                          PETScWrappers::MPI::Vector &v,
                          bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_range_indices(),
                 matrix.get_mpi_communicator());
      }

      template <typename Matrix>
      static void
      reinit_domain_vector(const Matrix               &matrix,
                           PETScWrappers::MPI::Vector &v,
                           bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_domain_indices(),
                 matrix.get_mpi_communicator());
      }
    };
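
    // ------------------------------------------------------------------
    // Illustration only -- not part of the deal.II sources. The helper
    // above is what LinearOperator-related code calls to size the range
    // and domain vectors of an operator. Used directly for some matrix
    // object (a placeholder for, e.g., a parallel PETSc matrix providing
    // locally_owned_range_indices(), locally_owned_domain_indices(), and
    // get_mpi_communicator()), it reads as follows:
    template <typename MatrixType>
    void
    example_reinit_from_matrix(const MatrixType           &matrix,
                               PETScWrappers::MPI::Vector &range_vector,
                               PETScWrappers::MPI::Vector &domain_vector)
    {
      ReinitHelper<PETScWrappers::MPI::Vector>::reinit_range_vector(
        matrix, range_vector, /*omit_zeroing_entries=*/false);
      ReinitHelper<PETScWrappers::MPI::Vector>::reinit_domain_vector(
        matrix, domain_vector, /*omit_zeroing_entries=*/false);
    }
    // ------------------------------------------------------------------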

  } // namespace LinearOperatorImplementation
} /* namespace internal */


template <>
struct is_serial_vector<PETScWrappers::MPI::Vector> : std::false_type
{};
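
// Illustration only -- not part of the deal.II sources: the trait above can
// be queried at compile time, for example to dispatch between serial and
// distributed code paths.
static_assert(is_serial_vector<PETScWrappers::MPI::Vector>::value == false,
              "PETScWrappers::MPI::Vector is a distributed vector type");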


DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_PETSC

#endif