petsc_vector.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_petsc_vector_h
# define dealii_petsc_vector_h


# include <deal.II/base/config.h>

# ifdef DEAL_II_WITH_PETSC

# include <deal.II/base/index_set.h>
# include <deal.II/base/partitioner.h>

# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_vector_base.h>
# include <deal.II/lac/vector.h>
# include <deal.II/lac/vector_operation.h>
# include <deal.II/lac/vector_type_traits.h>

DEAL_II_NAMESPACE_OPEN

namespace PETScWrappers
{
  namespace MPI
  {
    /**
     * Implementation of a parallel vector class based on PETSc, with the
     * vector's elements distributed across MPI processes.
     */
    class Vector : public VectorBase
    {
    public:
      /**
       * Declare type for container size.
       */
      using size_type = types::global_dof_index;

      /**
       * Default constructor. Initialize the vector as empty.
       */
      Vector();

      /**
       * Constructor. Set dimension to @p n and initialize all elements
       * with zero. @p locally_owned_size denotes the number of elements
       * stored on the present process.
       */
      explicit Vector(const MPI_Comm &communicator,
                      const size_type n,
                      const size_type locally_owned_size);

      /**
       * Copy-constructor from deal.II vectors. Set dimension to that of
       * the given vector and copy all elements.
       */
      template <typename Number>
      explicit Vector(const MPI_Comm &              communicator,
                      const dealii::Vector<Number> &v,
                      const size_type               locally_owned_size);

      /**
       * Copy-constructor from the values of a PETSc wrapper vector class.
       */
      DEAL_II_DEPRECATED
      explicit Vector(const MPI_Comm &  communicator,
                      const VectorBase &v,
                      const size_type   local_size);

      /**
       * Construct a new parallel ghosted PETSc vector from the set of
       * locally owned elements @p local and the set of ghost elements
       * @p ghost this process needs read access to.
       */
      Vector(const IndexSet &local,
             const IndexSet &ghost,
             const MPI_Comm &communicator);

      /**
       * Construct a new parallel PETSc vector without ghost elements.
       */
      explicit Vector(const IndexSet &local, const MPI_Comm &communicator);

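      // A minimal usage sketch for the two IndexSet constructors above
      // (the index sets and communicator are illustrative, not part of
      // this header):
      //
      //   IndexSet locally_owned(100);
      //   locally_owned.add_range(0, 50); // this rank owns elements [0,50)
      //   IndexSet ghosts(100);
      //   ghosts.add_index(60); // ...and additionally reads element 60
      //
      //   Vector ghosted(locally_owned, ghosts, MPI_COMM_WORLD);
      //   Vector owned_only(locally_owned, MPI_COMM_WORLD);
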
      /**
       * Copy constructor.
       */
      Vector(const Vector &v);

      /**
       * Release all memory and return to a state just like after having
       * called the default constructor.
       */
      virtual void
      clear() override;

      /**
       * Copy the given vector. Resize the present vector if necessary.
       */
      Vector &
      operator=(const Vector &v);

      /**
       * Set all components of the vector to the given number @p s.
       */
      Vector &
      operator=(const PetscScalar s);

      /**
       * Copy the values of a deal.II vector (as opposed to those of the
       * PETSc vector wrapper class) into this object.
       */
      template <typename number>
      Vector &
      operator=(const dealii::Vector<number> &v);

      /**
       * Change the dimension of the vector to @p N and resize the locally
       * stored chunk to @p locally_owned_size elements.
       */
      void
      reinit(const MPI_Comm &communicator,
             const size_type N,
             const size_type locally_owned_size,
             const bool      omit_zeroing_entries = false);

      /**
       * Change the dimension to that of the vector @p v, and copy the
       * distribution of its elements among the processes.
       */
      void
      reinit(const Vector &v, const bool omit_zeroing_entries = false);

      /**
       * Reinit as a ghosted vector from the given index sets.
       */
      void
      reinit(const IndexSet &local,
             const IndexSet &ghost,
             const MPI_Comm &communicator);

      /**
       * Reinit as a vector without ghost elements.
       */
      void
      reinit(const IndexSet &local, const MPI_Comm &communicator);

      /**
       * Initialize the vector from the ownership and ghost information
       * stored in the given Partitioner.
       */
      void
      reinit(
        const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);

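      // The Partitioner-based reinit() above bundles the same ownership
      // information into a single object. A hedged sketch, reusing the
      // illustrative index sets from the constructor example:
      //
      //   const auto partitioner =
      //     std::make_shared<const Utilities::MPI::Partitioner>(
      //       locally_owned, ghosts, MPI_COMM_WORLD);
      //   Vector vec;
      //   vec.reinit(partitioner);
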
      /**
       * Return a reference to the MPI communicator object in use with this
       * vector.
       */
      const MPI_Comm &
      get_mpi_communicator() const override;

      /**
       * Print the vector to the given output stream, with the given
       * precision, in scientific or fixed-point notation, and either with
       * all entries on one line (@p across) or one per line.
       */
      void
      print(std::ostream &     out,
            const unsigned int precision  = 3,
            const bool         scientific = true,
            const bool         across     = true) const;

      /**
       * Return whether the vector contains only elements with value zero.
       * This is a collective operation over all MPI processes.
       */
      bool
      all_zero() const;

    protected:
      /**
       * Create a parallel vector of global length @p n, of which
       * @p locally_owned_size elements are stored on the present process.
       */
      virtual void
      create_vector(const size_type n, const size_type locally_owned_size);

      /**
       * Same as above, but additionally store the given ghost indices.
       */
      virtual void
      create_vector(const size_type n,
                    const size_type locally_owned_size,
                    const IndexSet &ghostnodes);

    private:
      /**
       * Copy of the communicator object to be used for this parallel
       * vector.
       */
      MPI_Comm communicator;
    };


    // ------------------ template and inline functions -------------


    /**
     * Global function @p swap which exchanges the data of two vectors,
     * overloading the default implementation of the C++ standard library
     * which uses a temporary object.
     */
    inline void
    swap(Vector &u, Vector &v)
    {
      u.swap(v);
    }

# ifndef DOXYGEN

    template <typename number>
    Vector::Vector(const MPI_Comm &              communicator,
                   const dealii::Vector<number> &v,
                   const size_type               locally_owned_size)
      : communicator(communicator)
    {
      Vector::create_vector(v.size(), locally_owned_size);

      *this = v;
    }

462 
463 
464  inline Vector &
465  Vector::operator=(const PetscScalar s)
466  {
468 
469  return *this;
470  }
471 


    template <typename number>
    inline Vector &
    Vector::operator=(const dealii::Vector<number> &v)
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));

      // FIXME: the following isn't necessarily fast, but this is due to
      // the fact that PETSc doesn't offer an inlined access operator.
      //
      // if someone wants to contribute some code: to make this code
      // faster, one could either first convert all values to PetscScalar,
      // and then set them all at once using VecSetValues. This has the
      // drawback that it could take quite some memory, if the vector is
      // large, and it would in addition allocate memory on the heap, which
      // is expensive. an alternative would be to split the vector into
      // chunks of, say, 128 elements, convert a chunk at a time and set it
      // in the output vector using VecSetValues. since 128 elements is
      // small enough, this could easily be allocated on the stack (as a
      // local variable) which would make the whole thing much more
      // efficient.
      //
      // a second way to make things faster is for the special case that
      // number==PetscScalar. we could then declare a specialization of
      // this template, and omit the conversion. the problem with this is
      // that the best we can do is to use VecSetValues, but this isn't
      // very efficient either: it wants to see an array of indices, which
      // in this case a) again takes up a whole lot of memory on the heap,
      // and b) is totally dumb since its content would simply be the
      // sequence 0,1,2,3,...,n. the best of all worlds would probably be a
      // function in Petsc that would take a pointer to an array of
      // PetscScalar values and simply copy n elements verbatim into the
      // vector...
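      //
      // a rough sketch of that chunked variant (not library code;
      // VecSetValues and INSERT_VALUES are the actual PETSc API, 'vector'
      // refers to the underlying Vec member of VectorBase, and everything
      // else is illustrative):
      //
      //   constexpr size_type chunk_size = 128;
      //   PetscInt    indices[chunk_size];
      //   PetscScalar chunk[chunk_size];
      //   for (size_type start = 0; start < v.size(); start += chunk_size)
      //     {
      //       const size_type len =
      //         std::min<size_type>(chunk_size, v.size() - start);
      //       for (size_type i = 0; i < len; ++i)
      //         {
      //           indices[i] = static_cast<PetscInt>(start + i);
      //           chunk[i]   = static_cast<PetscScalar>(v(start + i));
      //         }
      //       VecSetValues(vector,
      //                    static_cast<PetscInt>(len),
      //                    indices,
      //                    chunk,
      //                    INSERT_VALUES);
      //     }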
      for (size_type i = 0; i < v.size(); ++i)
        (*this)(i) = v(i);

      compress(VectorOperation::insert);

      return *this;
    }



    inline const MPI_Comm &
    Vector::get_mpi_communicator() const
    {
      return communicator;
    }

# endif // DOXYGEN
  } // namespace MPI
} // namespace PETScWrappers

namespace internal
{
  namespace LinearOperatorImplementation
  {
    template <typename>
    class ReinitHelper;

    /**
     * A helper class used internally in linear_operator.h. Specialization
     * for PETScWrappers::MPI::Vector.
     */
    template <>
    class ReinitHelper<PETScWrappers::MPI::Vector>
    {
    public:
      template <typename Matrix>
      static void
      reinit_range_vector(const Matrix &               matrix,
                          PETScWrappers::MPI::Vector &v,
                          bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_range_indices(),
                 matrix.get_mpi_communicator());
      }

      template <typename Matrix>
      static void
      reinit_domain_vector(const Matrix &               matrix,
                           PETScWrappers::MPI::Vector &v,
                           bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_domain_indices(),
                 matrix.get_mpi_communicator());
      }
    };
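
    // This specialization is what lets PETScWrappers::MPI::Vector serve as
    // the Range/Domain type of a LinearOperator. A hedged usage sketch
    // (assumes <deal.II/lac/linear_operator.h> is included and 'A' is a
    // compatible parallel matrix, e.g. a PETScWrappers::MPI::SparseMatrix):
    //
    //   const auto op = linear_operator<PETScWrappers::MPI::Vector>(A);
    //   PETScWrappers::MPI::Vector x, y;
    //   op.reinit_domain_vector(x, /*omit_zeroing_entries=*/false);
    //   op.reinit_range_vector(y, /*omit_zeroing_entries=*/false);
    //   y = op * x; // matrix-vector product through the operator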

  } // namespace LinearOperatorImplementation
} /* namespace internal */

/**
 * Declare dealii::PETScWrappers::MPI::Vector as distributed vector.
 */
template <>
struct is_serial_vector<PETScWrappers::MPI::Vector> : std::false_type
{};


DEAL_II_NAMESPACE_CLOSE

# endif // DEAL_II_WITH_PETSC

#endif
/*------------------------- petsc_vector.h -------------------------*/