Reference documentation for deal.II version GIT relicensing-233-g802318d791 2024-03-28 20:20:02+00:00
trilinos_vector.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2008 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/lac/trilinos_vector.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/base/mpi.h>
#  include <deal.II/base/trilinos_utilities.h>

#  include <deal.II/lac/read_write_vector.h>
#  include <deal.II/lac/trilinos_index_access.h>
#  include <deal.II/lac/trilinos_parallel_block_vector.h>
#  include <deal.II/lac/trilinos_sparse_matrix.h>

#  include <boost/io/ios_state.hpp>

#  include <Epetra_Export.h>
#  include <Epetra_Import.h>
#  include <Epetra_Vector.h>

#  include <cmath>
#  include <memory>


DEAL_II_NAMESPACE_OPEN

namespace TrilinosWrappers
{
#  ifndef DOXYGEN
  namespace internal
  {
    VectorReference::operator TrilinosScalar() const
    {
      AssertIndexRange(index, vector.size());

      // Trilinos allows for vectors to be referenced by the [] or ()
      // operators but only () checks index bounds. We check these bounds by
      // ourselves, so we can use []. Note that we can only get local values.

      const TrilinosWrappers::types::int_type local_index =
        vector.vector->Map().LID(
          static_cast<TrilinosWrappers::types::int_type>(index));
      Assert(local_index >= 0,
             ExcAccessToNonLocalElement(
               index,
               vector.vector->Map().NumMyElements(),
               vector.vector->Map().MinMyGID(),
               vector.vector->Map().MaxMyGID()));


      return (*(vector.vector))[0][local_index];
    }
  } // namespace internal
#  endif

  namespace MPI
  {
    Vector::Vector()
      : Subscriptor()
      , last_action(Zero)
      , compressed(true)
      , has_ghosts(false)
      , vector(new Epetra_FEVector(
          Epetra_Map(0, 0, 0, Utilities::Trilinos::comm_self())))
    {}



    Vector::Vector(const IndexSet &parallel_partitioning,
                   const MPI_Comm  communicator)
      : Vector()
    {
      reinit(parallel_partitioning, communicator);
    }

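    // A minimal usage sketch for the constructor above; 'locally_owned_dofs'
    // and 'mpi_communicator' are assumed to be provided by the calling code
    // (for instance extracted from a DoFHandler):
    //
    //   TrilinosWrappers::MPI::Vector solution(locally_owned_dofs,
    //                                          mpi_communicator);
    //   solution = 0.;
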
89
91 : Vector()
92 {
94 vector = std::make_unique<Epetra_FEVector>(*v.vector);
96 }
97
98
99
    Vector::Vector(Vector &&v) // NOLINT
      : Vector()
    {
      // initialize a minimal, valid object and swap
      static_cast<Subscriptor &>(*this) = static_cast<Subscriptor &&>(v);
      swap(v);
    }


    Vector::Vector(const IndexSet &parallel_partitioner,
                   const Vector   &v,
                   const MPI_Comm  communicator)
      : Vector()
    {
      AssertThrow(parallel_partitioner.size() ==
                    static_cast<size_type>(
                      TrilinosWrappers::n_global_elements(v.vector->Map())),
                  ExcDimensionMismatch(parallel_partitioner.size(),
                                       TrilinosWrappers::n_global_elements(
                                         v.vector->Map())));

      vector = std::make_unique<Epetra_FEVector>(
        parallel_partitioner.make_trilinos_map(communicator, true));
      reinit(v, false, true);
    }


    Vector::Vector(const IndexSet &local,
                   const IndexSet &ghost,
                   const MPI_Comm  communicator)
      : Vector()
    {
      reinit(local, ghost, communicator, false);
    }


    void
    Vector::clear()
    {
      // When we clear the vector, reset the pointer and generate an empty
      // vector.
      Epetra_Map map(0, 0, Epetra_MpiComm(MPI_COMM_SELF));

      has_ghosts  = false;
      vector      = std::make_unique<Epetra_FEVector>(map);
      last_action = Zero;
    }


    void
    Vector::reinit(const IndexSet &parallel_partitioner,
                   const MPI_Comm  communicator,
                   const bool /*omit_zeroing_entries*/)
    {
      nonlocal_vector.reset();

      const bool overlapping =
        !parallel_partitioner.is_ascending_and_one_to_one(communicator);

      Epetra_Map map =
        parallel_partitioner.make_trilinos_map(communicator, overlapping);

      vector = std::make_unique<Epetra_FEVector>(map);

      has_ghosts = vector->Map().UniqueGIDs() == false;

      // If the IndexSets are overlapping, we don't really know
      // which process owns what. So we decide that no process
      // owns anything in that case. In particular asking for
      // the locally owned elements is not allowed.
      if (has_ghosts)
        {
          owned_elements.clear();
          owned_elements.set_size(parallel_partitioner.size());
        }
      else
        owned_elements = parallel_partitioner;

#  ifdef DEBUG
      const size_type n_elements_global =
        Utilities::MPI::sum(owned_elements.n_elements(), communicator);

      Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
#  endif

      last_action = Zero;
    }


    void
    Vector::reinit(const Vector &v,
                   const bool    omit_zeroing_entries,
                   const bool    allow_different_maps)
    {
      nonlocal_vector.reset();

      // In case we do not allow to have different maps, this call means that
      // we have to reset the vector. So clear the vector, initialize our map
      // with the map in v, and generate the vector.
      if (allow_different_maps == false)
        {
          // check equality for MPI communicators: We can only choose the fast
          // version in case the underlying Epetra_MpiComm object is the same,
          // otherwise we might access an MPI_Comm object that has been
          // deleted
          const Epetra_MpiComm *my_comm =
            dynamic_cast<const Epetra_MpiComm *>(&vector->Comm());
          const Epetra_MpiComm *v_comm =
            dynamic_cast<const Epetra_MpiComm *>(&v.vector->Comm());
          const bool same_communicators =
            my_comm != nullptr && v_comm != nullptr &&
            my_comm->DataPtr() == v_comm->DataPtr();
          if (!same_communicators ||
              vector->Map().SameAs(v.vector->Map()) == false)
            {
              vector = std::make_unique<Epetra_FEVector>(v.vector->Map());
              has_ghosts     = v.has_ghosts;
              last_action    = Zero;
              owned_elements = v.owned_elements;
            }
          else if (omit_zeroing_entries == false)
            {
              // old and new vectors have exactly the same map, i.e. size and
              // parallel distribution
              int ierr;
              ierr = vector->GlobalAssemble(last_action);
              (void)ierr;
              Assert(ierr == 0, ExcTrilinosError(ierr));

              ierr = vector->PutScalar(0.0);
              Assert(ierr == 0, ExcTrilinosError(ierr));

              last_action = Zero;
            }
        }

      // Otherwise, we have to check that the two vectors are already of the
      // same size, create an object for the data exchange and then insert all
      // the data. The first assertion is only a check whether the user knows
      // what they are doing.
      else
        {
          Assert(omit_zeroing_entries == false,
                 ExcMessage(
                   "It is not possible to exchange data with the "
                   "option 'omit_zeroing_entries' set, which would not write "
                   "elements."));

          AssertThrow(size() == v.size(),
                      ExcDimensionMismatch(size(), v.size()));

          Epetra_Import data_exchange(vector->Map(), v.vector->Map());

          const int ierr = vector->Import(*v.vector, data_exchange, Insert);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));

          last_action = Insert;
        }
#  ifdef DEBUG
      const Epetra_MpiComm *comm_ptr =
        dynamic_cast<const Epetra_MpiComm *>(&(v.vector->Comm()));
      Assert(comm_ptr != nullptr, ExcInternalError());
      const size_type n_elements_global =
        Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());
      Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
#  endif
    }


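    // A usage sketch for the reinit() overload above, assuming 'solution' is
    // an already initialized TrilinosWrappers::MPI::Vector: a temporary gets
    // the same size and parallel layout, and zeroing is skipped when the
    // layouts already match.
    //
    //   TrilinosWrappers::MPI::Vector tmp;
    //   tmp.reinit(solution, /*omit_zeroing_entries=*/true);
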
    void
    Vector::reinit(const MPI::BlockVector &v, const bool import_data)
    {
      nonlocal_vector.reset();
      owned_elements.clear();
      owned_elements.set_size(v.size());

      // In case we do not allow to have different maps, this call means that
      // we have to reset the vector. So clear the vector, initialize our map
      // with the map in v, and generate the vector.
      if (v.n_blocks() == 0)
        return;

      // create a vector that holds all the elements contained in the block
      // vector. need to manually create an Epetra_Map.
      size_type n_elements = 0, added_elements = 0, block_offset = 0;
      for (size_type block = 0; block < v.n_blocks(); ++block)
        n_elements += v.block(block).vector->Map().NumMyElements();
      std::vector<TrilinosWrappers::types::int_type> global_ids(n_elements, -1);
      for (size_type block = 0; block < v.n_blocks(); ++block)
        {
          TrilinosWrappers::types::int_type *glob_elements =
            TrilinosWrappers::my_global_elements(
              v.block(block).trilinos_partitioner());
          size_type vector_size = v.block(block).vector->Map().NumMyElements();
          for (size_type i = 0; i < vector_size; ++i)
            global_ids[added_elements++] = glob_elements[i] + block_offset;
          owned_elements.add_indices(v.block(block).owned_elements,
                                     block_offset);
          block_offset += v.block(block).size();
        }

      Assert(n_elements == added_elements, ExcInternalError());
      Epetra_Map new_map(v.size(),
                         n_elements,
                         global_ids.data(),
                         0,
                         v.block(0).trilinos_partitioner().Comm());

      auto actual_vec = std::make_unique<Epetra_FEVector>(new_map);

      TrilinosScalar *entries = (*actual_vec)[0];
      for (size_type block = 0; block < v.n_blocks(); ++block)
        {
          v.block(block).trilinos_vector().ExtractCopy(entries, 0);
          entries += v.block(block).vector->Map().NumMyElements();
        }

      if (import_data == true)
        {
          AssertThrow(static_cast<size_type>(TrilinosWrappers::global_length(
                        *actual_vec)) == v.size(),
                      ExcDimensionMismatch(TrilinosWrappers::global_length(
                                             *actual_vec),
                                           v.size()));

          Epetra_Import data_exchange(vector->Map(), actual_vec->Map());

          const int ierr = vector->Import(*actual_vec, data_exchange, Insert);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));

          last_action = Insert;
        }
      else
        vector = std::move(actual_vec);
#  ifdef DEBUG
      const Epetra_MpiComm *comm_ptr =
        dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
      Assert(comm_ptr != nullptr, ExcInternalError());
      const size_type n_elements_global =
        Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());

      Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
#  endif
    }


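    // A usage sketch for the BlockVector overload above; 'block_vec' is
    // assumed to be an already initialized TrilinosWrappers::MPI::BlockVector
    // whose blocks are concatenated into one monolithic vector:
    //
    //   TrilinosWrappers::MPI::Vector flattened;
    //   flattened.reinit(block_vec, /*import_data=*/false);
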
    void
    Vector::reinit(const IndexSet &locally_owned_entries,
                   const IndexSet &ghost_entries,
                   const MPI_Comm  communicator,
                   const bool      vector_writable)
    {
      nonlocal_vector.reset();
      owned_elements = locally_owned_entries;
      if (vector_writable == false)
        {
          IndexSet parallel_partitioner = locally_owned_entries;
          parallel_partitioner.add_indices(ghost_entries);
          Epetra_Map map =
            parallel_partitioner.make_trilinos_map(communicator, true);
          vector = std::make_unique<Epetra_FEVector>(map);
        }
      else
        {
          Epetra_Map map =
            locally_owned_entries.make_trilinos_map(communicator, true);
          Assert(map.IsOneToOne(),
                 ExcMessage("A writable vector must not have ghost entries in "
                            "its parallel partitioning"));

          if (vector->Map().SameAs(map) == false)
            vector = std::make_unique<Epetra_FEVector>(map);
          else
            {
              const int ierr = vector->PutScalar(0.);
              (void)ierr;
              Assert(ierr == 0, ExcTrilinosError(ierr));
            }

          IndexSet nonlocal_entries(ghost_entries);
          nonlocal_entries.subtract_set(locally_owned_entries);
          if (Utilities::MPI::n_mpi_processes(communicator) > 1)
            {
              Epetra_Map nonlocal_map =
                nonlocal_entries.make_trilinos_map(communicator, true);
              nonlocal_vector =
                std::make_unique<Epetra_MultiVector>(nonlocal_map, 1);
            }
        }

      has_ghosts = vector->Map().UniqueGIDs() == false;

      last_action = Zero;

#  ifdef DEBUG
      const size_type n_elements_global =
        Utilities::MPI::sum(owned_elements.n_elements(), communicator);

      Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
#  endif
    }


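    // A usage sketch for the overload above; 'locally_owned_dofs',
    // 'locally_relevant_dofs', and 'mpi_communicator' are assumed to come
    // from the calling code. With vector_writable == false the result is a
    // read-only vector with ghost entries; with vector_writable == true it
    // remains writable and ghost contributions are exchanged in compress().
    //
    //   TrilinosWrappers::MPI::Vector ghosted;
    //   ghosted.reinit(locally_owned_dofs,
    //                  locally_relevant_dofs,
    //                  mpi_communicator,
    //                  /*vector_writable=*/false);
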
    void
    Vector::reinit(
      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
      const bool                                                make_ghosted,
      const bool                                                vector_writable)
    {
      if (make_ghosted)
        {
          Assert(partitioner->ghost_indices_initialized(),
                 ExcMessage("You asked to create a ghosted vector, but the "
                            "partitioner does not provide ghost indices."));

          this->reinit(partitioner->locally_owned_range(),
                       partitioner->ghost_indices(),
                       partitioner->get_mpi_communicator(),
                       vector_writable);
        }
      else
        {
          this->reinit(partitioner->locally_owned_range(),
                       partitioner->get_mpi_communicator());
        }
    }


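    // A usage sketch for the Partitioner-based overload above; the index sets
    // and communicator are again assumed to exist in the calling code:
    //
    //   const auto partitioner =
    //     std::make_shared<const Utilities::MPI::Partitioner>(
    //       locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
    //   TrilinosWrappers::MPI::Vector ghosted;
    //   ghosted.reinit(partitioner,
    //                  /*make_ghosted=*/true,
    //                  /*vector_writable=*/false);
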
    Vector &
    Vector::operator=(const Vector &v)
    {
      Assert(vector.get() != nullptr,
             ExcMessage("Vector is not constructed properly."));

      // check equality for MPI communicators to avoid accessing a possibly
      // invalid MPI_Comm object
      const Epetra_MpiComm *my_comm =
        dynamic_cast<const Epetra_MpiComm *>(&vector->Comm());
      const Epetra_MpiComm *v_comm =
        dynamic_cast<const Epetra_MpiComm *>(&v.vector->Comm());
      const bool same_communicators = my_comm != nullptr && v_comm != nullptr &&
                                      my_comm->DataPtr() == v_comm->DataPtr();
      // Need to ask MPI whether the communicators are the same. We would like
      // to use the following checks but currently we cannot make sure the
      // memory of my_comm is not stale from some MPI_Comm_free
      // somewhere. This can happen when a vector lives in GrowingVectorMemory
      // data structures. Thus, the following code is commented out.
      //
      // if (my_comm != nullptr &&
      //     v_comm != nullptr &&
      //     my_comm->DataPtr() != v_comm->DataPtr())
      //   {
      //     int communicators_same = 0;
      //     const int ierr = MPI_Comm_compare (my_comm->GetMpiComm(),
      //                                        v_comm->GetMpiComm(),
      //                                        &communicators_same);
      //     AssertThrowMPI(ierr);
      //     if (!(communicators_same == MPI_IDENT ||
      //           communicators_same == MPI_CONGRUENT))
      //       same_communicators = false;
      //     else
      //       same_communicators = true;
      //   }

      // distinguish three cases. First case: both vectors have the same
      // layout (just need to copy the local data, not reset the memory and
      // the underlying Epetra_Map). The third case means that we have to
      // rebuild the calling vector.
      if (same_communicators && v.vector->Map().SameAs(vector->Map()))
        {
          *vector = *v.vector;
          if (v.nonlocal_vector.get() != nullptr)
            nonlocal_vector =
              std::make_unique<Epetra_MultiVector>(v.nonlocal_vector->Map(), 1);
          last_action = Zero;
        }
      // Second case: vectors have the same global
      // size, but different parallel layouts (and
      // one of them a one-to-one mapping). Then we
      // can call the import/export functionality.
      else if (size() == v.size() &&
               (v.vector->Map().UniqueGIDs() || vector->Map().UniqueGIDs()))
        {
          reinit(v, false, true);
        }
      // Third case: Vectors do not have the same
      // size.
      else
        {
          vector         = std::make_unique<Epetra_FEVector>(*v.vector);
          last_action    = Zero;
          has_ghosts     = v.has_ghosts;
          owned_elements = v.owned_elements;
        }

      if (v.nonlocal_vector.get() != nullptr)
        nonlocal_vector =
          std::make_unique<Epetra_MultiVector>(v.nonlocal_vector->Map(), 1);

      return *this;
    }


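    // A usage sketch for the copy assignment above: copying 'b' into a
    // default-constructed 'a' falls into the third case and rebuilds 'a'
    // with b's parallel layout and entries; if the two vectors already have
    // the same global size but different layouts, the second case exchanges
    // the data via an Epetra import instead.
    //
    //   TrilinosWrappers::MPI::Vector a;
    //   a = b;   // 'b' is assumed to be an initialized vector
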
    Vector &
    Vector::operator=(Vector &&v) noexcept
    {
      static_cast<Subscriptor &>(*this) = static_cast<Subscriptor &&>(v);
      swap(v);
      return *this;
    }


    template <typename number>
    Vector &
    Vector::operator=(const ::dealii::Vector<number> &v)
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));

      // this is probably not very efficient but works. in particular, we could
      // do better if we know that number==TrilinosScalar because then we could
      // elide the copying of elements
      //
      // let's hope this isn't a particularly frequent operation
      std::pair<size_type, size_type> local_range = this->local_range();
      for (size_type i = local_range.first; i < local_range.second; ++i)
        (*vector)[0][i - local_range.first] = v(i);

      return *this;
    }


    void
    Vector::import_nonlocal_data_for_fe(const TrilinosWrappers::SparseMatrix &m,
                                        const Vector                         &v)
    {
      Assert(m.trilinos_matrix().Filled() == true,
             ExcMessage("Matrix is not compressed. "
                        "Cannot find exchange information!"));
      Assert(v.vector->Map().UniqueGIDs() == true,
             ExcMessage("The input vector has overlapping data, "
                        "which is not allowed."));

      if (vector->Map().SameAs(m.trilinos_matrix().ColMap()) == false)
        vector =
          std::make_unique<Epetra_FEVector>(m.trilinos_matrix().ColMap());

      Epetra_Import data_exchange(vector->Map(), v.vector->Map());
      const int     ierr = vector->Import(*v.vector, data_exchange, Insert);

      AssertThrow(ierr == 0, ExcTrilinosError(ierr));

      last_action = Insert;
    }


    void
    Vector::import_elements(const LinearAlgebra::ReadWriteVector<double> &rwv,
                            const VectorOperation::values operation)
    {
      Assert(
        this->size() == rwv.size(),
        ExcMessage(
          "Both vectors need to have the same size for import_elements() to work!"));
      // TODO: a generic import_elements() function should handle any kind of
      // data layout in ReadWriteVector, but this function is of limited use as
      // this class will (hopefully) be retired eventually.
      Assert(this->locally_owned_elements() == rwv.get_stored_elements(),
             ExcNotImplemented());

      if (operation == VectorOperation::insert)
        {
          for (const auto idx : this->locally_owned_elements())
            (*this)[idx] = rwv[idx];
        }
      else if (operation == VectorOperation::add)
        {
          for (const auto idx : this->locally_owned_elements())
            (*this)[idx] += rwv[idx];
        }
      else
        AssertThrow(false, ExcNotImplemented());

      this->compress(operation);
    }


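    // A usage sketch for import_elements(); 'vec' is assumed to be a
    // non-ghosted, already initialized vector:
    //
    //   LinearAlgebra::ReadWriteVector<double> rw(vec.locally_owned_elements());
    //   for (const auto i : vec.locally_owned_elements())
    //     rw[i] = 1.;
    //   vec.import_elements(rw, VectorOperation::insert);
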
    void
    Vector::compress(VectorOperation::values given_last_action)
    {
      Assert(has_ghost_elements() == false,
             ExcMessage(
               "Calling compress() is only useful if a vector "
               "has been written into, but this is a vector with ghost "
               "elements and consequently is read-only. It does "
               "not make sense to call compress() for such "
               "vectors."));

      // Select which mode to send to Trilinos. Note that we use last_action if
      // available and ignore what the user tells us to detect wrongly mixed
      // operations. Typically given_last_action is only used on machines that
      // do not execute an operation (because they have no own cells for
      // example).
      Epetra_CombineMode mode = last_action;
      if (last_action == Zero)
        {
          if (given_last_action == VectorOperation::add)
            mode = Add;
          else if (given_last_action == VectorOperation::insert)
            mode = Insert;
          else
            Assert(
              false,
              ExcMessage(
                "compress() can only be called with VectorOperation add, insert, or unknown"));
        }
      else
        {
          Assert(
            ((last_action == Add) &&
             (given_last_action == VectorOperation::add)) ||
              ((last_action == Insert) &&
               (given_last_action == VectorOperation::insert)),
            ExcMessage(
              "The last operation on the Vector and the given last action in the compress() call do not agree!"));
        }


#  ifdef DEBUG
      // check that every process has decided to use the same mode. This will
      // otherwise result in undefined behavior in the call to
      // GlobalAssemble().
      const double          double_mode = mode;
      const Epetra_MpiComm *comm_ptr =
        dynamic_cast<const Epetra_MpiComm *>(&(trilinos_partitioner().Comm()));
      Assert(comm_ptr != nullptr, ExcInternalError());

      const Utilities::MPI::MinMaxAvg result =
        Utilities::MPI::min_max_avg(double_mode, comm_ptr->GetMpiComm());
      Assert(result.max == result.min,
             ExcMessage(
               "Not all processors agree whether the last operation on "
               "this vector was an addition or a set operation. This will "
               "prevent the compress() operation from succeeding."));

#  endif

      // Now pass over the information about what we did last to the vector.
      if (nonlocal_vector.get() == nullptr || mode != Add)
        {
          const auto ierr = vector->GlobalAssemble(mode);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));
        }
      else
        {
          Epetra_Export exporter(nonlocal_vector->Map(), vector->Map());

          int ierr = vector->Export(*nonlocal_vector, exporter, mode);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));

          ierr = nonlocal_vector->PutScalar(0.);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));
        }
      last_action = Zero;

      compressed = true;
    }


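    // The usual write-then-compress pattern that compress() serves, assuming
    // 'vec' is a writable vector and only locally owned entries are set:
    //
    //   for (const auto i : vec.locally_owned_elements())
    //     vec(i) = 1.;
    //   vec.compress(VectorOperation::insert);
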
    TrilinosScalar
    Vector::operator()(const size_type index) const
    {
      // Extract local indices in the vector.
      const TrilinosWrappers::types::int_type trilinos_i = vector->Map().LID(
        static_cast<TrilinosWrappers::types::int_type>(index));
      TrilinosScalar value = 0.;

      // If the element is not present on the current processor, we can't
      // continue. This is the main difference to the el() function.
      if (trilinos_i == -1)
        {
          Assert(false,
                 ExcAccessToNonLocalElement(index,
                                            vector->Map().NumMyElements(),
                                            vector->Map().MinMyGID(),
                                            vector->Map().MaxMyGID()));
        }
      else
        value = (*vector)[0][trilinos_i];

      return value;
    }


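    // Element access through operator() only works for entries stored on the
    // calling process (locally owned entries, or ghost entries of a ghosted
    // vector). A sketch, assuming 'ghosted' and 'locally_relevant_dofs' are
    // set up as in the reinit() examples above:
    //
    //   double local_sum = 0.;
    //   for (const auto i : locally_relevant_dofs)
    //     local_sum += ghosted(i);
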
    void
    Vector::add(const Vector &v, const bool allow_different_maps)
    {
      if (allow_different_maps == false)
        *this += v;
      else
        {
          Assert(!has_ghost_elements(), ExcGhostsPresent());
          AssertThrow(size() == v.size(),
                      ExcDimensionMismatch(size(), v.size()));

#  if DEAL_II_TRILINOS_VERSION_GTE(11, 11, 0)
          Epetra_Import data_exchange(vector->Map(), v.vector->Map());
          int           ierr =
            vector->Import(*v.vector, data_exchange, Epetra_AddLocalAlso);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));
          last_action = Add;
#  else
          // In versions older than 11.11 the Import function is broken for
          // adding. Hence, we provide a workaround in this case.

          Epetra_MultiVector dummy(vector->Map(), 1, false);
          Epetra_Import      data_exchange(dummy.Map(), v.vector->Map());

          int ierr = dummy.Import(*v.vector, data_exchange, Insert);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));

          ierr = vector->Update(1.0, dummy, 1.0);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));
#  endif
        }
    }


    bool
    Vector::operator==(const Vector &v) const
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
      if (vector->Map().NumMyElements() != v.vector->Map().NumMyElements())
        return false;

      size_type vector_size = vector->Map().NumMyElements();
      for (size_type i = 0; i < vector_size; ++i)
        if ((*(v.vector))[0][i] != (*vector)[0][i])
          return false;

      return true;
    }


    bool
    Vector::operator!=(const Vector &v) const
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));

      return (!(*this == v));
    }


    bool
    Vector::all_zero() const
    {
      // get a representation of the vector and
      // loop over all the elements
      TrilinosScalar       *start_ptr = (*vector)[0];
      const TrilinosScalar *ptr       = start_ptr,
                           *eptr = start_ptr + vector->Map().NumMyElements();
      unsigned int flag = 0;
      while (ptr != eptr)
        {
          if (*ptr != 0)
            {
              flag = 1;
              break;
            }
          ++ptr;
        }

      // in parallel, check that the vector
      // is zero on _all_ processors.
      const Epetra_MpiComm *mpi_comm =
        dynamic_cast<const Epetra_MpiComm *>(&vector->Map().Comm());
      Assert(mpi_comm != nullptr, ExcInternalError());
      unsigned int num_nonzero = Utilities::MPI::sum(flag, mpi_comm->Comm());
      return num_nonzero == 0;
    }


    bool
    Vector::is_non_negative() const
    {
      // get a representation of the vector and
      // loop over all the elements
      TrilinosScalar       *start_ptr = (*vector)[0];
      const TrilinosScalar *ptr       = start_ptr,
                           *eptr = start_ptr + vector->Map().NumMyElements();
      unsigned int flag = 0;
      while (ptr != eptr)
        {
          if (*ptr < 0.0)
            {
              flag = 1;
              break;
            }
          ++ptr;
        }

      // in parallel, check that no entry is
      // negative on _any_ processor.
      const auto max_n_negative =
        Utilities::MPI::max(flag, get_mpi_communicator());
      return max_n_negative == 0;
    }


    void
    Vector::print(std::ostream      &out,
                  const unsigned int precision,
                  const bool         scientific,
                  const bool         across) const
    {
      AssertThrow(out.fail() == false, ExcIO());
      boost::io::ios_flags_saver restore_flags(out);


      out.precision(precision);
      if (scientific)
        out.setf(std::ios::scientific, std::ios::floatfield);
      else
        out.setf(std::ios::fixed, std::ios::floatfield);

      size_type vector_size = vector->Map().NumMyElements();
      if (size() != vector_size)
        {
          auto global_id = [&](const size_type index) {
            return gid(vector->Map(), index);
          };
          out << "size:" << size()
              << " locally_owned_size:" << vector->Map().NumMyElements() << " :"
              << std::endl;
          for (size_type i = 0; i < vector_size; ++i)
            out << "[" << global_id(i) << "]: " << (*(vector))[0][i]
                << std::endl;
        }
      else
        {
          TrilinosScalar *val;
          int             leading_dimension;
          int             ierr = vector->ExtractView(&val, &leading_dimension);

          AssertThrow(ierr == 0, ExcTrilinosError(ierr));
          if (across)
            for (size_type i = 0; i < size(); ++i)
              out << static_cast<double>(val[i]) << ' ';
          else
            for (size_type i = 0; i < size(); ++i)
              out << static_cast<double>(val[i]) << std::endl;
          out << std::endl;
        }

      AssertThrow(out.fail() == false, ExcIO());
    }


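    // A usage sketch for print(): write the locally stored entries of 'vec'
    // with 8 digits in scientific notation, one entry per line:
    //
    //   vec.print(std::cout, 8, /*scientific=*/true, /*across=*/false);
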
    void
    Vector::swap(Vector &v) noexcept
    {
      std::swap(last_action, v.last_action);
      std::swap(compressed, v.compressed);
      std::swap(has_ghosts, v.has_ghosts);
      std::swap(vector, v.vector);
      std::swap(nonlocal_vector, v.nonlocal_vector);
      std::swap(owned_elements, v.owned_elements);
    }


    std::size_t
    Vector::memory_consumption() const
    {
      // TODO[TH]: No accurate memory
      // consumption for Trilinos vectors
      // yet. This is a rough approximation with
      // one index and the value per local
      // entry.
      return sizeof(*this) +
             this->vector->Map().NumMyElements() *
               (sizeof(double) + sizeof(TrilinosWrappers::types::int_type));
    }

    // explicit instantiations
#  ifndef DOXYGEN
#    include "trilinos_vector.inst"
#  endif
  } // namespace MPI
} // namespace TrilinosWrappers

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_TRILINOS