trilinos_vector.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2008 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/lac/trilinos_vector.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/base/index_set.h>
#  include <deal.II/base/mpi.h>

#  include <deal.II/lac/read_write_vector.h>
#  include <deal.II/lac/trilinos_parallel_block_vector.h>
#  include <deal.II/lac/trilinos_sparse_matrix.h>

#  include <boost/io/ios_state.hpp>

#  include <Epetra_Export.h>
#  include <Epetra_Import.h>
#  include <Epetra_Vector.h>

#  include <cmath>
#  include <memory>


DEAL_II_NAMESPACE_OPEN
namespace TrilinosWrappers
{
#  ifndef DOXYGEN
  namespace internal
  {
    VectorReference::operator TrilinosScalar() const
    {
      AssertIndexRange(index, vector.size());

      // Trilinos allows for vectors to be referenced by the [] or ()
      // operators but only () checks index bounds. We check these bounds by
      // ourselves, so we can use []. Note that we can only get local values.

      const TrilinosWrappers::types::int_type local_index =
        vector.vector->Map().LID(
          static_cast<TrilinosWrappers::types::int_type>(index));
#  ifndef DEAL_II_WITH_64BIT_INDICES
      Assert(local_index >= 0,
             ExcAccessToNonLocalElement(index,
                                        vector.vector->Map().NumMyElements(),
                                        vector.vector->Map().MinMyGID(),
                                        vector.vector->Map().MaxMyGID()));
#  else
      Assert(local_index >= 0,
             ExcAccessToNonLocalElement(index,
                                        vector.vector->Map().NumMyElements(),
                                        vector.vector->Map().MinMyGID64(),
                                        vector.vector->Map().MaxMyGID64()));
#  endif

      return (*(vector.vector))[0][local_index];
    }
  } // namespace internal
#  endif

  namespace MPI
  {
    Vector::Vector()
      : last_action(Zero)
      , compressed(true)
      , has_ghosts(false)
      , vector(new Epetra_FEVector(
          Epetra_Map(0, 0, 0, Utilities::Trilinos::comm_self())))
    {}


    Vector::Vector(const IndexSet &parallel_partitioning,
                   const MPI_Comm  communicator)
      : Vector()
    {
      reinit(parallel_partitioning, communicator);
    }



    Vector::Vector(const Vector &v)
      : Vector()
    {
      has_ghosts     = v.has_ghosts;
      vector         = std::make_unique<Epetra_FEVector>(*v.vector);
      owned_elements = v.owned_elements;
    }


    Vector::Vector(Vector &&v) // NOLINT
      : Vector()
    {
      // initialize a minimal, valid object and swap
      static_cast<EnableObserverPointer &>(*this) =
        static_cast<EnableObserverPointer &&>(v);
      swap(v);
    }


    Vector::Vector(const IndexSet &parallel_partitioner,
                   const Vector   &v,
                   const MPI_Comm  communicator)
      : Vector()
    {
      AssertThrow(parallel_partitioner.size() ==
                    static_cast<size_type>(
                      TrilinosWrappers::n_global_elements(v.vector->Map())),
                  ExcDimensionMismatch(parallel_partitioner.size(),
                                       TrilinosWrappers::n_global_elements(
                                         v.vector->Map())));

      vector = std::make_unique<Epetra_FEVector>(
        parallel_partitioner.make_trilinos_map(communicator, true));
      reinit(v, false, true);
    }


    Vector::Vector(const IndexSet &local,
                   const IndexSet &ghost,
                   const MPI_Comm  communicator)
      : Vector()
    {
      reinit(local, ghost, communicator, false);
    }
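
    // A minimal usage sketch (illustrative only, not part of the
    // implementation): a ghosted vector is built from the locally owned and
    // the ghost index sets of the parallel partitioning, e.g.
    //
    //   IndexSet owned(100);                    // this process owns 0..49
    //   owned.add_range(0, 50);
    //   IndexSet ghosts(100);                   // and additionally reads 50
    //   ghosts.add_range(50, 51);
    //   TrilinosWrappers::MPI::Vector ghosted(owned, ghosts, MPI_COMM_WORLD);
    //
    // A vector created this way stores ghost entries and is therefore
    // read-only; writable vectors are obtained through the reinit() variant
    // with 'vector_writable == true' further down in this file.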


    void
    Vector::clear()
    {
      // When we clear the vector, reset the pointer and generate an empty
      // vector.
      Epetra_Map map(0, 0, Epetra_MpiComm(MPI_COMM_SELF));

      has_ghosts  = false;
      vector      = std::make_unique<Epetra_FEVector>(map);
      last_action = Zero;
    }


    void
    Vector::reinit(const IndexSet &parallel_partitioner,
                   const MPI_Comm  communicator,
                   const bool /*omit_zeroing_entries*/)
    {
      nonlocal_vector.reset();

      const bool overlapping =
        !parallel_partitioner.is_ascending_and_one_to_one(communicator);

      Epetra_Map map =
        parallel_partitioner.make_trilinos_map(communicator, overlapping);

      vector = std::make_unique<Epetra_FEVector>(map);

      has_ghosts = vector->Map().UniqueGIDs() == false;

      // If the IndexSets are overlapping, we don't really know
      // which process owns what. So we decide that no process
      // owns anything in that case. In particular asking for
      // the locally owned elements is not allowed.
      if (has_ghosts)
        {
          owned_elements.clear();
          owned_elements.set_size(parallel_partitioner.size());
        }
      else
        owned_elements = parallel_partitioner;

#  ifdef DEBUG
      const size_type n_elements_global =
        Utilities::MPI::sum(owned_elements.n_elements(), communicator);

      Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
#  endif

      last_action = Zero;
    }


    void
    Vector::reinit(const Vector &v,
                   const bool    omit_zeroing_entries,
                   const bool    allow_different_maps)
    {
      nonlocal_vector.reset();

      // In case we do not allow to have different maps, this call means that
      // we have to reset the vector. So clear the vector, initialize our map
      // with the map in v, and generate the vector.
      if (allow_different_maps == false)
        {
          // check equality for MPI communicators: We can only choose the fast
          // version in case the underlying Epetra_MpiComm object is the same,
          // otherwise we might access an MPI_Comm object that has been
          // deleted
          const Epetra_MpiComm *my_comm =
            dynamic_cast<const Epetra_MpiComm *>(&vector->Comm());
          const Epetra_MpiComm *v_comm =
            dynamic_cast<const Epetra_MpiComm *>(&v.vector->Comm());
          const bool same_communicators =
            my_comm != nullptr && v_comm != nullptr &&
            my_comm->DataPtr() == v_comm->DataPtr();
          if (!same_communicators ||
              vector->Map().SameAs(v.vector->Map()) == false)
            {
              vector = std::make_unique<Epetra_FEVector>(v.vector->Map());
              has_ghosts     = v.has_ghosts;
              last_action    = Zero;
              owned_elements = v.owned_elements;
            }
          else if (omit_zeroing_entries == false)
            {
              // old and new vectors have exactly the same map, i.e. size and
              // parallel distribution
              int ierr;
              ierr = vector->GlobalAssemble(last_action);
              (void)ierr;
              Assert(ierr == 0, ExcTrilinosError(ierr));

              ierr = vector->PutScalar(0.0);
              Assert(ierr == 0, ExcTrilinosError(ierr));

              last_action = Zero;
            }
        }

      // Otherwise, we have to check that the two vectors are already of the
      // same size, create an object for the data exchange and then insert all
      // the data. The first assertion is only a check whether the user knows
      // what they are doing.
      else
        {
          Assert(omit_zeroing_entries == false,
                 ExcMessage(
                   "It is not possible to exchange data with the "
                   "option 'omit_zeroing_entries' set, which would not write "
                   "elements."));

          AssertThrow(size() == v.size(),
                      ExcDimensionMismatch(size(), v.size()));

          Epetra_Import data_exchange(vector->Map(), v.vector->Map());

          const int ierr = vector->Import(*v.vector, data_exchange, Insert);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));

          last_action = Insert;
        }
#  ifdef DEBUG
      const Epetra_MpiComm *comm_ptr =
        dynamic_cast<const Epetra_MpiComm *>(&(v.vector->Comm()));
      Assert(comm_ptr != nullptr, ExcInternalError());
      const size_type n_elements_global =
        Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());

      Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
#  endif
    }


    void
    Vector::reinit(const MPI::BlockVector &v, const bool import_data)
    {
      nonlocal_vector.reset();
      owned_elements.clear();
      owned_elements.set_size(v.size());

      // if the block vector is empty, there is nothing to do
      if (v.n_blocks() == 0)
        return;

      // create a vector that holds all the elements contained in the block
      // vector. need to manually create an Epetra_Map.
      size_type n_elements = 0, added_elements = 0, block_offset = 0;
      for (size_type block = 0; block < v.n_blocks(); ++block)
        n_elements += v.block(block).vector->Map().NumMyElements();
      std::vector<TrilinosWrappers::types::int_type> global_ids(n_elements, -1);
      for (size_type block = 0; block < v.n_blocks(); ++block)
        {
          TrilinosWrappers::types::int_type *glob_elements =
            TrilinosWrappers::my_global_elements(
              v.block(block).trilinos_partitioner());
          size_type vector_size = v.block(block).vector->Map().NumMyElements();
          for (size_type i = 0; i < vector_size; ++i)
            global_ids[added_elements++] = glob_elements[i] + block_offset;
          owned_elements.add_indices(v.block(block).owned_elements,
                                     block_offset);
          block_offset += v.block(block).size();
        }

      Assert(n_elements == added_elements, ExcInternalError());
      Epetra_Map new_map(v.size(),
                         n_elements,
                         global_ids.data(),
                         0,
                         v.block(0).trilinos_partitioner().Comm());

      auto actual_vec = std::make_unique<Epetra_FEVector>(new_map);

      TrilinosScalar *entries = (*actual_vec)[0];
      for (size_type block = 0; block < v.n_blocks(); ++block)
        {
          v.block(block).trilinos_vector().ExtractCopy(entries, 0);
          entries += v.block(block).vector->Map().NumMyElements();
        }

      if (import_data == true)
        {
          AssertThrow(static_cast<size_type>(TrilinosWrappers::global_length(
                        *actual_vec)) == v.size(),
                      ExcDimensionMismatch(TrilinosWrappers::global_length(
                                             *actual_vec),
                                           v.size()));

          Epetra_Import data_exchange(vector->Map(), actual_vec->Map());

          const int ierr = vector->Import(*actual_vec, data_exchange, Insert);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));

          last_action = Insert;
        }
      else
        vector = std::move(actual_vec);
#  ifdef DEBUG
      const Epetra_MpiComm *comm_ptr =
        dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
      Assert(comm_ptr != nullptr, ExcInternalError());
      const size_type n_elements_global =
        Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());

      Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
#  endif
    }


    void
    Vector::reinit(const IndexSet &locally_owned_entries,
                   const IndexSet &ghost_entries,
                   const MPI_Comm  communicator,
                   const bool      vector_writable)
    {
      nonlocal_vector.reset();
      owned_elements = locally_owned_entries;
      if (vector_writable == false)
        {
          IndexSet parallel_partitioner = locally_owned_entries;
          parallel_partitioner.add_indices(ghost_entries);
          Epetra_Map map =
            parallel_partitioner.make_trilinos_map(communicator, true);
          vector = std::make_unique<Epetra_FEVector>(map);
        }
      else
        {
          Epetra_Map map =
            locally_owned_entries.make_trilinos_map(communicator, true);
          Assert(map.IsOneToOne(),
                 ExcMessage("A writable vector must not have ghost entries in "
                            "its parallel partitioning"));

          if (vector->Map().SameAs(map) == false)
            vector = std::make_unique<Epetra_FEVector>(map);
          else
            {
              const int ierr = vector->PutScalar(0.);
              (void)ierr;
              Assert(ierr == 0, ExcTrilinosError(ierr));
            }

          IndexSet nonlocal_entries(ghost_entries);
          nonlocal_entries.subtract_set(locally_owned_entries);
          if (Utilities::MPI::n_mpi_processes(communicator) > 1)
            {
              Epetra_Map nonlocal_map =
                nonlocal_entries.make_trilinos_map(communicator, true);
              nonlocal_vector =
                std::make_unique<Epetra_MultiVector>(nonlocal_map, 1);
            }
        }

      has_ghosts = vector->Map().UniqueGIDs() == false;

      last_action = Zero;

#  ifdef DEBUG
      const size_type n_elements_global =
        Utilities::MPI::sum(owned_elements.n_elements(), communicator);

      Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
#  endif
    }
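
    // Sketch of the two modes of the reinit() call above (index sets and
    // communicator are placeholders, for illustration only):
    //
    //   TrilinosWrappers::MPI::Vector v;
    //
    //   v.reinit(owned, ghosts, communicator, /*vector_writable=*/false);
    //   // -> a single Epetra_FEVector whose map contains owned and ghost
    //   //    rows; the vector is ghosted and therefore read-only.
    //
    //   v.reinit(owned, ghosts, communicator, /*vector_writable=*/true);
    //   // -> the main vector holds only the owned rows; writes to
    //   //    off-processor entries are buffered in 'nonlocal_vector' and
    //   //    shipped to their owners by compress(VectorOperation::add).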


    void
    Vector::reinit(
      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
      const bool                                                make_ghosted,
      const bool                                                vector_writable)
    {
      if (make_ghosted)
        {
          Assert(partitioner->ghost_indices_initialized(),
                 ExcMessage("You asked to create a ghosted vector, but the "
                            "partitioner does not provide ghost indices."));

          this->reinit(partitioner->locally_owned_range(),
                       partitioner->ghost_indices(),
                       partitioner->get_mpi_communicator(),
                       vector_writable);
        }
      else
        {
          this->reinit(partitioner->locally_owned_range(),
                       partitioner->get_mpi_communicator());
        }
    }
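
    // The same partitioning information is often already stored in a
    // Utilities::MPI::Partitioner object available elsewhere in a program;
    // a sketch of the corresponding call (the 'partitioner' object is
    // assumed to have been set up with ghost indices):
    //
    //   TrilinosWrappers::MPI::Vector v;
    //   v.reinit(partitioner, /*make_ghosted=*/true, /*vector_writable=*/false);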


    Vector &
    Vector::operator=(const Vector &v)
    {
      Assert(vector.get() != nullptr,
             ExcMessage("Vector is not constructed properly."));

      // check equality for MPI communicators to avoid accessing a possibly
      // invalid MPI_Comm object
      const Epetra_MpiComm *my_comm =
        dynamic_cast<const Epetra_MpiComm *>(&vector->Comm());
      const Epetra_MpiComm *v_comm =
        dynamic_cast<const Epetra_MpiComm *>(&v.vector->Comm());
      const bool same_communicators = my_comm != nullptr && v_comm != nullptr &&
                                      my_comm->DataPtr() == v_comm->DataPtr();
      // Need to ask MPI whether the communicators are the same. We would like
      // to use the following checks but currently we cannot make sure the
      // memory of my_comm is not stale from some MPI_Comm_free
      // somewhere. This can happen when a vector lives in GrowingVectorMemory
      // data structures. Thus, the following code is commented out.
      //
      // if (my_comm != nullptr &&
      //     v_comm != nullptr &&
      //     my_comm->DataPtr() != v_comm->DataPtr())
      //   {
      //     int communicators_same = 0;
      //     const int ierr = MPI_Comm_compare (my_comm->GetMpiComm(),
      //                                        v_comm->GetMpiComm(),
      //                                        &communicators_same);
      //     AssertThrowMPI(ierr);
      //     if (!(communicators_same == MPI_IDENT ||
      //           communicators_same == MPI_CONGRUENT))
      //       same_communicators = false;
      //     else
      //       same_communicators = true;
      //   }

      // distinguish three cases. First case: both vectors have the same
      // layout (just need to copy the local data, not reset the memory and
      // the underlying Epetra_Map). The third case means that we have to
      // rebuild the calling vector.
      if (same_communicators && v.vector->Map().SameAs(vector->Map()))
        {
          *vector = *v.vector;
          if (v.nonlocal_vector.get() != nullptr)
            nonlocal_vector =
              std::make_unique<Epetra_MultiVector>(v.nonlocal_vector->Map(), 1);
          last_action = Zero;
        }
      // Second case: vectors have the same global
      // size, but different parallel layouts (and
      // one of them a one-to-one mapping). Then we
      // can call the import/export functionality.
      else if (size() == v.size() &&
               (v.vector->Map().UniqueGIDs() || vector->Map().UniqueGIDs()))
        {
          reinit(v, false, true);
        }
      // Third case: Vectors do not have the same
      // size.
      else
        {
          vector         = std::make_unique<Epetra_FEVector>(*v.vector);
          last_action    = Zero;
          has_ghosts     = v.has_ghosts;
          owned_elements = v.owned_elements;
        }

      if (v.nonlocal_vector.get() != nullptr)
        nonlocal_vector =
          std::make_unique<Epetra_MultiVector>(v.nonlocal_vector->Map(), 1);

      return *this;
    }


    Vector &
    Vector::operator=(Vector &&v) noexcept
    {
      static_cast<EnableObserverPointer &>(*this) =
        static_cast<EnableObserverPointer &&>(v);
      swap(v);
      return *this;
    }


    template <typename number>
    Vector &
    Vector::operator=(const ::dealii::Vector<number> &v)
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));

      // this is probably not very efficient but works. in particular, we could
      // do better if we know that number==TrilinosScalar because then we could
      // elide the copying of elements
      //
      // let's hope this isn't a particularly frequent operation
      std::pair<size_type, size_type> local_range = this->local_range();
      for (size_type i = local_range.first; i < local_range.second; ++i)
        (*vector)[0][i - local_range.first] = v(i);

      return *this;
    }


    void
    Vector::import_nonlocal_data_for_fe(const TrilinosWrappers::SparseMatrix &m,
                                        const Vector                         &v)
    {
      Assert(m.trilinos_matrix().Filled() == true,
             ExcMessage("Matrix is not compressed. "
                        "Cannot find exchange information!"));
      Assert(v.vector->Map().UniqueGIDs() == true,
             ExcMessage("The input vector has overlapping data, "
                        "which is not allowed."));

      if (vector->Map().SameAs(m.trilinos_matrix().ColMap()) == false)
        vector =
          std::make_unique<Epetra_FEVector>(m.trilinos_matrix().ColMap());

      Epetra_Import data_exchange(vector->Map(), v.vector->Map());
      const int ierr = vector->Import(*v.vector, data_exchange, Insert);

      AssertThrow(ierr == 0, ExcTrilinosError(ierr));

      last_action = Insert;
    }


    void
    Vector::import_elements(const LinearAlgebra::ReadWriteVector<double> &rwv,
                            const VectorOperation::values                 operation)
    {
      Assert(
        this->size() == rwv.size(),
        ExcMessage(
          "Both vectors need to have the same size for import_elements() to work!"));
      // TODO: a generic import_elements() function should handle any kind of
      // data layout in ReadWriteVector, but this function is of limited use as
      // this class will (hopefully) be retired eventually.
      Assert(this->locally_owned_elements() == rwv.get_stored_elements(),
             ExcNotImplemented());

      if (operation == VectorOperation::insert)
        {
          for (const auto idx : this->locally_owned_elements())
            (*this)[idx] = rwv[idx];
        }
      else if (operation == VectorOperation::add)
        {
          for (const auto idx : this->locally_owned_elements())
            (*this)[idx] += rwv[idx];
        }
      else
        AssertThrow(false, ExcNotImplemented());

      this->compress(operation);
    }
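
    // A short usage sketch for import_elements() (names are placeholders,
    // for illustration only): fill a ReadWriteVector that stores exactly the
    // locally owned elements of 'v' and copy it over:
    //
    //   LinearAlgebra::ReadWriteVector<double> rw(v.locally_owned_elements());
    //   for (const auto i : rw.get_stored_elements())
    //     rw[i] = 1.0;
    //   v.import_elements(rw, VectorOperation::insert);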


    void
    Vector::compress(VectorOperation::values given_last_action)
    {
      Assert(has_ghost_elements() == false,
             ExcMessage(
               "Calling compress() is only useful if a vector "
               "has been written into, but this is a vector with ghost "
               "elements and consequently is read-only. It does "
               "not make sense to call compress() for such "
               "vectors."));

      // Select which mode to send to Trilinos. Note that we use last_action if
      // available and ignore what the user tells us to detect wrongly mixed
      // operations. Typically given_last_action is only used on machines that
      // do not execute an operation (because they have no own cells for
      // example).
      Epetra_CombineMode mode = last_action;
      if (last_action == Zero)
        {
          if (given_last_action == VectorOperation::add)
            mode = Add;
          else if (given_last_action == VectorOperation::insert)
            mode = Insert;
          else
            Assert(
              false,
              ExcMessage(
                "compress() can only be called with VectorOperation add, insert, or unknown"));
        }
      else
        {
          Assert(
            ((last_action == Add) &&
             (given_last_action == VectorOperation::add)) ||
              ((last_action == Insert) &&
               (given_last_action == VectorOperation::insert)),
            ExcMessage(
              "The last operation on the Vector and the given last action in the compress() call do not agree!"));
        }


#  ifdef DEBUG
      // check that every process has decided to use the same mode. This will
      // otherwise result in undefined behavior in the call to
      // GlobalAssemble().
      const double          double_mode = mode;
      const Epetra_MpiComm *comm_ptr =
        dynamic_cast<const Epetra_MpiComm *>(&(trilinos_partitioner().Comm()));
      Assert(comm_ptr != nullptr, ExcInternalError());

      const Utilities::MPI::MinMaxAvg result =
        Utilities::MPI::min_max_avg(double_mode, comm_ptr->Comm());
      Assert(result.max == result.min,
             ExcMessage(
               "Not all processors agree whether the last operation on "
               "this vector was an addition or a set operation. This will "
               "prevent the compress() operation from succeeding."));

#  endif

      // Now pass over the information about what we did last to the vector.
      if (nonlocal_vector.get() == nullptr || mode != Add)
        {
          const auto ierr = vector->GlobalAssemble(mode);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));
        }
      else
        {
          Epetra_Export exporter(nonlocal_vector->Map(), vector->Map());

          int ierr = vector->Export(*nonlocal_vector, exporter, mode);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));

          ierr = nonlocal_vector->PutScalar(0.);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));
        }
      last_action = Zero;

      compressed = true;
    }
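
    // The assembly pattern this function supports, as a sketch (indices and
    // values are placeholders): every process adds into entries it may not
    // own, then all processes call compress() with the same operation:
    //
    //   v = 0.;
    //   for (...)                            // local assembly loop
    //     v.add(local_dof_indices, local_values);
    //   v.compress(VectorOperation::add);    // ship off-processor data
    //
    // Mixing additions and insertions between two compress() calls trips the
    // assertions above.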


    TrilinosScalar
    Vector::operator()(const size_type index) const
    {
      // Extract local indices in the vector.
      const TrilinosWrappers::types::int_type trilinos_i = vector->Map().LID(
        static_cast<TrilinosWrappers::types::int_type>(index));
      TrilinosScalar value = 0.;

      // If the element is not present on the current processor, we can't
      // continue. This is the main difference to the el() function.
      if (trilinos_i == -1)
        {
#  ifndef DEAL_II_WITH_64BIT_INDICES
          Assert(false,
                 ExcAccessToNonLocalElement(index,
                                            vector->Map().NumMyElements(),
                                            vector->Map().MinMyGID(),
                                            vector->Map().MaxMyGID()));
#  else
          Assert(false,
                 ExcAccessToNonLocalElement(index,
                                            vector->Map().NumMyElements(),
                                            vector->Map().MinMyGID64(),
                                            vector->Map().MaxMyGID64()));
#  endif
        }
      else
        value = (*vector)[0][trilinos_i];

      return value;
    }
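
    // Element access through operator() only succeeds for entries stored on
    // the calling process, i.e., locally owned entries or ghost entries of a
    // ghosted vector. A conservative guarded read could look like this
    // (sketch, 'v' and 'i' are placeholders):
    //
    //   if (v.locally_owned_elements().is_element(i))
    //     {
    //       const TrilinosScalar vi = v(i);
    //       // ... use vi ...
    //     }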


    void
    Vector::add(const Vector &v, const bool allow_different_maps)
    {
      if (allow_different_maps == false)
        *this += v;
      else
        {
          Assert(!has_ghost_elements(), ExcGhostsPresent());
          AssertThrow(size() == v.size(),
                      ExcDimensionMismatch(size(), v.size()));

#  if DEAL_II_TRILINOS_VERSION_GTE(11, 11, 0)
          Epetra_Import data_exchange(vector->Map(), v.vector->Map());
          int ierr =
            vector->Import(*v.vector, data_exchange, Epetra_AddLocalAlso);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));
          last_action = Add;
#  else
          // In versions older than 11.11 the Import function is broken for
          // adding. Hence, we provide a workaround in this case.

          Epetra_MultiVector dummy(vector->Map(), 1, false);
          Epetra_Import      data_exchange(dummy.Map(), v.vector->Map());

          int ierr = dummy.Import(*v.vector, data_exchange, Insert);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));

          ierr = vector->Update(1.0, dummy, 1.0);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));
#  endif
        }
    }


    bool
    Vector::operator==(const Vector &v) const
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
      if (vector->Map().NumMyElements() != v.vector->Map().NumMyElements())
        return false;

      size_type vector_size = vector->Map().NumMyElements();
      for (size_type i = 0; i < vector_size; ++i)
        if ((*(v.vector))[0][i] != (*vector)[0][i])
          return false;

      return true;
    }


    bool
    Vector::operator!=(const Vector &v) const
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));

      return (!(*this == v));
    }


    bool
    Vector::all_zero() const
    {
      // get a representation of the vector and
      // loop over all the elements
      TrilinosScalar       *start_ptr = (*vector)[0];
      const TrilinosScalar *ptr       = start_ptr,
                           *eptr = start_ptr + vector->Map().NumMyElements();
      unsigned int flag = 0;
      while (ptr != eptr)
        {
          if (*ptr != 0)
            {
              flag = 1;
              break;
            }
          ++ptr;
        }

      // in parallel, check that the vector
      // is zero on _all_ processors.
      const Epetra_MpiComm *mpi_comm =
        dynamic_cast<const Epetra_MpiComm *>(&vector->Map().Comm());
      Assert(mpi_comm != nullptr, ExcInternalError());
      unsigned int num_nonzero = Utilities::MPI::sum(flag, mpi_comm->Comm());
      return num_nonzero == 0;
    }


    bool
    Vector::is_non_negative() const
    {
      // get a representation of the vector and
      // loop over all the elements
      TrilinosScalar       *start_ptr = (*vector)[0];
      const TrilinosScalar *ptr       = start_ptr,
                           *eptr = start_ptr + vector->Map().NumMyElements();
      unsigned int flag = 0;
      while (ptr != eptr)
        {
          if (*ptr < 0.0)
            {
              flag = 1;
              break;
            }
          ++ptr;
        }

      // in parallel, check that the vector
      // is non-negative on _all_ processors.
      const auto max_n_negative =
        Utilities::MPI::max(flag, get_mpi_communicator());
      return max_n_negative == 0;
    }


    void
    Vector::print(std::ostream      &out,
                  const unsigned int precision,
                  const bool         scientific,
                  const bool         across) const
    {
      AssertThrow(out.fail() == false, ExcIO());
      boost::io::ios_flags_saver restore_flags(out);

      out.precision(precision);
      if (scientific)
        out.setf(std::ios::scientific, std::ios::floatfield);
      else
        out.setf(std::ios::fixed, std::ios::floatfield);

      size_type vector_size = vector->Map().NumMyElements();
      if (size() != vector_size)
        {
          auto global_id = [&](const size_type index) {
            return gid(vector->Map(), index);
          };
          out << "size:" << size()
              << " locally_owned_size:" << vector->Map().NumMyElements() << " :"
              << std::endl;
          for (size_type i = 0; i < vector_size; ++i)
            out << "[" << global_id(i) << "]: " << (*(vector))[0][i]
                << std::endl;
        }
      else
        {
          TrilinosScalar *val;
          int             leading_dimension;
          int ierr = vector->ExtractView(&val, &leading_dimension);
          AssertThrow(ierr == 0, ExcTrilinosError(ierr));

          if (across)
            for (size_type i = 0; i < size(); ++i)
              out << static_cast<double>(val[i]) << ' ';
          else
            for (size_type i = 0; i < size(); ++i)
              out << static_cast<double>(val[i]) << std::endl;
          out << std::endl;
        }

      AssertThrow(out.fail() == false, ExcIO());
    }


    void
    Vector::swap(Vector &v) noexcept
    {
      std::swap(last_action, v.last_action);
      std::swap(compressed, v.compressed);
      std::swap(has_ghosts, v.has_ghosts);
      std::swap(vector, v.vector);
      std::swap(nonlocal_vector, v.nonlocal_vector);
      std::swap(owned_elements, v.owned_elements);
    }


    std::size_t
    Vector::memory_consumption() const
    {
      // TODO[TH]: No accurate memory
      // consumption for Trilinos vectors
      // yet. This is a rough approximation with
      // one index and the value per local
      // entry.
      return sizeof(*this) +
             this->vector->Map().NumMyElements() *
               (sizeof(double) + sizeof(TrilinosWrappers::types::int_type));
    }

    // explicit instantiations
#  ifndef DOXYGEN
#    include "trilinos_vector.inst"
#  endif
  } // namespace MPI
} // namespace TrilinosWrappers

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_TRILINOS