Reference documentation for deal.II version 9.3.3
trilinos_vector.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2008 - 2020 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16#include <deal.II/lac/trilinos_vector.h>
17
18#ifdef DEAL_II_WITH_TRILINOS
19
20# include <deal.II/base/mpi.h>
21
22# include <deal.II/lac/read_write_vector.h>
23# include <deal.II/lac/trilinos_index_access.h>
24# include <deal.II/lac/trilinos_parallel_block_vector.h>
25# include <deal.II/lac/trilinos_sparse_matrix.h>
26
27# include <boost/io/ios_state.hpp>
28
29# include <Epetra_Export.h>
30# include <Epetra_Import.h>
31# include <Epetra_Vector.h>
32
33# include <cmath>
34# include <memory>
35
36
37DEAL_II_NAMESPACE_OPEN
38
39namespace TrilinosWrappers
40{
41# ifndef DOXYGEN
42 namespace internal
43 {
44 VectorReference::operator TrilinosScalar() const
45 {
46 AssertIndexRange(index, vector.size());
47
48 // Trilinos allows for vectors to be referenced by the [] or ()
49 // operators but only () checks index bounds. We check these bounds by
50 // ourselves, so we can use []. Note that we can only get local values.
51
52 const TrilinosWrappers::types::int_type local_index =
53 vector.vector->Map().LID(
54 static_cast<TrilinosWrappers::types::int_type>(index));
55 Assert(local_index >= 0,
 56 MPI::Vector::ExcAccessToNonLocalElement(
 57 index,
58 vector.local_size(),
59 vector.vector->Map().MinMyGID(),
60 vector.vector->Map().MaxMyGID()));
61
62
63 return (*(vector.vector))[0][local_index];
64 }
65 } // namespace internal
66# endif
67
68 namespace MPI
69 {
 70 Vector::Vector()
 71 : Subscriptor()
72 , last_action(Zero)
73 , compressed(true)
74 , has_ghosts(false)
75 , vector(new Epetra_FEVector(
76 Epetra_Map(0, 0, 0, Utilities::Trilinos::comm_self())))
77 {}
78
79
80
81 Vector::Vector(const IndexSet &parallel_partitioning,
82 const MPI_Comm &communicator)
83 : Vector()
84 {
85 reinit(parallel_partitioning, communicator);
86 }
87
88
89
 90 Vector::Vector(const Vector &v)
 91 : Vector()
92 {
 93 has_ghosts = v.has_ghosts;
 94 vector = std::make_unique<Epetra_FEVector>(*v.vector);
 95 owned_elements = v.owned_elements;
 96 }
97
98
99
100 Vector::Vector(Vector &&v) noexcept
101 : Vector()
102 {
103 // initialize a minimal, valid object and swap
104 swap(v);
105 }
106
107
108
109 Vector::Vector(const IndexSet &parallel_partitioner,
110 const Vector & v,
111 const MPI_Comm &communicator)
112 : Vector()
113 {
114 AssertThrow(parallel_partitioner.size() ==
115 static_cast<size_type>(
 116 TrilinosWrappers::n_global_elements(v.vector->Map())),
 117 ExcDimensionMismatch(parallel_partitioner.size(),
 118 TrilinosWrappers::n_global_elements(
 119 v.vector->Map())));
120
121 vector = std::make_unique<Epetra_FEVector>(
122 parallel_partitioner.make_trilinos_map(communicator, true));
123 reinit(v, false, true);
124 }
125
126
127
 128 Vector::Vector(const IndexSet &local,
 129 const IndexSet &ghost,
130 const MPI_Comm &communicator)
131 : Vector()
132 {
133 reinit(local, ghost, communicator, false);
134 }
135
136
137
138 void
 139 Vector::clear()
 140 {
141 // When we clear the vector, reset the pointer and generate an empty
142 // vector.
143# ifdef DEAL_II_WITH_MPI
144 Epetra_Map map(0, 0, Epetra_MpiComm(MPI_COMM_SELF));
145# else
146 Epetra_Map map(0, 0, Epetra_SerialComm());
147# endif
148
149 has_ghosts = false;
150 vector = std::make_unique<Epetra_FEVector>(map);
151 last_action = Zero;
152 }
153
154
155
156 void
157 Vector::reinit(const IndexSet &parallel_partitioner,
158 const MPI_Comm &communicator,
159 const bool /*omit_zeroing_entries*/)
160 {
161 nonlocal_vector.reset();
162
163 const bool overlapping =
164 !parallel_partitioner.is_ascending_and_one_to_one(communicator);
165
166 Epetra_Map map =
167 parallel_partitioner.make_trilinos_map(communicator, overlapping);
168
169 vector = std::make_unique<Epetra_FEVector>(map);
170
171 has_ghosts = vector->Map().UniqueGIDs() == false;
172
173 // If the IndexSets are overlapping, we don't really know
174 // which process owns what. So we decide that no process
175 // owns anything in that case. In particular asking for
176 // the locally owned elements is not allowed.
177 if (has_ghosts)
178 {
 179 owned_elements.clear();
 180 owned_elements.set_size(parallel_partitioner.size());
 181 }
182 else
183 owned_elements = parallel_partitioner;
184
185# ifdef DEBUG
186 const size_type n_elements_global =
 187 Utilities::MPI::sum(owned_elements.n_elements(), communicator);
 188
189 Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
190# endif
191
192 last_action = Zero;
193 }
194
195
196
197 void
 198 Vector::reinit(const Vector &v,
 199 const bool omit_zeroing_entries,
200 const bool allow_different_maps)
201 {
202 nonlocal_vector.reset();
203
 204 // If we are not allowed to have different maps, this call means that
205 // we have to reset the vector. So clear the vector, initialize our map
206 // with the map in v, and generate the vector.
207 if (allow_different_maps == false)
208 {
209 // check equality for MPI communicators: We can only choose the fast
210 // version in case the underlying Epetra_MpiComm object is the same,
211 // otherwise we might access an MPI_Comm object that has been
212 // deleted
213# ifdef DEAL_II_WITH_MPI
214 const Epetra_MpiComm *my_comm =
215 dynamic_cast<const Epetra_MpiComm *>(&vector->Comm());
216 const Epetra_MpiComm *v_comm =
217 dynamic_cast<const Epetra_MpiComm *>(&v.vector->Comm());
218 const bool same_communicators =
219 my_comm != nullptr && v_comm != nullptr &&
220 my_comm->DataPtr() == v_comm->DataPtr();
221# else
222 const bool same_communicators = true;
223# endif
224 if (!same_communicators ||
225 vector->Map().SameAs(v.vector->Map()) == false)
226 {
227 vector = std::make_unique<Epetra_FEVector>(v.vector->Map());
 228 has_ghosts = v.has_ghosts;
 229 last_action = Zero;
 230 owned_elements = v.owned_elements;
 231 }
232 else if (omit_zeroing_entries == false)
233 {
234 // old and new vectors have exactly the same map, i.e. size and
235 // parallel distribution
236 int ierr;
237 ierr = vector->GlobalAssemble(last_action);
238 (void)ierr;
239 Assert(ierr == 0, ExcTrilinosError(ierr));
240
241 ierr = vector->PutScalar(0.0);
242 Assert(ierr == 0, ExcTrilinosError(ierr));
243
244 last_action = Zero;
245 }
246 }
247
248 // Otherwise, we have to check that the two vectors are already of the
249 // same size, create an object for the data exchange and then insert all
250 // the data. The first assertion is only a check whether the user knows
251 // what they are doing.
252 else
253 {
254 Assert(omit_zeroing_entries == false,
 255 ExcMessage(
 256 "It is not possible to exchange data with the "
257 "option 'omit_zeroing_entries' set, which would not write "
258 "elements."));
259
260 AssertThrow(size() == v.size(),
 261 ExcDimensionMismatch(size(), v.size()));
 262
263 Epetra_Import data_exchange(vector->Map(), v.vector->Map());
264
265 const int ierr = vector->Import(*v.vector, data_exchange, Insert);
266 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
267
268 last_action = Insert;
269 }
270# if defined(DEBUG) && defined(DEAL_II_WITH_MPI)
271 const Epetra_MpiComm *comm_ptr =
272 dynamic_cast<const Epetra_MpiComm *>(&(v.vector->Comm()));
273 Assert(comm_ptr != nullptr, ExcInternalError());
274 const size_type n_elements_global =
275 Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());
276 Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
277# endif
278 }
279
280
281
282 void
283 Vector::reinit(const MPI::BlockVector &v, const bool import_data)
284 {
285 nonlocal_vector.reset();
 286 owned_elements.clear();
 287 owned_elements.set_size(v.size());
 288
 289 // If we are not allowed to have different maps, this call means that
290 // we have to reset the vector. So clear the vector, initialize our map
291 // with the map in v, and generate the vector.
292 if (v.n_blocks() == 0)
293 return;
294
295 // create a vector that holds all the elements contained in the block
296 // vector. need to manually create an Epetra_Map.
297 size_type n_elements = 0, added_elements = 0, block_offset = 0;
298 for (size_type block = 0; block < v.n_blocks(); ++block)
299 n_elements += v.block(block).local_size();
300 std::vector<TrilinosWrappers::types::int_type> global_ids(n_elements, -1);
301 for (size_type block = 0; block < v.n_blocks(); ++block)
302 {
303 TrilinosWrappers::types::int_type *glob_elements =
 304 TrilinosWrappers::my_global_elements(
 305 v.block(block).trilinos_partitioner());
306 for (size_type i = 0; i < v.block(block).local_size(); ++i)
307 global_ids[added_elements++] = glob_elements[i] + block_offset;
308 owned_elements.add_indices(v.block(block).owned_elements,
309 block_offset);
310 block_offset += v.block(block).size();
311 }
312
313 Assert(n_elements == added_elements, ExcInternalError());
314 Epetra_Map new_map(v.size(),
315 n_elements,
316 global_ids.data(),
317 0,
318 v.block(0).trilinos_partitioner().Comm());
319
320 auto actual_vec = std::make_unique<Epetra_FEVector>(new_map);
321
322 TrilinosScalar *entries = (*actual_vec)[0];
323 for (size_type block = 0; block < v.n_blocks(); ++block)
324 {
325 v.block(block).trilinos_vector().ExtractCopy(entries, 0);
326 entries += v.block(block).local_size();
327 }
328
329 if (import_data == true)
330 {
 331 AssertThrow(static_cast<size_type>(TrilinosWrappers::global_length(
 332 *actual_vec)) == v.size(),
 333 ExcDimensionMismatch(TrilinosWrappers::global_length(
 334 *actual_vec),
335 v.size()));
336
337 Epetra_Import data_exchange(vector->Map(), actual_vec->Map());
338
339 const int ierr = vector->Import(*actual_vec, data_exchange, Insert);
340 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
341
342 last_action = Insert;
343 }
344 else
345 vector = std::move(actual_vec);
346# if defined(DEBUG) && defined(DEAL_II_WITH_MPI)
347 const Epetra_MpiComm *comm_ptr =
348 dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
349 Assert(comm_ptr != nullptr, ExcInternalError());
350 const size_type n_elements_global =
351 Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());
352
353 Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
354# endif
355 }
356
357
358
359 void
360 Vector::reinit(const IndexSet &locally_owned_entries,
361 const IndexSet &ghost_entries,
362 const MPI_Comm &communicator,
363 const bool vector_writable)
364 {
365 nonlocal_vector.reset();
366 owned_elements = locally_owned_entries;
367 if (vector_writable == false)
368 {
369 IndexSet parallel_partitioner = locally_owned_entries;
370 parallel_partitioner.add_indices(ghost_entries);
371 Epetra_Map map =
372 parallel_partitioner.make_trilinos_map(communicator, true);
373 vector = std::make_unique<Epetra_FEVector>(map);
374 }
375 else
376 {
377 Epetra_Map map =
378 locally_owned_entries.make_trilinos_map(communicator, true);
379 Assert(map.IsOneToOne(),
380 ExcMessage("A writable vector must not have ghost entries in "
381 "its parallel partitioning"));
382
383 if (vector->Map().SameAs(map) == false)
384 vector = std::make_unique<Epetra_FEVector>(map);
385 else
386 {
387 const int ierr = vector->PutScalar(0.);
388 (void)ierr;
389 Assert(ierr == 0, ExcTrilinosError(ierr));
390 }
391
392 IndexSet nonlocal_entries(ghost_entries);
393 nonlocal_entries.subtract_set(locally_owned_entries);
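 // Off-processor (ghost) entries of a writable vector are staged in the
 // separate 'nonlocal_vector' below; contributions stored there are
 // shipped to the owning processes when compress() is called.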
394 if (Utilities::MPI::n_mpi_processes(communicator) > 1)
395 {
396 Epetra_Map nonlocal_map =
397 nonlocal_entries.make_trilinos_map(communicator, true);
 398 nonlocal_vector =
 399 std::make_unique<Epetra_MultiVector>(nonlocal_map, 1);
400 }
401 }
402
403 has_ghosts = vector->Map().UniqueGIDs() == false;
404
405 last_action = Zero;
406
407# ifdef DEBUG
408 const size_type n_elements_global =
 409 Utilities::MPI::sum(owned_elements.n_elements(), communicator);
 410
411 Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
412# endif
413 }
414
415
416
417 Vector &
 418 Vector::operator=(const Vector &v)
 419 {
420 Assert(vector.get() != nullptr,
421 ExcMessage("Vector is not constructed properly."));
422
423 // check equality for MPI communicators to avoid accessing a possibly
424 // invalid MPI_Comm object
425# ifdef DEAL_II_WITH_MPI
426 const Epetra_MpiComm *my_comm =
427 dynamic_cast<const Epetra_MpiComm *>(&vector->Comm());
428 const Epetra_MpiComm *v_comm =
429 dynamic_cast<const Epetra_MpiComm *>(&v.vector->Comm());
430 const bool same_communicators = my_comm != nullptr && v_comm != nullptr &&
431 my_comm->DataPtr() == v_comm->DataPtr();
432 // Need to ask MPI whether the communicators are the same. We would like
433 // to use the following checks but currently we cannot make sure the
434 // memory of my_comm is not stale from some MPI_Comm_free
435 // somewhere. This can happen when a vector lives in GrowingVectorMemory
436 // data structures. Thus, the following code is commented out.
437 //
438 // if (my_comm != nullptr &&
439 // v_comm != nullptr &&
440 // my_comm->DataPtr() != v_comm->DataPtr())
441 // {
442 // int communicators_same = 0;
443 // const int ierr = MPI_Comm_compare (my_comm->GetMpiComm(),
444 // v_comm->GetMpiComm(),
445 // &communicators_same);
446 // AssertThrowMPI(ierr);
447 // if (!(communicators_same == MPI_IDENT ||
448 // communicators_same == MPI_CONGRUENT))
449 // same_communicators = false;
450 // else
451 // same_communicators = true;
452 // }
453# else
454 const bool same_communicators = true;
455# endif
456
457 // distinguish three cases. First case: both vectors have the same
458 // layout (just need to copy the local data, not reset the memory and
459 // the underlying Epetra_Map). The third case means that we have to
460 // rebuild the calling vector.
461 if (same_communicators && v.vector->Map().SameAs(vector->Map()))
462 {
463 *vector = *v.vector;
464 if (v.nonlocal_vector.get() != nullptr)
 465 nonlocal_vector =
 466 std::make_unique<Epetra_MultiVector>(v.nonlocal_vector->Map(), 1);
467 last_action = Zero;
468 }
469 // Second case: vectors have the same global
470 // size, but different parallel layouts (and
471 // one of them a one-to-one mapping). Then we
472 // can call the import/export functionality.
473 else if (size() == v.size() &&
474 (v.vector->Map().UniqueGIDs() || vector->Map().UniqueGIDs()))
475 {
476 reinit(v, false, true);
477 }
478 // Third case: Vectors do not have the same
479 // size.
480 else
481 {
482 vector = std::make_unique<Epetra_FEVector>(*v.vector);
483 last_action = Zero;
 484 has_ghosts = v.has_ghosts;
 485 owned_elements = v.owned_elements;
 486 }
487
488 if (v.nonlocal_vector.get() != nullptr)
 489 nonlocal_vector =
 490 std::make_unique<Epetra_MultiVector>(v.nonlocal_vector->Map(), 1);
491
492 return *this;
493 }
494
495
496
497 Vector &
 498 Vector::operator=(Vector &&v) noexcept
 499 {
500 swap(v);
501 return *this;
502 }
503
504
505
506 template <typename number>
507 Vector &
508 Vector::operator=(const ::Vector<number> &v)
509 {
510 Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
511
512 // this is probably not very efficient but works. in particular, we could
513 // do better if we know that number==TrilinosScalar because then we could
514 // elide the copying of elements
515 //
516 // let's hope this isn't a particularly frequent operation
517 std::pair<size_type, size_type> local_range = this->local_range();
518 for (size_type i = local_range.first; i < local_range.second; ++i)
519 (*vector)[0][i - local_range.first] = v(i);
520
521 return *this;
522 }
523
524
525
526 void
 527 Vector::import_nonlocal_data_for_fe(const SparseMatrix &m,
 528 const Vector & v)
529 {
530 Assert(m.trilinos_matrix().Filled() == true,
531 ExcMessage("Matrix is not compressed. "
532 "Cannot find exchange information!"));
533 Assert(v.vector->Map().UniqueGIDs() == true,
534 ExcMessage("The input vector has overlapping data, "
535 "which is not allowed."));
536
537 if (vector->Map().SameAs(m.trilinos_matrix().ColMap()) == false)
538 vector =
539 std::make_unique<Epetra_FEVector>(m.trilinos_matrix().ColMap());
540
541 Epetra_Import data_exchange(vector->Map(), v.vector->Map());
542 const int ierr = vector->Import(*v.vector, data_exchange, Insert);
543
544 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
545
546 last_action = Insert;
547 }
548
549
550 void
 551 Vector::import(const LinearAlgebra::ReadWriteVector<double> &rwv,
 552 const VectorOperation::values operation)
553 {
554 Assert(
555 this->size() == rwv.size(),
 556 ExcMessage(
 557 "Both vectors need to have the same size for import() to work!"));
558 // TODO: a generic import() function should handle any kind of data layout
559 // in ReadWriteVector, but this function is of limited use as this class
560 // will (hopefully) be retired eventually.
 561 Assert(this->locally_owned_elements() == rwv.get_stored_elements(),
 562 ExcNotImplemented());
 563
564 if (operation == VectorOperation::insert)
565 {
566 for (const auto idx : this->locally_owned_elements())
567 (*this)[idx] = rwv[idx];
568 }
569 else if (operation == VectorOperation::add)
570 {
571 for (const auto idx : this->locally_owned_elements())
572 (*this)[idx] += rwv[idx];
573 }
574 else
 575 AssertThrow(false, ExcNotImplemented());
 576
577 this->compress(operation);
578 }
579
580
581 void
 582 Vector::compress(::VectorOperation::values given_last_action)
 583 {
584 // Select which mode to send to Trilinos. Note that we use last_action if
585 // available and ignore what the user tells us to detect wrongly mixed
586 // operations. Typically given_last_action is only used on machines that
 587 // do not execute an operation (because they do not own any cells, for
588 // example).
589 Epetra_CombineMode mode = last_action;
590 if (last_action == Zero)
591 {
592 if (given_last_action == ::VectorOperation::add)
593 mode = Add;
594 else if (given_last_action == ::VectorOperation::insert)
595 mode = Insert;
596 else
597 Assert(
598 false,
 599 ExcMessage(
 600 "compress() can only be called with VectorOperation add, insert, or unknown"));
601 }
602 else
603 {
604 Assert(
605 ((last_action == Add) &&
606 (given_last_action == ::VectorOperation::add)) ||
607 ((last_action == Insert) &&
608 (given_last_action == ::VectorOperation::insert)),
 609 ExcMessage(
 610 "The last operation on the Vector and the given last action in the compress() call do not agree!"));
611 }
612
613
614# ifdef DEBUG
615# ifdef DEAL_II_WITH_MPI
616 // check that every process has decided to use the same mode. This will
617 // otherwise result in undefined behavior in the call to
618 // GlobalAssemble().
619 double double_mode = mode;
620 const Epetra_MpiComm *comm_ptr =
621 dynamic_cast<const Epetra_MpiComm *>(&(trilinos_partitioner().Comm()));
622 Assert(comm_ptr != nullptr, ExcInternalError());
 623 const Utilities::MPI::MinMaxAvg result =
 624 Utilities::MPI::min_max_avg(double_mode, comm_ptr->GetMpiComm());
625 Assert(result.max - result.min < 1e-5,
 626 ExcMessage(
 627 "Not all processors agree whether the last operation on "
628 "this vector was an addition or a set operation. This will "
629 "prevent the compress() operation from succeeding."));
630
631# endif
632# endif
633
634 // Now pass over the information about what we did last to the vector.
635 int ierr = 0;
636 if (nonlocal_vector.get() == nullptr || mode != Add)
637 ierr = vector->GlobalAssemble(mode);
638 else
639 {
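 // A writable ghosted vector stages additions to off-processor entries
 // in 'nonlocal_vector'; export them into the locally owned part here
 // and clear the staging buffer for the next round of additions.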
640 Epetra_Export exporter(nonlocal_vector->Map(), vector->Map());
641 ierr = vector->Export(*nonlocal_vector, exporter, mode);
642 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
643 ierr = nonlocal_vector->PutScalar(0.);
644 }
645 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
646 last_action = Zero;
647
648 compressed = true;
649 }
650
651
652
 653 TrilinosScalar
 654 Vector::operator()(const size_type index) const
655 {
656 // Extract local indices in the vector.
657 TrilinosWrappers::types::int_type trilinos_i = vector->Map().LID(
658 static_cast<TrilinosWrappers::types::int_type>(index));
659 TrilinosScalar value = 0.;
660
661 // If the element is not present on the current processor, we can't
662 // continue. This is the main difference to the el() function.
663 if (trilinos_i == -1)
664 {
665 Assert(false,
 666 ExcAccessToNonLocalElement(index,
 667 local_size(),
668 vector->Map().MinMyGID(),
669 vector->Map().MaxMyGID()));
670 }
671 else
672 value = (*vector)[0][trilinos_i];
673
674 return value;
675 }
676
677
678
679 void
680 Vector::add(const Vector &v, const bool allow_different_maps)
681 {
682 if (allow_different_maps == false)
683 *this += v;
684 else
685 {
 686 Assert(!has_ghost_elements(), ExcGhostsPresent());
 687 AssertThrow(size() == v.size(),
 688 ExcDimensionMismatch(size(), v.size()));
 689
690# if DEAL_II_TRILINOS_VERSION_GTE(11, 11, 0)
691 Epetra_Import data_exchange(vector->Map(), v.vector->Map());
692 int ierr =
693 vector->Import(*v.vector, data_exchange, Epetra_AddLocalAlso);
694 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
695 last_action = Add;
696# else
697 // In versions older than 11.11 the Import function is broken for
 698 // adding. Hence, we provide a workaround in this case.
699
700 Epetra_MultiVector dummy(vector->Map(), 1, false);
701 Epetra_Import data_exchange(dummy.Map(), v.vector->Map());
702
703 int ierr = dummy.Import(*v.vector, data_exchange, Insert);
704 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
705
706 ierr = vector->Update(1.0, dummy, 1.0);
707 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
708# endif
709 }
710 }
711
712
713
714 bool
 715 Vector::operator==(const Vector &v) const
 716 {
717 Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
718 if (local_size() != v.local_size())
719 return false;
720
721 size_type i;
722 for (i = 0; i < local_size(); ++i)
723 if ((*(v.vector))[0][i] != (*vector)[0][i])
724 return false;
725
726 return true;
727 }
728
729
730
731 bool
 732 Vector::operator!=(const Vector &v) const
 733 {
734 Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
735
736 return (!(*this == v));
737 }
738
739
740
741 bool
 742 Vector::all_zero() const
 743 {
744 // get a representation of the vector and
745 // loop over all the elements
746 TrilinosScalar * start_ptr = (*vector)[0];
747 const TrilinosScalar *ptr = start_ptr, *eptr = start_ptr + local_size();
748 unsigned int flag = 0;
749 while (ptr != eptr)
750 {
751 if (*ptr != 0)
752 {
753 flag = 1;
754 break;
755 }
756 ++ptr;
757 }
758
759# ifdef DEAL_II_WITH_MPI
760 // in parallel, check that the vector
761 // is zero on _all_ processors.
762 const Epetra_MpiComm *mpi_comm =
763 dynamic_cast<const Epetra_MpiComm *>(&vector->Map().Comm());
764 Assert(mpi_comm != nullptr, ExcInternalError());
765 unsigned int num_nonzero = Utilities::MPI::sum(flag, mpi_comm->Comm());
766 return num_nonzero == 0;
767# else
768 return flag == 0;
769# endif
770 }
771
772
773
774 bool
 775 Vector::is_non_negative() const
 776 {
777# ifdef DEAL_II_WITH_MPI
778 // if this vector is a parallel one, then
779 // we need to communicate to determine
780 // the answer to the current
781 // function. this still has to be
782 // implemented
 783 AssertThrow(local_size() == size(), ExcNotImplemented());
 784# endif
785 // get a representation of the vector and
786 // loop over all the elements
787 TrilinosScalar *start_ptr;
788 int leading_dimension;
789 int ierr = vector->ExtractView(&start_ptr, &leading_dimension);
790 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
791
792 // TODO: This
793 // won't work in parallel like
 794 // this. Find out a better way to do
795 // this in that case.
796 const TrilinosScalar *ptr = start_ptr, *eptr = start_ptr + size();
797 bool flag = true;
798 while (ptr != eptr)
799 {
800 if (*ptr < 0.0)
801 {
802 flag = false;
803 break;
804 }
805 ++ptr;
806 }
807
808 return flag;
809 }
810
811
812
813 void
814 Vector::print(std::ostream & out,
815 const unsigned int precision,
816 const bool scientific,
817 const bool across) const
818 {
819 AssertThrow(out, ExcIO());
820 boost::io::ios_flags_saver restore_flags(out);
821
822
823 out.precision(precision);
824 if (scientific)
825 out.setf(std::ios::scientific, std::ios::floatfield);
826 else
827 out.setf(std::ios::fixed, std::ios::floatfield);
828
829 if (size() != local_size())
830 {
831 auto global_id = [&](const size_type index) {
832 return gid(vector->Map(), index);
833 };
834 out << "size:" << size() << " local_size:" << local_size() << " :"
835 << std::endl;
836 for (size_type i = 0; i < local_size(); ++i)
837 out << "[" << global_id(i) << "]: " << (*(vector))[0][i]
838 << std::endl;
839 }
840 else
841 {
842 TrilinosScalar *val;
843 int leading_dimension;
844 int ierr = vector->ExtractView(&val, &leading_dimension);
845
846 AssertThrow(ierr == 0, ExcTrilinosError(ierr));
847 if (across)
848 for (size_type i = 0; i < size(); ++i)
849 out << static_cast<double>(val[i]) << ' ';
850 else
851 for (size_type i = 0; i < size(); ++i)
852 out << static_cast<double>(val[i]) << std::endl;
853 out << std::endl;
854 }
855
856 AssertThrow(out, ExcIO());
857 }
858
859
860
861 void
 862 Vector::swap(Vector &v)
 863 {
 864 std::swap(last_action, v.last_action);
 865 std::swap(compressed, v.compressed);
 866 std::swap(has_ghosts, v.has_ghosts);
 867 std::swap(vector, v.vector);
 868 std::swap(nonlocal_vector, v.nonlocal_vector);
 869 std::swap(owned_elements, v.owned_elements);
 870 }
871
872
873
874 std::size_t
 875 Vector::memory_consumption() const
 876 {
877 // TODO[TH]: No accurate memory
878 // consumption for Trilinos vectors
879 // yet. This is a rough approximation with
880 // one index and the value per local
881 // entry.
882 return sizeof(*this) +
883 this->local_size() *
884 (sizeof(double) + sizeof(TrilinosWrappers::types::int_type));
885 }
886
887 // explicit instantiations
888# ifndef DOXYGEN
889# include "trilinos_vector.inst"
890# endif
891 } // namespace MPI
892} // namespace TrilinosWrappers
893
894DEAL_II_NAMESPACE_CLOSE
895
896#endif // DEAL_II_WITH_TRILINOS
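For orientation, here is a minimal usage sketch of the class implemented above. It is not part of the file; the global size, the block partitioning, and the fill loop are made up for illustration. It creates a distributed vector over a set of locally owned indices, writes into the owned entries through operator(), and finalizes with compress(), which ends up in the GlobalAssemble()/Export() logic shown above.

  #include <deal.II/base/index_set.h>
  #include <deal.II/lac/trilinos_vector.h>

  using namespace dealii;

  void fill_example(const MPI_Comm communicator)
  {
    // Hypothetical global size; in a real application this would come
    // from a DoFHandler or similar.
    const types::global_dof_index n_entries = 100;

    // Every process owns a contiguous chunk of the index range.
    IndexSet locally_owned(n_entries);
    const unsigned int rank    = Utilities::MPI::this_mpi_process(communicator);
    const unsigned int n_procs = Utilities::MPI::n_mpi_processes(communicator);
    locally_owned.add_range(rank * n_entries / n_procs,
                            (rank + 1) * n_entries / n_procs);

    TrilinosWrappers::MPI::Vector v;
    v.reinit(locally_owned, communicator); // Vector::reinit() defined above

    for (const auto i : locally_owned)
      v(i) = 1.0 * i; // write owned entries (an 'insert' operation)

    v.compress(VectorOperation::insert); // Vector::compress() defined above
  }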