#include <deal.II/base/index_set.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>

#include <vector>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#  endif
#  include <Epetra_Map.h>
#  include <Epetra_SerialComm.h>
#endif

DEAL_II_NAMESPACE_OPEN
#ifdef DEAL_II_WITH_TRILINOS

#  ifdef DEAL_II_WITH_64BIT_INDICES

IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID64())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID64() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  // For a linear (contiguous) map we can convert the bounds of the locally
  // owned range directly; otherwise we add the locally stored global
  // indices one by one.
  if (map.LinearMap() == true)
    add_range(size_type(map.MinMyGID64()), size_type(map.MaxMyGID64() + 1));
  else
    {
      const size_type n_indices = map.NumMyElements();
      size_type      *indices =
        reinterpret_cast<size_type *>(map.MyGlobalElements64());
      add_indices(indices, indices + n_indices);
    }
  compress();
}
#  else

// this is the same constructor as above, but for the 32-bit interface of
// Epetra_BlockMap
IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  if (map.LinearMap() == true)
    add_range(size_type(map.MinMyGID()), size_type(map.MaxMyGID() + 1));
  else
    {
      const size_type n_indices = map.NumMyElements();
      unsigned int   *indices =
        reinterpret_cast<unsigned int *>(map.MyGlobalElements());
      add_indices(indices, indices + n_indices);
    }
  compress();
}

#  endif

#endif // ifdef DEAL_II_WITH_TRILINOS



void
IndexSet::add_range(const size_type begin, const size_type end)
{
  Assert(begin <= end, ExcIndexRangeType<size_type>(begin, 0, end));

  if (begin != end)
    {
      const Range new_range(begin, end);

      // if the new range lies past the end of the last stored range we can
      // simply append it; otherwise insert it at its sorted position
      if (ranges.size() == 0 || begin > ranges.back().end)
        ranges.push_back(new_range);
      else
        ranges.insert(Utilities::lower_bound(ranges.begin(),
                                             ranges.end(),
                                             new_range),
                      new_range);

      is_compressed = false;
    }
}
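// Usage sketch (illustrative): ranges are half-open intervals [begin, end),
// so the following set contains exactly the indices 2, 3, and 4:
//
//   IndexSet is(10);
//   is.add_range(2, 5);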
void
IndexSet::do_compress() const
{
  // See if any of the contiguous ranges can be merged. Since the ranges are
  // sorted by their first index, each range only needs to be compared with
  // the ones immediately following it; merged ranges are written back, front
  // to back, into slots that have already been processed.
  std::vector<Range>::iterator store = ranges.begin();
  for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();)
    {
      std::vector<Range>::iterator next = i;
      ++next;

      size_type first_index = i->begin;
      size_type last_index  = i->end;

      // see if we can merge any of the following ranges
      while (next != ranges.end() && (next->begin <= last_index))
        {
          last_index = std::max(last_index, next->end);
          ++next;
        }
      i = next;

      // store the new range in the slot we last occupied
      *store = Range(first_index, last_index);
      ++store;
    }

  // use a compact array with exactly the right amount of storage
  if (store != ranges.end())
    {
      std::vector<Range> new_ranges(ranges.begin(), store);
      ranges.swap(new_ranges);
    }

  // now compute the position of each range within the set, and find the
  // range that holds the most elements
  size_type next_index = 0, largest_range_size = 0;
  for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end(); ++i)
    {
      i->nth_index_in_set = next_index;
      next_index += (i->end - i->begin);
      if (i->end - i->begin > largest_range_size)
        {
          largest_range_size = i->end - i->begin;
          largest_range      = i - ranges.begin();
        }
    }
  is_compressed = true;
}
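// Illustrative sketch of the merging above: adding the overlapping ranges
// [0,3) and [2,5) plus the separate range [7,8) initially stores three
// entries; after compression only the disjoint intervals [0,5) and [7,8)
// remain, with nth_index_in_set equal to 0 and 5, respectively:
//
//   IndexSet is(10);
//   is.add_range(0, 3);
//   is.add_range(2, 5);
//   is.add_range(7, 8);
//   is.compress();        // stores [0,5) and [7,8); is.n_elements() == 6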
IndexSet
IndexSet::operator&(const IndexSet &is) const
{
  Assert(size() == is.size(), ExcDimensionMismatch(size(), is.size()));

  compress();
  is.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = is.ranges.begin();
  IndexSet result(size());

  while ((r1 != ranges.end()) && (r2 != is.ranges.end()))
    {
      // if r1 and r2 do not overlap at all, then move the pointer that sits
      // to the left of the other up by one
      if (r1->end <= r2->begin)
        ++r1;
      else if (r2->end <= r1->begin)
        ++r2;
      else
        {
          // the ranges must overlap somehow
          Assert(((r1->begin <= r2->begin) && (r1->end > r2->begin)) ||
                   ((r2->begin <= r1->begin) && (r2->end > r1->begin)),
                 ExcInternalError());

          // add the overlapping range to the result
          result.add_range(std::max(r1->begin, r2->begin),
                           std::min(r1->end, r2->end));

          // now move the iterator that ends earlier one up. it has to be this
          // one because a subsequent range may still overlap with the range
          // that ends later
          if (r1->end <= r2->end)
            ++r1;
          else
            ++r2;
        }
    }

  result.compress();
  return result;
}
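// Illustrative sketch: intersecting {[0,5)} with {[3,8)} over the same
// index space yields {[3,5)}:
//
//   IndexSet a(10), b(10);
//   a.add_range(0, 5);
//   b.add_range(3, 8);
//   const IndexSet c = a & b;   // contains exactly the indices 3 and 4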
IndexSet
IndexSet::get_view(const size_type begin, const size_type end) const
{
  Assert(begin <= end,
         ExcMessage("End index needs to be larger or equal to begin index!"));
  Assert(end <= size(), ExcMessage("Given range exceeds index set dimension"));

  IndexSet result(end - begin);
  std::vector<Range>::const_iterator r1 = ranges.begin();

  while (r1 != ranges.end())
    {
      if ((r1->end > begin) && (r1->begin < end))
        {
          result.add_range(std::max(r1->begin, begin) - begin,
                           std::min(r1->end, end) - begin);
        }
      else if (r1->begin >= end)
        break;

      ++r1;
    }

  result.compress();
  return result;
}
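// Illustrative sketch: a view shifts the selected window [begin, end) so
// that it starts at index zero:
//
//   IndexSet is(10);
//   is.add_range(2, 8);
//   const IndexSet v = is.get_view(4, 10);   // contains [0,4), i.e., the
//                                            // old indices 4..7 shifted by 4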
void
IndexSet::subtract_set(const IndexSet &other)
{
  compress();
  other.compress();
  is_compressed = false;

  // we save new ranges to be added to our IndexSet in a temporary vector and
  // add all of them in one go at the end
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it   = ranges.begin();
  std::vector<Range>::iterator other_it = other.ranges.begin();

  while (own_it != ranges.end() && other_it != other.ranges.end())
    {
      // advance own iterator until we get an overlap
      if (own_it->end <= other_it->begin)
        {
          ++own_it;
          continue;
        }
      // we are done with other_it, so advance
      if (own_it->begin >= other_it->end)
        {
          ++other_it;
          continue;
        }

      // Now own_it and other_it overlap. First save the part of own_it that
      // lies before other_it (if not empty).
      if (own_it->begin < other_it->begin)
        {
          Range r(own_it->begin, other_it->begin);
          r.nth_index_in_set = 0; // avoid an uninitialized warning
          new_ranges.push_back(r);
        }
      // shrink own_it to the sub-range behind other_it. do not delete own_it
      // here; empty ranges are skipped when rebuilding the vector below
      own_it->begin = other_it->end;
      if (own_it->begin > own_it->end)
        {
          own_it->begin = own_it->end;
          ++own_it;
        }
    }

  // now delete all empty ranges we might have created
  for (std::vector<Range>::iterator it = ranges.begin(); it != ranges.end();)
    {
      if (it->begin >= it->end)
        it = ranges.erase(it);
      else
        ++it;
    }

  // done, now add the temporary ranges
  const std::vector<Range>::iterator end = new_ranges.end();
  for (std::vector<Range>::iterator it = new_ranges.begin(); it != end; ++it)
    add_range(it->begin, it->end);

  compress();
}
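// Illustrative sketch: subtracting {[3,5)} from {[0,8)} splits the single
// range into the two remainders [0,3) and [5,8):
//
//   IndexSet a(10), b(10);
//   a.add_range(0, 8);
//   b.add_range(3, 5);
//   a.subtract_set(b);   // a now contains [0,3) and [5,8)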
343 "pop_back() failed, because this IndexSet contains no entries."));
361 "pop_front() failed, because this IndexSet contains no entries."));
void
IndexSet::add_indices(const IndexSet &other, const size_type offset)
{
  if ((this == &other) && (offset == 0))
    return;

  Assert(other.ranges.size() == 0 ||
           other.ranges.back().end - 1 < index_space_size,
         ExcIndexRangeType<size_type>(other.ranges.back().end - 1,
                                      0,
                                      index_space_size));

  compress();
  other.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = other.ranges.begin();

  std::vector<Range> new_ranges;
  // just get the start and end of the ranges right in this method; everything
  // else will be done in compress()
  while (r1 != ranges.end() || r2 != other.ranges.end())
    {
      // the two ranges do not overlap, or we are at the end of one of the
      // two sequences of ranges
      if (r2 == other.ranges.end() ||
          (r1 != ranges.end() && r1->end < (r2->begin + offset)))
        {
          new_ranges.push_back(*r1);
          ++r1;
        }
      else if (r1 == ranges.end() || (r2->end + offset) < r1->begin)
        {
          new_ranges.emplace_back(r2->begin + offset, r2->end + offset);
          ++r2;
        }
      else
        {
          // the ranges overlap: merge them into a single candidate range and
          // let compress() clean up any remaining overlaps
          Range next(std::min(r1->begin, r2->begin + offset),
                     std::max(r1->end, r2->end + offset));
          new_ranges.push_back(next);
          ++r1;
          ++r2;
        }
    }
  ranges.swap(new_ranges);

  is_compressed = false;
  compress();
}
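// Illustrative sketch: with a nonzero offset, the other set's ranges are
// shifted before being merged in:
//
//   IndexSet a(20), b(20);
//   a.add_range(0, 3);
//   b.add_range(0, 2);
//   a.add_indices(b, 10);   // a now contains [0,3) and [10,12)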
void
IndexSet::write(std::ostream &out) const
{
  compress();
  out << size() << " ";
  out << ranges.size() << std::endl;
  std::vector<Range>::const_iterator r = ranges.begin();
  for (; r != ranges.end(); ++r)
    {
      out << r->begin << " " << r->end << std::endl;
    }
}
void
IndexSet::read(std::istream &in)
{
  AssertThrow(in, ExcIO());

  size_type    size;
  unsigned int n_ranges;

  in >> size >> n_ranges;
  ranges.clear();
  set_size(size);
  for (unsigned int i = 0; i < n_ranges; ++i)
    {
      AssertThrow(in, ExcIO());

      size_type begin, end;
      in >> begin >> end;
      add_range(begin, end);
    }
}
void
IndexSet::block_write(std::ostream &out) const
{
  AssertThrow(out, ExcIO());
  out.write(reinterpret_cast<const char *>(&index_space_size),
            sizeof(index_space_size));
  std::size_t n_ranges = ranges.size();
  out.write(reinterpret_cast<const char *>(&n_ranges), sizeof(n_ranges));
  if (ranges.empty() == false)
    out.write(reinterpret_cast<const char *>(&*ranges.begin()),
              ranges.size() * sizeof(Range));
  AssertThrow(out, ExcIO());
}
void
IndexSet::block_read(std::istream &in)
{
  size_type   size;
  std::size_t n_ranges;
  in.read(reinterpret_cast<char *>(&size), sizeof(size));
  in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));

  ranges.clear();
  set_size(size);
  ranges.resize(n_ranges, Range(0, 0));
  if (n_ranges != 0)
    in.read(reinterpret_cast<char *>(&*ranges.begin()),
            ranges.size() * sizeof(Range));

  do_compress(); // needed so that largest_range is recomputed
}
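// Illustrative sketch (file and variable names are hypothetical):
// block_write() and block_read() form a lossless round trip through a
// binary stream, after which the two sets compare equal:
//
//   std::ofstream out("indexset.bin", std::ios::binary);
//   is.block_write(out);
//
//   std::ifstream in("indexset.bin", std::ios::binary);
//   is2.block_read(in);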
void
IndexSet::fill_index_vector(std::vector<size_type> &indices) const
{
  compress();

  indices.clear();
  indices.reserve(n_elements());

  for (const auto &range : ranges)
    for (size_type entry = range.begin; entry < range.end; ++entry)
      indices.push_back(entry);

  Assert(indices.size() == n_elements(), ExcInternalError());
}
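// Illustrative sketch: for a set containing [0,2) and [5,6),
// fill_index_vector() produces the vector {0, 1, 5}.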
#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_TRILINOS_WITH_TPETRA

Tpetra::Map<int, types::global_dof_index>
IndexSet::make_tpetra_map(const MPI_Comm &communicator,
                          const bool      overlapping) const
{
  compress();
  (void)communicator;

#    ifdef DEBUG
  if (!overlapping)
    {
      const size_type n_global_elements =
        Utilities::MPI::sum(n_elements(), communicator);
      Assert(n_global_elements == size(),
             ExcMessage("You are trying to create a Tpetra::Map object "
                        "that partitions elements of an index set "
                        "between processors. However, the union of the "
                        "index sets on different processors does not "
                        "contain all indices exactly once: the sum of "
                        "the number of entries the various processors "
                        "want to store locally is " +
                        Utilities::to_string(n_global_elements) +
                        " whereas the total size of the object to be "
                        "allocated is " +
                        Utilities::to_string(size()) +
                        ". In other words, there are "
                        "either indices that are not spoken for "
                        "by any processor, or there are indices that are "
                        "claimed by multiple processors."));
    }
#    endif

  // A linear map, i.e., one that is ascending and 1:1, can be created
  // without communicating the individual indices. Overlapping index sets
  // are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);
  if (linear)
    return Tpetra::Map<int, types::global_dof_index>(
      size(),
      n_elements(),
      0,
#    ifdef DEAL_II_WITH_MPI
      Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
#    else
      Teuchos::rcp(new Teuchos::Comm<int>())
#    endif
    );
  else
    {
      std::vector<size_type> indices;
      fill_index_vector(indices);
      std::vector<types::global_dof_index> int_indices(indices.size());
      std::copy(indices.begin(), indices.end(), int_indices.begin());
      const Teuchos::ArrayView<types::global_dof_index> arr_view(int_indices);
      return Tpetra::Map<int, types::global_dof_index>(
        size(),
        arr_view,
        0,
#    ifdef DEAL_II_WITH_MPI
        Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
#    else
        Teuchos::rcp(new Teuchos::Comm<int>())
#    endif
      );
    }
}
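// Usage sketch (illustrative; 'locally_owned' is a hypothetical name): for
// index sets that together cover [0, size()) exactly once, the resulting
// map is 1:1:
//
//   const Tpetra::Map<int, types::global_dof_index> map =
//     locally_owned.make_tpetra_map(mpi_communicator, false);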
#  endif



Epetra_Map
IndexSet::make_trilinos_map(const MPI_Comm &communicator,
                            const bool      overlapping) const
{
  compress();
  (void)communicator;

#  ifdef DEBUG
  if (!overlapping)
    {
      const size_type n_global_elements =
        Utilities::MPI::sum(n_elements(), communicator);
      Assert(n_global_elements == size(),
             ExcMessage("You are trying to create an Epetra_Map object "
                        "that partitions elements of an index set "
                        "between processors. However, the union of the "
                        "index sets on different processors does not "
                        "contain all indices exactly once: the sum of "
                        "the number of entries the various processors "
                        "want to store locally is " +
                        Utilities::to_string(n_global_elements) +
                        " whereas the total size of the object to be "
                        "allocated is " +
                        Utilities::to_string(size()) +
                        ". In other words, there are "
                        "either indices that are not spoken for "
                        "by any processor, or there are indices that are "
                        "claimed by multiple processors."));
    }
#  endif

  // As above, a linear map does not need the explicit list of locally owned
  // indices; overlapping index sets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);
  if (linear)
    return Epetra_Map(TrilinosWrappers::types::int_type(size()),
                      TrilinosWrappers::types::int_type(n_elements()),
                      0,
#  ifdef DEAL_II_WITH_MPI
                      Epetra_MpiComm(communicator)
#  else
                      Epetra_SerialComm()
#  endif
    );
  else
    {
      std::vector<size_type> indices;
      fill_index_vector(indices);
      return Epetra_Map(
        TrilinosWrappers::types::int_type(-1),
        TrilinosWrappers::types::int_type(n_elements()),
        (n_elements() > 0 ?
           reinterpret_cast<TrilinosWrappers::types::int_type *>(
             indices.data()) :
           nullptr),
        0,
#  ifdef DEAL_II_WITH_MPI
        Epetra_MpiComm(communicator)
#  else
        Epetra_SerialComm()
#  endif
      );
    }
}
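// Usage sketch (illustrative; 'locally_owned' is a hypothetical name): the
// Epetra variant works the same way and yields the map consumed by the
// TrilinosWrappers vector and matrix classes:
//
//   const Epetra_Map map =
//     locally_owned.make_trilinos_map(mpi_communicator, false);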
#endif // ifdef DEAL_II_WITH_TRILINOS



bool
IndexSet::is_ascending_and_one_to_one(const MPI_Comm &communicator) const
{
  // if the sum of local elements does not add up to the total size, the
  // index set cannot be complete, and consequently not 1:1 either
  const size_type n_global_elements =
    Utilities::MPI::sum(n_elements(), communicator);
  if (n_global_elements != size())
    return false;

#ifdef DEAL_II_WITH_MPI
  // non-contiguous index sets cannot be linear
  const bool all_contiguous =
    (Utilities::MPI::min(is_contiguous() ? 1 : 0, communicator) == 1);
  if (!all_contiguous)
    return false;

  bool is_globally_ascending = true;
  // since each process holds a single contiguous interval, gather the first
  // locally owned index of every process on the root
  const types::global_dof_index first_local_dof =
    (n_elements() > 0) ? *(begin()) : numbers::invalid_dof_index;

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
  const std::vector<types::global_dof_index> global_dofs =
    Utilities::MPI::gather(communicator, first_local_dof, 0);

  if (my_rank == 0)
    {
      // find out if the received sequence is ascending, skipping processes
      // that do not own any indices
      types::global_dof_index index = 0;
      while (global_dofs[index] == numbers::invalid_dof_index)
        ++index;
      types::global_dof_index old_dof = global_dofs[index++];
      for (; index < global_dofs.size(); ++index)
        {
          const types::global_dof_index new_dof = global_dofs[index];
          if (new_dof != numbers::invalid_dof_index)
            {
              if (new_dof <= old_dof)
                {
                  is_globally_ascending = false;
                  break;
                }
              else
                old_dof = new_dof;
            }
        }
    }

  // now broadcast the result from the root to all other processes
  int is_ascending = is_globally_ascending ? 1 : 0;
  int ierr = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
  AssertThrowMPI(ierr);

  return (is_ascending == 1);
#else
  return true;
#endif // DEAL_II_WITH_MPI
}



std::size_t
IndexSet::memory_consumption() const
{
  return (MemoryConsumption::memory_consumption(ranges) +
          MemoryConsumption::memory_consumption(is_compressed) +
          MemoryConsumption::memory_consumption(index_space_size) +
          sizeof(compress_mutex));
}



DEAL_II_NAMESPACE_CLOSE