#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#  endif
#  include <Epetra_Map.h>
#  include <Epetra_SerialComm.h>
#endif
#ifdef DEAL_II_WITH_TRILINOS

// Constructor from an Epetra_BlockMap. With 64-bit indices we have to use
// the GID64 variants of the Epetra accessor functions.
#  ifdef DEAL_II_WITH_64BIT_INDICES

IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID64())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID64() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  // For a contiguous map we do not need to populate the ranges vector;
  // otherwise, add the locally stored indices.
  if (map.LinearMap() == false)
    {
      const size_type n_indices = map.NumMyElements();
      size_type *     indices =
        reinterpret_cast<size_type *>(map.MyGlobalElements64());
      add_indices(indices, indices + n_indices);
    }
  compress();
}
#  else

// The 32-bit variant of the same constructor.
IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  // For a contiguous map we do not need to populate the ranges vector;
  // otherwise, add the locally stored indices.
  if (map.LinearMap() == false)
    {
      const size_type n_indices = map.NumMyElements();
      unsigned int *  indices =
        reinterpret_cast<unsigned int *>(map.MyGlobalElements());
      add_indices(indices, indices + n_indices);
    }
  compress();
}

#  endif
#endif // DEAL_II_WITH_TRILINOS
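// Usage sketch (editorial, not part of the library source): basic
// construction and population of an IndexSet. Assumes
// <deal.II/base/index_set.h> is included and namespace dealii is in
// scope; 'example_basics' is a hypothetical name.
void example_basics()
{
  IndexSet set(100);     // index space [0,100)
  set.add_range(20, 40); // the half-open range [20,40)
  set.add_index(60);     // a single index
  set.compress();
  Assert(set.n_elements() == 21, ExcInternalError());
  Assert(set.is_element(60) && !set.is_element(59), ExcInternalError());
}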
void
IndexSet::do_compress() const
{
  // (The implementation first acquires compress_mutex so that compress()
  // may be called concurrently from 'const' member functions.)

  // See if any of the contiguous ranges can be merged. Do not use
  // std::vector::erase in place as it is quadratic in the number of
  // ranges; instead, write the merged ranges from the left into 'store'.
  std::vector<Range>::iterator store = ranges.begin();
  for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();)
    {
      std::vector<Range>::iterator next = i;
      ++next;

      size_type first_index = i->begin;
      size_type last_index  = i->end;

      // See how many of the following ranges attach to or overlap the
      // current one and can consequently be merged into it.
      while (next != ranges.end() && (next->begin <= last_index))
        {
          last_index = std::max(last_index, next->end);
          ++next;
        }
      i = next;

      // Store the merged range in the slot we last occupied.
      *store = Range(first_index, last_index);
      ++store;
    }

  // If anything was merged, shrink the array to exactly the right size.
  if (store != ranges.end())
    {
      std::vector<Range> new_ranges(ranges.begin(), store);
      ranges.swap(new_ranges);
    }

  // Now compute the cumulative index of each range within the set, and
  // remember which range holds the most elements.
  size_type next_index = 0, largest_range_size = 0;
  for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end(); ++i)
    {
      i->nth_index_in_set = next_index;
      next_index += (i->end - i->begin);
      if (i->end - i->begin > largest_range_size)
        {
          largest_range_size = i->end - i->begin;
          largest_range      = (i - ranges.begin());
        }
    }
  is_compressed = true;
}
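// Usage sketch (editorial): compress() merges touching and overlapping
// ranges, so the number of stored intervals shrinks while the number of
// elements is preserved. 'example_compress' is a hypothetical name.
void example_compress()
{
  IndexSet set(50);
  set.add_range(0, 10);
  set.add_range(10, 20); // touches [0,10): merged
  set.add_range(15, 30); // overlaps [10,20): merged as well
  set.compress();
  Assert(set.n_intervals() == 1, ExcInternalError());
  Assert(set.n_elements() == 30, ExcInternalError());
}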
IndexSet
IndexSet::operator&(const IndexSet &is) const
{
  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = is.ranges.begin();
  IndexSet result(size());

  while ((r1 != ranges.end()) && (r2 != is.ranges.end()))
    {
      // If the two ranges do not overlap at all, move the one that lies
      // entirely to the left of the other one up by one.
      if (r1->end <= r2->begin)
        ++r1;
      else if (r2->end <= r1->begin)
        ++r2;
      else
        {
          // The ranges must overlap somehow.
          Assert(((r1->begin <= r2->begin) && (r1->end > r2->begin)) ||
                   ((r2->begin <= r1->begin) && (r2->end > r1->begin)),
                 ExcInternalError());

          // Add the overlap to the result, then advance whichever range
          // ends first: the later-ending one may still overlap the next.
          result.add_range(std::max(r1->begin, r2->begin),
                           std::min(r1->end, r2->end));
          if (r1->end <= r2->end)
            ++r1;
          else
            ++r2;
        }
    }

  result.compress();
  return result;
}
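// Usage sketch (editorial): intersecting two IndexSets over the same
// index space. 'example_intersection' is a hypothetical name.
void example_intersection()
{
  IndexSet a(100), b(100);
  a.add_range(0, 60);
  b.add_range(40, 100);
  const IndexSet c = a & b; // contains exactly [40,60)
  Assert(c.n_elements() == 20, ExcInternalError());
  Assert(c.is_element(40) && !c.is_element(60), ExcInternalError());
}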
IndexSet
IndexSet::get_view(const size_type begin, const size_type end) const
{
  Assert(begin <= end,
         ExcMessage("End index needs to be larger or equal to begin index!"));
  Assert(end <= size(), ExcMessage("Given range exceeds index set dimension"));

  IndexSet result(end - begin);
  std::vector<Range>::const_iterator r1 = ranges.begin();

  while (r1 != ranges.end())
    {
      // Copy the part of this range that intersects [begin,end), shifted
      // so that the view's index space starts at zero.
      if ((r1->end > begin) && (r1->begin < end))
        result.add_range(std::max(r1->begin, begin) - begin,
                         std::min(r1->end, end) - begin);
      else if (r1->begin >= end)
        break;

      ++r1;
    }

  result.compress();
  return result;
}
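// Usage sketch (editorial): get_view() extracts the intersection with
// [begin,end) and shifts it to a view whose index space starts at zero.
// 'example_view' is a hypothetical name.
void example_view()
{
  IndexSet set(100);
  set.add_range(10, 30);
  const IndexSet view = set.get_view(20, 60); // [20,30) becomes [0,10)
  Assert(view.size() == 40, ExcInternalError());
  Assert(view.n_elements() == 10, ExcInternalError());
  Assert(view.is_element(0) && !view.is_element(10), ExcInternalError());
}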
std::vector<IndexSet>
IndexSet::split_by_block(
  const std::vector<types::global_dof_index> &n_indices_per_block) const
{
  std::vector<IndexSet> partitioned;
  const unsigned int    n_blocks = n_indices_per_block.size();

  partitioned.reserve(n_blocks);
  types::global_dof_index start = 0;
  for (const auto n_block_indices : n_indices_per_block)
    {
      partitioned.push_back(this->get_view(start, start + n_block_indices));
      start += n_block_indices;
    }

#ifdef DEBUG
  types::global_dof_index sum = 0;
  for (const auto &partition : partitioned)
    sum += partition.size();
  AssertDimension(sum, this->size());
#endif

  return partitioned;
}
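// Usage sketch (editorial): splitting an IndexSet over [0,10) into two
// consecutive blocks of sizes 4 and 6; the block sizes must add up to
// size(). 'example_split' is a hypothetical name.
void example_split()
{
  IndexSet set(10);
  set.add_range(2, 8);
  const std::vector<IndexSet> blocks = set.split_by_block({4, 6});
  Assert(blocks[0].n_elements() == 2, ExcInternalError()); // {2,3}
  Assert(blocks[1].n_elements() == 4, ExcInternalError()); // view of {4,...,7}
}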
void
IndexSet::subtract_set(const IndexSet &other)
{
  compress();
  other.compress();
  is_compressed = false;

  // Collect the surviving ranges in a temporary vector, merging the two
  // sorted range sequences as we walk them.
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it   = ranges.begin();
  std::vector<Range>::iterator other_it = other.ranges.begin();

  while (own_it != ranges.end() && other_it != other.ranges.end())
    {
      // Advance own range: it lies fully before the other range.
      if (own_it->end <= other_it->begin)
        {
          new_ranges.push_back(*own_it);
          ++own_it;
          continue;
        }
      // Advance other range: it lies fully before our own range.
      if (own_it->begin >= other_it->end)
        {
          ++other_it;
          continue;
        }

      // The two ranges overlap. First save the part of our range that
      // lies before the other range, if it is not empty.
      if (own_it->begin < other_it->begin)
        {
          Range r(own_it->begin, other_it->begin);
          r.nth_index_in_set = 0; // dummy value, recomputed in compress()
          new_ranges.push_back(r);
        }

      // Then shrink our range to the part behind the other range.
      own_it->begin = other_it->end;
      if (own_it->begin > own_it->end)
        {
          own_it->begin = own_it->end;
          ++own_it;
        }
      else
        ++other_it;
    }

  // Take over the remaining own ranges.
  for (; own_it != ranges.end(); ++own_it)
    new_ranges.push_back(*own_it);
  ranges.clear();

  // Done; now add the temporary ranges back.
  const std::vector<Range>::iterator end = new_ranges.end();
  for (std::vector<Range>::iterator it = new_ranges.begin(); it != end; ++it)
    add_range(it->begin, it->end);

  compress();
}
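// Usage sketch (editorial): removing the elements of one IndexSet from
// another. 'example_subtract' is a hypothetical name.
void example_subtract()
{
  IndexSet a(100), b(100);
  a.add_range(0, 50);
  b.add_range(20, 30);
  a.subtract_set(b); // a now holds [0,20) and [30,50)
  Assert(a.n_elements() == 40, ExcInternalError());
  Assert(!a.is_element(25) && a.is_element(30), ExcInternalError());
}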
IndexSet
IndexSet::tensor_product(const IndexSet &other) const
{
  IndexSet set(this->size() * other.size());
  for (const auto el : *this)
    set.add_indices(other, el * other.size());
  set.compress();
  return set;
}
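// Usage sketch (editorial): the tensor product maps every element 'el' of
// *this to a copy of 'other' shifted by el * other.size().
// 'example_tensor_product' is a hypothetical name.
void example_tensor_product()
{
  IndexSet rows(3), cols(4);
  rows.add_index(1);    // row 1 only
  cols.add_range(0, 2); // columns {0,1}
  const IndexSet product = rows.tensor_product(cols);
  Assert(product.size() == 12, ExcInternalError());
  // row 1 with columns {0,1} corresponds to the flat indices {4,5}
  Assert(product.n_elements() == 2, ExcInternalError());
  Assert(product.is_element(4) && product.is_element(5), ExcInternalError());
}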
351 "pop_back() failed, because this IndexSet contains no entries."));
369 "pop_front() failed, because this IndexSet contains no entries."));
void
IndexSet::add_indices(const IndexSet &other, const size_type offset)
{
  if ((this == &other) && (offset == 0))
    return;

  if (other.ranges.size() != 0)
    {
      AssertIndexRange(other.ranges.back().end - 1, index_space_size);
    }

  compress();
  other.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = other.ranges.begin();

  std::vector<Range> new_ranges;
  // Just get the start and end of the ranges right here; everything else
  // is done in compress().
  while (r1 != ranges.end() || r2 != other.ranges.end())
    {
      // The two ranges do not overlap, or we are at the end of one of
      // them.
      if (r2 == other.ranges.end() ||
          (r1 != ranges.end() && r1->end < (r2->begin + offset)))
        {
          new_ranges.push_back(*r1);
          ++r1;
        }
      else if (r1 == ranges.end() || (r2->end + offset) < r1->begin)
        {
          new_ranges.emplace_back(r2->begin + offset, r2->end + offset);
          ++r2;
        }
      else
        {
          // The ranges overlap: merge them.
          Range next(std::min(r1->begin, r2->begin + offset),
                     std::max(r1->end, r2->end + offset));
          new_ranges.push_back(next);
          ++r1;
          ++r2;
        }
    }
  ranges.swap(new_ranges);

  is_compressed = false;
  compress();
}
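// Usage sketch (editorial): merging another IndexSet into this one,
// shifted by an offset. 'example_add_offset' is a hypothetical name.
void example_add_offset()
{
  IndexSet a(100), b(10);
  a.add_range(0, 5);
  b.add_range(0, 3);
  a.add_indices(b, 50); // adds {50,51,52}
  Assert(a.n_elements() == 8, ExcInternalError());
  Assert(a.is_element(52) && !a.is_element(53), ExcInternalError());
}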
void
IndexSet::write(std::ostream &out) const
{
  compress();
  out << size() << " ";
  out << ranges.size() << std::endl;
  std::vector<Range>::const_iterator r = ranges.begin();
  for (; r != ranges.end(); ++r)
    out << r->begin << " " << r->end << std::endl;
}



void
IndexSet::read(std::istream &in)
{
  AssertThrow(in, ExcIO());

  size_type    s;
  unsigned int n_ranges;

  in >> s >> n_ranges;

  ranges.clear();
  set_size(s);

  for (unsigned int i = 0; i < n_ranges; ++i)
    {
      AssertThrow(in, ExcIO());

      size_type b, e;
      in >> b >> e;
      add_range(b, e);
    }
}
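// Usage sketch (editorial): write() and read() provide a plain-text round
// trip through any std::ostream/std::istream; assumes <sstream> is
// included. 'example_text_io' is a hypothetical name.
void example_text_io()
{
  IndexSet out_set(100);
  out_set.add_range(10, 20);

  std::stringstream stream;
  out_set.write(stream);

  IndexSet in_set;
  in_set.read(stream);
  Assert(in_set == out_set, ExcInternalError());
}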
void
IndexSet::block_write(std::ostream &out) const
{
  AssertThrow(out, ExcIO());
  out.write(reinterpret_cast<const char *>(&index_space_size),
            sizeof(index_space_size));
  const std::size_t n_ranges = ranges.size();
  out.write(reinterpret_cast<const char *>(&n_ranges), sizeof(n_ranges));
  if (ranges.empty() == false)
    out.write(reinterpret_cast<const char *>(&*ranges.begin()),
              ranges.size() * sizeof(Range));
  AssertThrow(out, ExcIO());
}



void
IndexSet::block_read(std::istream &in)
{
  size_type   size;
  std::size_t n_ranges;
  in.read(reinterpret_cast<char *>(&size), sizeof(size));
  in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));

  // We have to clear the current ranges first.
  ranges.clear();
  set_size(size);
  ranges.resize(n_ranges, Range(0, 0));
  if (n_ranges != 0)
    in.read(reinterpret_cast<char *>(&*ranges.begin()),
            ranges.size() * sizeof(Range));

  do_compress(); // needed to recompute the cached members
}
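// Usage sketch (editorial): the binary counterpart of write()/read(); the
// ranges are dumped and restored verbatim. 'example_binary_io' is a
// hypothetical name.
void example_binary_io()
{
  IndexSet out_set(1000);
  out_set.add_range(100, 900);

  std::stringstream stream;
  out_set.block_write(stream);

  IndexSet in_set;
  in_set.block_read(stream);
  Assert(in_set == out_set, ExcInternalError());
}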
void
IndexSet::fill_index_vector(std::vector<size_type> &indices) const
{
  compress();

  indices.clear();
  indices.reserve(n_elements());

  for (const auto &range : ranges)
    for (size_type entry = range.begin; entry < range.end; ++entry)
      indices.push_back(entry);

  Assert(indices.size() == n_elements(), ExcInternalError());
}
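// Usage sketch (editorial): expanding the compressed range representation
// into an explicit ascending list of indices. 'example_fill' is a
// hypothetical name.
void example_fill()
{
  IndexSet set(10);
  set.add_range(2, 4);
  set.add_index(7);

  std::vector<IndexSet::size_type> indices;
  set.fill_index_vector(indices); // indices == {2, 3, 7}
  Assert(indices.size() == set.n_elements(), ExcInternalError());
}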
#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_TRILINOS_WITH_TPETRA

Tpetra::Map<int, types::global_dof_index>
IndexSet::make_tpetra_map(const MPI_Comm &communicator,
                          const bool      overlapping) const
{
  compress();
  (void)communicator;

#    ifdef DEBUG
  if (!overlapping)
    {
      const size_type n_global_elements =
        Utilities::MPI::sum(n_elements(), communicator);
      Assert(n_global_elements == size(),
             ExcMessage(
               "You are trying to create a Tpetra::Map object "
               "that partitions elements of an index set "
               "between processors. However, the union of the "
               "index sets on different processors does not "
               "contain all indices exactly once: the sum of "
               "the number of entries the various processors "
               "want to store locally is " +
               std::to_string(n_global_elements) +
               " whereas the total size of the object to be "
               "allocated is " + std::to_string(size()) +
               ". In other words, there are "
               "either indices that are not spoken for "
               "by any processor, or there are indices that are "
               "claimed by multiple processors."));
    }
#    endif

  // An index set that is ascending and 1:1 yields a linear map that needs
  // no explicit list of indices. Overlapping index sets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);
  if (linear)
    return Tpetra::Map<int, types::global_dof_index>(
      size(),
      n_elements(),
      0,
#    ifdef DEAL_II_WITH_MPI
      Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
#    else
      // Teuchos::Comm is abstract; the serial fallback has to use
      // Teuchos::SerialComm.
      Teuchos::rcp(new Teuchos::SerialComm<int>())
#    endif
    );
  else
    {
      std::vector<size_type> indices;
      fill_index_vector(indices);
      std::vector<types::global_dof_index> int_indices(indices.size());
      std::copy(indices.begin(), indices.end(), int_indices.begin());
      const Teuchos::ArrayView<types::global_dof_index> arr_view(int_indices);
      return Tpetra::Map<int, types::global_dof_index>(
        size(),
        arr_view,
        0,
#    ifdef DEAL_II_WITH_MPI
        Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
#    else
        Teuchos::rcp(new Teuchos::SerialComm<int>())
#    endif
      );
    }
}

#  endif // DEAL_II_TRILINOS_WITH_TPETRA
Epetra_Map
IndexSet::make_trilinos_map(const MPI_Comm &communicator,
                            const bool      overlapping) const
{
  compress();
  (void)communicator;

#  ifdef DEBUG
  if (!overlapping)
    {
      const size_type n_global_elements =
        Utilities::MPI::sum(n_elements(), communicator);
      Assert(n_global_elements == size(),
             ExcMessage(
               "You are trying to create an Epetra_Map object "
               "that partitions elements of an index set "
               "between processors. However, the union of the "
               "index sets on different processors does not "
               "contain all indices exactly once: the sum of "
               "the number of entries the various processors "
               "want to store locally is " +
               std::to_string(n_global_elements) +
               " whereas the total size of the object to be "
               "allocated is " + std::to_string(size()) +
               ". In other words, there are "
               "either indices that are not spoken for "
               "by any processor, or there are indices that are "
               "claimed by multiple processors."));
    }
#  endif

  // As above: an ascending and 1:1 index set corresponds to a linear
  // Epetra_Map.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);
  if (linear)
    return Epetra_Map(TrilinosWrappers::types::int_type(size()),
                      TrilinosWrappers::types::int_type(n_elements()),
                      0,
#  ifdef DEAL_II_WITH_MPI
                      Epetra_MpiComm(communicator)
#  else
                      Epetra_SerialComm()
#  endif
    );
  else
    {
      std::vector<size_type> indices;
      fill_index_vector(indices);
      return Epetra_Map(
        TrilinosWrappers::types::int_type(-1),
        TrilinosWrappers::types::int_type(n_elements()),
        (n_elements() > 0 ?
           reinterpret_cast<TrilinosWrappers::types::int_type *>(
             indices.data()) :
           nullptr),
        0,
#  ifdef DEAL_II_WITH_MPI
        Epetra_MpiComm(communicator)
#  else
        Epetra_SerialComm()
#  endif
      );
    }
}

#endif // DEAL_II_WITH_TRILINOS
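// Usage sketch (editorial, assumes deal.II configured with Trilinos and
// run on two MPI ranks): each rank owns one half of [0,100), which is
// ascending and 1:1, so the resulting Epetra_Map is linear.
// 'example_epetra_map' is a hypothetical name.
void example_epetra_map(const MPI_Comm communicator)
{
  const unsigned int rank = Utilities::MPI::this_mpi_process(communicator);

  IndexSet locally_owned(100);
  locally_owned.add_range(rank * 50, (rank + 1) * 50);

  const Epetra_Map map =
    locally_owned.make_trilinos_map(communicator, /*overlapping=*/false);
  Assert(map.NumMyElements() == 50, ExcInternalError());
}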
bool
IndexSet::is_ascending_and_one_to_one(const MPI_Comm &communicator) const
{
  // If the sum of local elements does not add up to the total size, the
  // index set cannot cover every index exactly once.
  const size_type n_global_elements =
    Utilities::MPI::sum(n_elements(), communicator);
  if (n_global_elements != size())
    return false;

  if (n_global_elements == 0)
    return true;

#ifdef DEAL_II_WITH_MPI
  // Non-contiguous local index sets cannot form a linear map.
  const bool all_contiguous =
    (Utilities::MPI::min(is_contiguous() ? 1 : 0, communicator) == 1);
  if (!all_contiguous)
    return false;

  bool is_globally_ascending = true;
  // Every process holds a single interval, so gather its first index on
  // the root process and check there that the sequence is ascending,
  // skipping processes without locally owned elements.
  const types::global_dof_index first_local_dof =
    (n_elements() > 0) ? *begin() : numbers::invalid_dof_index;

  const std::vector<types::global_dof_index> global_dofs =
    Utilities::MPI::gather(communicator, first_local_dof, 0);

  if (Utilities::MPI::this_mpi_process(communicator) == 0)
    {
      types::global_dof_index index = 0;
      while (global_dofs[index] == numbers::invalid_dof_index)
        ++index;
      types::global_dof_index old_dof = global_dofs[index++];
      for (; index < global_dofs.size(); ++index)
        {
          const types::global_dof_index new_dof = global_dofs[index];
          if (new_dof != numbers::invalid_dof_index)
            {
              if (new_dof <= old_dof)
                {
                  is_globally_ascending = false;
                  break;
                }
              else
                old_dof = new_dof;
            }
        }
    }

  // Now broadcast the result from the root to all processes.
  int is_ascending = is_globally_ascending ? 1 : 0;
  int ierr         = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
  AssertThrowMPI(ierr);

  return (is_ascending == 1);
#else
  return true;
#endif
}
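// Usage sketch (editorial, assumes an MPI build with two ranks): a
// contiguous, ascending, non-overlapping partition is 1:1; swapping the
// two halves between the ranks would break the ascending property.
// 'example_ascending' is a hypothetical name.
void example_ascending(const MPI_Comm communicator)
{
  const unsigned int rank = Utilities::MPI::this_mpi_process(communicator);

  IndexSet set(100);
  set.add_range(rank * 50, (rank + 1) * 50); // rank 0: [0,50); rank 1: [50,100)
  Assert(set.is_ascending_and_one_to_one(communicator), ExcInternalError());
}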