#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#  endif
#  include <Epetra_Map.h>
#  include <Epetra_SerialComm.h>
#endif

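// Construct an IndexSet from an Epetra_BlockMap: the index space spans all
// global IDs of the map, and the locally stored elements of the map become
// the elements of the set. The 64-bit and 32-bit Trilinos configurations
// differ only in which GID accessors they call.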
#ifdef DEAL_II_WITH_TRILINOS

#  ifdef DEAL_II_WITH_64BIT_INDICES

IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID64())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID64() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  const size_type n_indices = map.NumMyElements();
  size_type      *indices =
    reinterpret_cast<size_type *>(map.MyGlobalElements64());
  add_indices(indices, indices + n_indices);
  compress();
}

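// Without 64-bit indices, the same constructor uses the 32-bit Epetra GID
// interface instead: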
#  else

IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  const size_type n_indices = map.NumMyElements();
  unsigned int *indices =
    reinterpret_cast<unsigned int *>(map.MyGlobalElements());
  add_indices(indices, indices + n_indices);
  compress();
}

#  endif
#endif // DEAL_II_WITH_TRILINOS

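// Merge all ranges that can be merged and recompute the cached per-range
// information (nth_index_in_set and the position of the largest range).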
void
IndexSet::do_compress() const
{
  // See if any of the contiguous ranges can be merged. Do not use
  // std::vector::erase in place as it is quadratic in the number of ranges;
  // since the ranges are sorted by their first index, determining overlap
  // is not hard.
  std::vector<Range>::iterator store = ranges.begin();
  for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();)
    {
      std::vector<Range>::iterator next = i;
      ++next;

      size_type first_index = i->begin;
      size_type last_index  = i->end;

      // see if we can merge any of the following ranges
      while (next != ranges.end() && (next->begin <= last_index))
        {
          last_index = std::max(last_index, next->end);
          ++next;
        }
      i = next;

      // store the new range in the slot we last occupied
      *store = Range(first_index, last_index);
      ++store;
    }
  // use a compact array with exactly the right amount of storage
  if (store != ranges.end())
    {
      std::vector<Range> new_ranges(ranges.begin(), store);
      ranges.swap(new_ranges);
    }

  // now compute indices within the set and find the range with most elements
  size_type next_index = 0, largest_range_size = 0;
  for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end(); ++i)
    {
      i->nth_index_in_set = next_index;
      next_index += (i->end - i->begin);
      if (i->end - i->begin > largest_range_size)
        {
          largest_range_size = i->end - i->begin;
          largest_range      = i - ranges.begin();
        }
    }
  is_compressed = true;
}

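// Set intersection: walk the two sorted range lists in lockstep and collect
// the parts where they overlap.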
IndexSet
IndexSet::operator&(const IndexSet &is) const
{
  Assert(size() == is.size(), ExcDimensionMismatch(size(), is.size()));

  compress();
  is.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = is.ranges.begin();
  IndexSet result(size());

  while ((r1 != ranges.end()) && (r2 != is.ranges.end()))
    {
      // if the current ranges do not overlap, advance the one that lies
      // entirely to the left of the other
      if (r1->end <= r2->begin)
        ++r1;
      else if (r2->end <= r1->begin)
        ++r2;
      else
        {
          // the ranges must overlap somehow
          Assert(((r1->begin <= r2->begin) && (r1->end > r2->begin)) ||
                   ((r2->begin <= r1->begin) && (r2->end > r1->begin)),
                 ExcInternalError());

          // add the overlapping part, then advance the range that ends first
          result.add_range(std::max(r1->begin, r2->begin),
                           std::min(r1->end, r2->end));
          if (r1->end <= r2->end)
            ++r1;
          else
            ++r2;
        }
    }

  result.compress();
  return result;
}

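// Extract the subset of indices that lie in [begin, end) and shift it so
// that the result lives in the index space [0, end - begin).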
IndexSet
IndexSet::get_view(const size_type begin, const size_type end) const
{
  Assert(begin <= end,
         ExcMessage(
           "End index needs to be larger or equal to begin index!"));

  IndexSet result(end - begin);
  std::vector<Range>::const_iterator r1 = ranges.begin();

  while (r1 != ranges.end())
    {
      if ((r1->end > begin) && (r1->begin < end))
        {
          result.add_range(std::max(r1->begin, begin) - begin,
                           std::min(r1->end, end) - begin);
        }
      else if (r1->begin >= end)
        break;

      ++r1;
    }

  result.compress();
  return result;
}

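// Split this IndexSet into consecutive blocks of the given sizes; each block
// is returned as a view shifted into its own local index space.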
std::vector<IndexSet>
IndexSet::split_by_block(
  const std::vector<types::global_dof_index> &n_indices_per_block) const
{
  std::vector<IndexSet> partitioned;
  const unsigned int    n_blocks = n_indices_per_block.size();

  partitioned.reserve(n_blocks);
  types::global_dof_index start = 0;
  types::global_dof_index sum   = 0;
  for (const auto n_block_indices : n_indices_per_block)
    {
      partitioned.push_back(this->get_view(start, start + n_block_indices));
      start += n_block_indices;
      sum += partitioned.back().size();
    }
  AssertDimension(sum, this->size());

  return partitioned;
}

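// Remove all elements of 'other' from this set by shrinking or splitting the
// affected ranges.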
void
IndexSet::subtract_set(const IndexSet &other)
{
  compress();
  other.compress();
  is_compressed = false;

  // new ranges to be added to this IndexSet are collected in a temporary
  // vector and added in one go at the end
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it   = ranges.begin();
  std::vector<Range>::iterator other_it = other.ranges.begin();

  while (own_it != ranges.end() && other_it != other.ranges.end())
    {
      // advance own iterator until we get an overlap
      if (own_it->end <= other_it->begin)
        {
          ++own_it;
          continue;
        }
      // we are done with other_it, so advance
      if (own_it->begin >= other_it->end)
        {
          ++other_it;
          continue;
        }

      // now own_it and other_it overlap: first save the part of own_it that
      // lies before other_it (if not empty)
      if (own_it->begin < other_it->begin)
        {
          Range r(own_it->begin, other_it->begin);
          r.nth_index_in_set = 0; // avoid an uninitialized-variable warning
          new_ranges.push_back(r);
        }
      // change own_it to the sub-range behind other_it; do not erase own_it
      // here because that would invalidate the iterator
      own_it->begin = other_it->end;
      if (own_it->begin > own_it->end)
        {
          own_it->begin = own_it->end;
          // empty range, skip
          ++own_it;
        }
      else
        // check the next range of other
        ++other_it;
    }

  // now delete all empty ranges we might have created
  for (std::vector<Range>::iterator it = ranges.begin(); it != ranges.end();)
    {
      if (it->begin >= it->end)
        it = ranges.erase(it);
      else
        ++it;
    }

  // done, now add the temporary ranges
  const std::vector<Range>::iterator end = new_ranges.end();
  for (std::vector<Range>::iterator it = new_ranges.begin(); it != end; ++it)
    add_range(it->begin, it->end);

  compress();
}

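// Tensor product: for every element 'el' of this set, add all elements of
// 'other', shifted by el * other.size().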
IndexSet
IndexSet::tensor_product(const IndexSet &other) const
{
  IndexSet set(this->size() * other.size());
  for (const auto el : *this)
    set.add_indices(other, el * other.size());
  set.compress();
  return set;
}

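// pop_back() and pop_front() remove and return the last respectively the
// first element of the set.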
IndexSet::size_type
IndexSet::pop_back()
{
  Assert(is_empty() == false,
         ExcMessage(
           "pop_back() failed, because this IndexSet contains no entries."));

  const size_type index = ranges.back().end - 1;
  --ranges.back().end;

  if (ranges.back().begin == ranges.back().end)
    ranges.pop_back();

  return index;
}

IndexSet::size_type
IndexSet::pop_front()
{
  Assert(is_empty() == false,
         ExcMessage(
           "pop_front() failed, because this IndexSet contains no entries."));

  const size_type index = ranges.front().begin;
  ++ranges.front().begin;

  if (ranges.front().begin == ranges.front().end)
    ranges.erase(ranges.begin());

  // nth_index_in_set is no longer up to date for the remaining ranges
  is_compressed = false;

  return index;
}

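// Add the (optionally shifted) ranges of 'other' to this set. Overlapping
// ranges are only roughly combined here; the final cleanup is left to
// compress().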
void
IndexSet::add_indices(const IndexSet &other, const size_type offset)
{
  if ((this == &other) && (offset == 0))
    return;

  if (other.ranges.size() != 0)
    {
      AssertIndexRange(other.ranges.back().end - 1, index_space_size);
    }

  compress();
  other.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = other.ranges.begin();

  std::vector<Range> new_ranges;
  // just get the start and end of the ranges right in this method; everything
  // else will be done in compress()
  while (r1 != ranges.end() || r2 != other.ranges.end())
    {
      // the two ranges do not overlap, or we are at the end of one of them
      if (r2 == other.ranges.end() ||
          (r1 != ranges.end() && r1->end < (r2->begin + offset)))
        {
          new_ranges.push_back(*r1);
          ++r1;
        }
      else if (r1 == ranges.end() || (r2->end + offset) < r1->begin)
        {
          new_ranges.emplace_back(r2->begin + offset, r2->end + offset);
          ++r2;
        }
      else
        {
          // the ranges overlap: store their union and advance both
          Range next(std::min(r1->begin, r2->begin + offset),
                     std::max(r1->end, r2->end + offset));
          new_ranges.push_back(next);
          ++r1;
          ++r2;
        }
    }
  ranges.swap(new_ranges);

  is_compressed = false;
  compress();
}

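// Plain-text serialization: the total size, the number of ranges, and then
// one half-open [begin, end) pair per line.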
void
IndexSet::write(std::ostream &out) const
{
  compress();
  out << size() << " ";
  out << ranges.size() << std::endl;
  std::vector<Range>::const_iterator r = ranges.begin();
  for (; r != ranges.end(); ++r)
    {
      out << r->begin << " " << r->end << std::endl;
    }
}

void
IndexSet::read(std::istream &in)
{
  AssertThrow(in, ExcIO());

  size_type    s;
  unsigned int n_ranges;
  in >> s >> n_ranges;

  ranges.clear();
  set_size(s);
  for (unsigned int i = 0; i < n_ranges; ++i)
    {
      AssertThrow(in, ExcIO());
      size_type b, e;
      in >> b >> e;
      add_range(b, e);
    }
}

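// Binary serialization: dump the index space size, the number of ranges, and
// the raw Range array; block_read() restores the same three pieces.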
void
IndexSet::block_write(std::ostream &out) const
{
  AssertThrow(out, ExcIO());
  out.write(reinterpret_cast<const char *>(&index_space_size),
            sizeof(index_space_size));
  std::size_t n_ranges = ranges.size();
  out.write(reinterpret_cast<const char *>(&n_ranges), sizeof(n_ranges));
  if (ranges.empty() == false)
    out.write(reinterpret_cast<const char *>(&*ranges.begin()),
              ranges.size() * sizeof(Range));
  AssertThrow(out, ExcIO());
}

void
IndexSet::block_read(std::istream &in)
{
  std::size_t size;
  std::size_t n_ranges;
  in.read(reinterpret_cast<char *>(&size), sizeof(size));
  in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));

  // we have to clear the ranges first
  ranges.clear();
  set_size(size);
  ranges.resize(n_ranges, Range(0, 0));
  if (n_ranges != 0)
    in.read(reinterpret_cast<char *>(&*ranges.begin()),
            ranges.size() * sizeof(Range));

  do_compress(); // needed so that largest_range can be recomputed
}

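// Expand the compressed range representation into an explicit, sorted vector
// of all indices contained in the set.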
void
IndexSet::fill_index_vector(std::vector<size_type> &indices) const
{
  compress();

  indices.clear();
  indices.reserve(n_elements());

  for (const auto &range : ranges)
    for (size_type entry = range.begin; entry < range.end; ++entry)
      indices.push_back(entry);

  Assert(indices.size() == n_elements(), ExcInternalError());
}

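// Conversion to Trilinos map types. A non-overlapping IndexSet that is
// ascending and one-to-one across all processors can be represented by a
// "linear" (contiguous) map; otherwise the explicit list of locally owned
// global indices has to be handed to the map constructor.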
#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_TRILINOS_WITH_TPETRA

Tpetra::Map<int, types::global_dof_index>
IndexSet::make_tpetra_map(const MPI_Comm &communicator,
                          const bool      overlapping) const
{
  compress();
  (void)communicator;

#    ifdef DEBUG
  if (!overlapping)
    {
      const size_type n_global_elements =
        Utilities::MPI::sum(n_elements(), communicator);

      Assert(n_global_elements == size(),
             ExcMessage("You are trying to create a Tpetra::Map object "
                        "that partitions elements of an index set "
                        "between processors. However, the union of the "
                        "index sets on different processors does not "
                        "contain all indices exactly once: the sum of "
                        "the number of entries the various processors "
                        "want to store locally is " +
                        Utilities::to_string(n_global_elements) +
                        " whereas the total size of the object to be "
                        "allocated is " + Utilities::to_string(size()) +
                        ". In other words, there are "
                        "either indices that are not spoken for "
                        "by any processor, or there are indices that are "
                        "claimed by multiple processors."));
    }
#    endif

  // An IndexSet that is ascending and 1:1 corresponds to a linear
  // Tpetra::Map; overlapping IndexSets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);
  if (linear)
    return Tpetra::Map<int, types::global_dof_index>(
      size(),
      n_elements(),
      0,
#    ifdef DEAL_II_WITH_MPI
      Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
#    else
      Teuchos::rcp(new Teuchos::Comm<int>())
#    endif
    );
  else
    {
      std::vector<size_type> indices;
      fill_index_vector(indices);
      std::vector<types::global_dof_index> int_indices(indices.size());
      std::copy(indices.begin(), indices.end(), int_indices.begin());
      const Teuchos::ArrayView<types::global_dof_index> arr_view(int_indices);
      return Tpetra::Map<int, types::global_dof_index>(
        size(),
        arr_view,
        0,
#    ifdef DEAL_II_WITH_MPI
        Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
#    else
        Teuchos::rcp(new Teuchos::Comm<int>())
#    endif
      );
    }
}

#  endif

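// The Epetra counterpart: build an Epetra_Map describing the same partition.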
Epetra_Map
IndexSet::make_trilinos_map(const MPI_Comm &communicator,
                            const bool      overlapping) const
{
  compress();
  (void)communicator;

#  ifdef DEBUG
  if (!overlapping)
    {
      const size_type n_global_elements =
        Utilities::MPI::sum(n_elements(), communicator);

      Assert(n_global_elements == size(),
             ExcMessage("You are trying to create an Epetra_Map object "
                        "that partitions elements of an index set "
                        "between processors. However, the union of the "
                        "index sets on different processors does not "
                        "contain all indices exactly once: the sum of "
                        "the number of entries the various processors "
                        "want to store locally is " +
                        Utilities::to_string(n_global_elements) +
                        " whereas the total size of the object to be "
                        "allocated is " + Utilities::to_string(size()) +
                        ". In other words, there are "
                        "either indices that are not spoken for "
                        "by any processor, or there are indices that are "
                        "claimed by multiple processors."));
    }
#  endif

  // An IndexSet that is ascending and 1:1 corresponds to a linear
  // Epetra_Map; overlapping IndexSets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);

  if (linear)
    return Epetra_Map(TrilinosWrappers::types::int_type(size()),
                      TrilinosWrappers::types::int_type(n_elements()),
                      0,
#  ifdef DEAL_II_WITH_MPI
                      Epetra_MpiComm(communicator)
#  else
                      Epetra_SerialComm()
#  endif
    );
  else
    {
      std::vector<size_type> indices;
      fill_index_vector(indices);
      return Epetra_Map(
        TrilinosWrappers::types::int_type(-1),
        TrilinosWrappers::types::int_type(n_elements()),
        (n_elements() > 0 ?
           reinterpret_cast<TrilinosWrappers::types::int_type *>(
             indices.data()) :
           nullptr),
        0,
#  ifdef DEAL_II_WITH_MPI
        Epetra_MpiComm(communicator)
#  else
        Epetra_SerialComm()
#  endif
      );
    }
}
#endif // DEAL_II_WITH_TRILINOS

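// Determine whether the union of the IndexSets on all processors covers every
// index exactly once (one-to-one) and whether the locally owned blocks are
// ordered by ascending process rank.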
bool
IndexSet::is_ascending_and_one_to_one(const MPI_Comm &communicator) const
{
  // If the sum of the locally owned elements does not add up to the total
  // size, the IndexSet cannot cover every index exactly once.
  const size_type n_global_elements =
    Utilities::MPI::sum(n_elements(), communicator);
  if (n_global_elements != size())
    return false;

  if (n_global_elements == 0)
    return true;

#ifdef DEAL_II_WITH_MPI
  // Non-contiguous IndexSets cannot be linear.
  const bool all_contiguous =
    (Utilities::MPI::min(is_contiguous() ? 1 : 0, communicator) == 1);
  if (!all_contiguous)
    return false;

  bool is_globally_ascending = true;
  // Gather the first locally owned index of every process on the root and
  // check there that the sequence is strictly ascending.
  const types::global_dof_index first_local_dof =
    (n_elements() > 0) ? *(begin()) : numbers::invalid_dof_index;

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
  const std::vector<types::global_dof_index> global_dofs =
    Utilities::MPI::gather(communicator, first_local_dof, 0);

  if (my_rank == 0)
    {
      // skip processes that own no elements at all
      types::global_dof_index index = 0;
      while (global_dofs[index] == numbers::invalid_dof_index)
        ++index;
      types::global_dof_index old_dof = global_dofs[index++];
      for (; index < global_dofs.size(); ++index)
        {
          const types::global_dof_index new_dof = global_dofs[index];
          if (new_dof != numbers::invalid_dof_index)
            {
              if (new_dof <= old_dof)
                {
                  is_globally_ascending = false;
                  break;
                }
              else
                old_dof = new_dof;
            }
        }
    }

  // now broadcast the result from the root to all other processes
  int is_ascending = is_globally_ascending ? 1 : 0;
  int ierr = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
  AssertThrowMPI(ierr);

  return (is_ascending == 1);
#else
  return true;
#endif
}
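
// Usage sketch (not part of index_set.cc): a minimal illustration, assuming
// deal.II is configured with MPI and Trilinos, of how the routines above are
// typically combined. Each process owns one contiguous block of 100 indices,
// so the partition is ascending and one-to-one and make_trilinos_map() can
// take the linear path. The function name 'example_partition' is made up for
// this sketch.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

void example_partition(const MPI_Comm communicator)
{
  const unsigned int my_rank =
    dealii::Utilities::MPI::this_mpi_process(communicator);
  const unsigned int n_ranks =
    dealii::Utilities::MPI::n_mpi_processes(communicator);

  // index space of 100 indices per process; this process owns one block
  dealii::IndexSet owned(100 * n_ranks);
  owned.add_range(100 * my_rank, 100 * (my_rank + 1));
  owned.compress();

#ifdef DEAL_II_WITH_TRILINOS
  // non-overlapping, ascending, one-to-one partition -> linear Epetra_Map
  const Epetra_Map map = owned.make_trilinos_map(communicator, false);
  (void)map;
#endif
}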