#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#  endif
#  include <Epetra_Map.h>
#  include <Epetra_SerialComm.h>
#endif

#ifdef DEAL_II_WITH_TRILINOS

#  ifdef DEAL_II_WITH_64BIT_INDICES
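
// Constructor that builds an IndexSet from a Trilinos Epetra_BlockMap, using
// the 64-bit global-index accessors; the 32-bit variant further down is
// identical except for the accessor names.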
  , index_space_size(1 + map.MaxAllGID64())

  Assert(map.MinAllGID64() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));
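
      // In the general, non-contiguous case the locally owned global indices
      // are copied out of the map. MyGlobalElements64() returns a pointer to
      // Trilinos' 64-bit index type, which is reinterpreted as size_type here.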
      const size_type n_indices = map.NumMyElements();
      size_type      *indices =
        reinterpret_cast<size_type *>(map.MyGlobalElements64());
      add_indices(indices, indices + n_indices);
  , index_space_size(1 + map.MaxAllGID())

  Assert(map.MinAllGID() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));
      const size_type n_indices = map.NumMyElements();
      unsigned int   *indices =
        reinterpret_cast<unsigned int *>(map.MyGlobalElements());
#endif // ifdef DEAL_II_WITH_TRILINOS
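

// Range-merging step of compression: contiguous or overlapping ranges are
// merged in place, and the vector of ranges is then shrunk to the merged
// entries.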
  std::vector<Range>::iterator store = ranges.begin();
  for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();)
      std::vector<Range>::iterator next = i;
      while (next != ranges.end() && (next->begin <= last_index))
          last_index = std::max(last_index, next->end);

      *store = Range(first_index, last_index);
  if (store != ranges.end())
      std::vector<Range> new_ranges(ranges.begin(), store);
  size_type next_index = 0, largest_range_size = 0;
  for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end(); ++i)
      i->nth_index_in_set = next_index;
      next_index += (i->end - i->begin);
      if (i->end - i->begin > largest_range_size)
          largest_range_size = i->end - i->begin;
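

// Intersection of two compressed IndexSets: both sorted range lists are walked
// in parallel, whichever range ends first is advanced, and every overlap is
// added to the result.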
  std::vector<Range>::const_iterator r1 = ranges.begin(),
      if (r1->end <= r2->begin)
        ++r1;
      else if (r2->end <= r1->begin)
        ++r2;

          Assert(((r1->begin <= r2->begin) && (r1->end > r2->begin)) ||
                   ((r2->begin <= r1->begin) && (r2->end > r1->begin)),
                 ExcInternalError());

          result.add_range(std::max(r1->begin, r2->begin),
                           std::min(r1->end, r2->end));

          if (r1->end <= r2->end)
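

// Selecting the part of the set that falls into a half-open window
// [begin, end): only stored ranges that overlap the window contribute.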
         ExcMessage(
           "End index needs to be larger or equal to begin index!"));

  std::vector<Range>::const_iterator r1 = ranges.begin();

  while (r1 != ranges.end())
      if ((r1->end > begin) && (r1->begin < end))
      else if (r1->begin >= end)
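

// Splitting an IndexSet into per-block pieces: for each block size in
// n_indices_per_block, the corresponding window of this set is extracted with
// get_view() and appended to the result.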
std::vector<IndexSet>
IndexSet::split_by_block(
  const std::vector<types::global_dof_index> &n_indices_per_block) const

  std::vector<IndexSet> partitioned;
  const unsigned int    n_blocks = n_indices_per_block.size();

  for (const auto n_block_indices : n_indices_per_block)
      partitioned.push_back(this->get_view(start, start + n_block_indices));
      start += n_block_indices;
      sum += partitioned.back().size();
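

// Set subtraction: this set's ranges (own_it) and the other set's ranges
// (other_it) are walked in parallel. The part of an own range that lies before
// the current other range is saved in new_ranges, and the rest of the own
// range is shrunk to start at the other range's end.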
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it   = ranges.begin();
  std::vector<Range>::iterator other_it = other.ranges.begin();

  while (own_it != ranges.end() && other_it != other.ranges.end())
      if (own_it->end <= other_it->begin)
      if (own_it->begin >= other_it->end)

      if (own_it->begin < other_it->begin)
          Range r(own_it->begin, other_it->begin);
          new_ranges.push_back(r);

      own_it->begin = other_it->end;
      if (own_it->begin > own_it->end)
          own_it->begin = own_it->end;
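
  // Own ranges that were shrunk to zero length above are now erased.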
  for (std::vector<Range>::iterator it = ranges.begin(); it != ranges.end();)
      if (it->begin >= it->end)
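
  // Finally, the pieces saved in new_ranges are merged back into the set.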
  const std::vector<Range>::iterator end = new_ranges.end();
  for (std::vector<Range>::iterator it = new_ranges.begin(); it != end; ++it)
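

// Tensor product with another IndexSet: for every element el of this set, a
// copy of the other set shifted by el * other.size() is added to the result.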
  for (const auto el : *this)
    set.add_indices(other, el * other.size());
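

// pop_back() and pop_front() both require a non-empty set: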
         ExcMessage(
           "pop_back() failed, because this IndexSet contains no entries."));

         ExcMessage(
           "pop_front() failed, because this IndexSet contains no entries."));
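

// Merging another IndexSet, shifted by an offset, into this one: the two
// sorted range lists are combined into new_ranges. Ranges that do not overlap
// are copied (the other set's ranges shifted by offset); overlapping ones are
// merged into a single range spanning both.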
  if ((this == &other) && (offset == 0))

  if (other.ranges.size() != 0)

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = other.ranges.begin();

  std::vector<Range> new_ranges;

      if (r2 == other.ranges.end() ||
          (r1 != ranges.end() && r1->end < (r2->begin + offset)))
          new_ranges.push_back(*r1);
      else if (r1 == ranges.end() || (r2->end + offset) < r1->begin)
          new_ranges.emplace_back(r2->begin + offset, r2->end + offset);
          Range next(std::min(r1->begin, r2->begin + offset),
                     std::max(r1->end, r2->end + offset));
          new_ranges.push_back(next);
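

// Plain-text serialization: write() streams the total size, the number of
// ranges, and one "begin end" pair per range; read() below parses the same
// format back, one range at a time.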
  out << size() << " ";
  out << ranges.size() << std::endl;
  std::vector<Range>::const_iterator r = ranges.begin();
  for (; r != ranges.end(); ++r)
      out << r->begin << " " << r->end << std::endl;
  unsigned int n_ranges;

  for (unsigned int i = 0; i < n_ranges; ++i)
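

// Binary serialization: block_write() dumps the number of ranges followed by
// the raw bytes of the Range array itself.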
  std::size_t n_ranges = ranges.size();
  out.write(reinterpret_cast<const char *>(&n_ranges), sizeof(n_ranges));
  if (ranges.empty() == false)
    out.write(reinterpret_cast<const char *>(&*ranges.begin()),
              ranges.size() * sizeof(Range));
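
// block_read() reverses this: the overall size and the number of ranges are
// read first, then the Range array is read back in one block.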
  std::size_t n_ranges;
  in.read(reinterpret_cast<char *>(&size), sizeof(size));
  in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));
  in.read(reinterpret_cast<char *>(&*ranges.begin()),
          ranges.size() * sizeof(Range));
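

// Expanding the compressed representation into an explicit list of indices:
// every entry of every stored range is appended to the output vector.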
  for (const auto &range : ranges)
    for (size_type entry = range.begin; entry < range.end; ++entry)
      indices.push_back(entry);
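

// Conversion to a Tpetra::Map. Unless an overlapping map is requested, the
// index sets across all processors have to partition the index space exactly,
// which is what the assertion below checks.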
#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_TRILINOS_WITH_TPETRA

Tpetra::Map<int, types::global_dof_index>
IndexSet::make_tpetra_map(const MPI_Comm &communicator,
                          const bool      overlapping) const
         ExcMessage(
           "You are trying to create a Tpetra::Map object "
           "that partitions elements of an index set "
           "between processors. However, the union of the "
           "index sets on different processors does not "
           "contain all indices exactly once: the sum of "
           "the number of entries the various processors "
           "want to store locally is " +
           " whereas the total size of the object to be "
           ". In other words, there are "
           "either indices that are not spoken for "
           "by any processor, or there are indices that are "
           "claimed by multiple processors."));
    return Tpetra::Map<int, types::global_dof_index>(
#    ifdef DEAL_II_WITH_MPI
      Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
#    else
      Teuchos::rcp(new Teuchos::Comm<int>())
#    endif
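
  // In the general case, the locally owned indices are collected into an
  // explicit list and handed to the Tpetra::Map constructor.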
      std::vector<size_type> indices;
      fill_index_vector(indices);
      std::vector<types::global_dof_index> int_indices(indices.size());
      std::copy(indices.begin(), indices.end(), int_indices.begin());
      const Teuchos::ArrayView<types::global_dof_index> arr_view(int_indices);
      return Tpetra::Map<int, types::global_dof_index>(
#    ifdef DEAL_II_WITH_MPI
        Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
#    else
        Teuchos::rcp(new Teuchos::Comm<int>())
#    endif
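

// The Epetra_Map variant follows the same pattern, with an Epetra_MpiComm
// (or a serial communicator) in place of the Teuchos communicator.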
Epetra_Map
IndexSet::make_trilinos_map(const MPI_Comm &communicator,
                            const bool      overlapping) const
         ExcMessage(
           "You are trying to create an Epetra_Map object "
           "that partitions elements of an index set "
           "between processors. However, the union of the "
           "index sets on different processors does not "
           "contain all indices exactly once: the sum of "
           "the number of entries the various processors "
           "want to store locally is " +
           " whereas the total size of the object to be "
           ". In other words, there are "
           "either indices that are not spoken for "
           "by any processor, or there are indices that are "
           "claimed by multiple processors."));
#  ifdef DEAL_II_WITH_MPI
             Epetra_MpiComm(communicator)

      std::vector<size_type> indices;

#  ifdef DEAL_II_WITH_MPI
             Epetra_MpiComm(communicator)
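

// Checking whether the set, taken across all MPI ranks, forms an ascending and
// one-to-one partition of [0, size()): every rank's locally owned part must be
// contiguous, and the indices gathered into global_dofs must be strictly
// increasing.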
#ifdef DEAL_II_WITH_MPI
  const bool all_contiguous =

  bool is_globally_ascending = true;

  const std::vector<types::global_dof_index> global_dofs =

      for (; index < global_dofs.size(); ++index)
          if (new_dof <= old_dof)
              is_globally_ascending = false;
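
  // The verdict reached on rank 0 is then broadcast to all other ranks.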
  int is_ascending = is_globally_ascending ? 1 : 0;
  int ierr         = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
  AssertThrowMPI(ierr);

  return (is_ascending == 1);

#endif // DEAL_II_WITH_MPI