index_set.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2009 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/index_set.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>

#include <deal.II/lac/exceptions.h>

#include <boost/container/small_vector.hpp>

#include <vector>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#  endif
#  include <Epetra_Map.h>
#  include <Epetra_SerialComm.h>
#  ifdef DEAL_II_TRILINOS_WITH_TPETRA
#    include <Tpetra_Map.hpp>
#  endif
#endif

DEAL_II_NAMESPACE_OPEN


#ifdef DEAL_II_WITH_TRILINOS

#  ifdef DEAL_II_TRILINOS_WITH_TPETRA

template <typename NodeType>
IndexSet::IndexSet(
  const Teuchos::RCP<
    const Tpetra::Map<int, types::signed_global_dof_index, NodeType>> &map)
  : is_compressed(true)
  , index_space_size(1 + map->getMaxAllGlobalIndex())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map->getMinAllGlobalIndex() == 0,
         ExcMessage(
           "The Tpetra::Map does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  // For a contiguous map, we do not need to go through the whole data...
  if (map->isContiguous())
    add_range(size_type(map->getMinGlobalIndex()),
              size_type(map->getMaxGlobalIndex() + 1));
  else
    {
#    if DEAL_II_TRILINOS_VERSION_GTE(13, 4, 0)
      const size_type n_indices = map->getLocalNumElements();
#    else
      const size_type n_indices = map->getNodeNumElements();
#    endif
      const types::signed_global_dof_index *indices =
        map->getMyGlobalIndices().data();
      add_indices(indices, indices + n_indices);
    }
  compress();
}

#  endif // DEAL_II_TRILINOS_WITH_TPETRA


// the 64-bit path uses a few different names, so put that into a separate
// implementation

#  ifdef DEAL_II_WITH_64BIT_INDICES

IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID64())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID64() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  // For a contiguous map, we do not need to go through the whole data...
  if (map.LinearMap())
    add_range(size_type(map.MinMyGID64()), size_type(map.MaxMyGID64() + 1));
  else
    {
      const size_type n_indices = map.NumMyElements();
      size_type      *indices =
        reinterpret_cast<size_type *>(map.MyGlobalElements64());
      add_indices(indices, indices + n_indices);
    }
  compress();
}

#  else

// this is the standard 32-bit implementation

IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  // For a contiguous map, we do not need to go through the whole data...
  if (map.LinearMap())
    add_range(size_type(map.MinMyGID()), size_type(map.MaxMyGID() + 1));
  else
    {
      const size_type n_indices = map.NumMyElements();
      unsigned int   *indices =
        reinterpret_cast<unsigned int *>(map.MyGlobalElements());
      add_indices(indices, indices + n_indices);
    }
  compress();
}

#  endif

#endif // ifdef DEAL_II_WITH_TRILINOS



void
IndexSet::do_compress() const
{
  {
    // we will, in the following, modify mutable variables. this can only
    // work in multithreaded applications if we lock the data structures
    // via a mutex, so that users can call 'const' functions from threads
    // in parallel (and these 'const' functions can then call compress()
    // which itself calls the current function)
    std::lock_guard<std::mutex> lock(compress_mutex);

    // see if any of the contiguous ranges can be merged. do not use
    // std::vector::erase in-place as it is quadratic in the number of
    // ranges. since the ranges are sorted by their first index, determining
    // overlap isn't all that hard
    std::vector<Range>::iterator store = ranges.begin();
    for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();)
      {
        std::vector<Range>::iterator next = i;
        ++next;

        size_type first_index = i->begin;
        size_type last_index  = i->end;

        // see if we can merge any of the following ranges
        while (next != ranges.end() && (next->begin <= last_index))
          {
            last_index = std::max(last_index, next->end);
            ++next;
          }
        i = next;

        // store the new range in the slot we last occupied
        *store = Range(first_index, last_index);
        ++store;
      }
    // use a compact array with exactly the right amount of storage
    if (store != ranges.end())
      {
        std::vector<Range> new_ranges(ranges.begin(), store);
        ranges.swap(new_ranges);
      }

    // now compute indices within set and the range with most elements
    size_type next_index = 0, largest_range_size = 0;
    for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();
         ++i)
      {
        Assert(i->begin < i->end, ExcInternalError());

        i->nth_index_in_set = next_index;
        next_index += (i->end - i->begin);
        if (i->end - i->begin > largest_range_size)
          {
            largest_range_size = i->end - i->begin;
            largest_range      = i - ranges.begin();
          }
      }
    is_compressed = true;

    // check that next_index is correct. needs to be after the previous
    // statement because we otherwise will get into an endless loop
    Assert(next_index == n_elements(), ExcInternalError());
  }

  if constexpr (running_in_debug_mode())
    {
      // A consistency check: We should only ever have added indices
      // that are within the range of the index set. Instead of doing
      // this in every one of the many functions that add indices,
      // do this in the current, central location
      for (const auto &range : ranges)
        Assert((range.begin < index_space_size) &&
                 (range.end <= index_space_size),
               ExcMessage(
                 "In the process of creating the current IndexSet "
                 "object, you added indices beyond the size of the index "
                 "space. Specifically, you added elements that form the "
                 "range [" +
                 std::to_string(range.begin) + "," + std::to_string(range.end) +
                 "), but the size of the index space is only " +
                 std::to_string(index_space_size) + "."));
    }
}
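
// Usage sketch (editor's illustration, not part of the deal.II sources):
// do_compress() is what ultimately merges overlapping ranges when a caller
// invokes compress(), e.g.
//
//   IndexSet set(100);     // index space [0,100)
//   set.add_range(0, 10);  // stored as [0,10)
//   set.add_range(5, 20);  // stored as [5,20), overlapping the first range
//   set.compress();        // the two ranges merge into the single range [0,20)
//   // now set.n_elements() == 20 and set.n_intervals() == 1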



#ifndef DOXYGEN
IndexSet
IndexSet::operator&(const IndexSet &is) const
{
  Assert(size() == is.size(), ExcDimensionMismatch(size(), is.size()));

  compress();
  is.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = is.ranges.begin();
  IndexSet result(size());

  while ((r1 != ranges.end()) && (r2 != is.ranges.end()))
    {
      // if r1 and r2 do not overlap at all, then move the pointer that sits
      // to the left of the other up by one
      if (r1->end <= r2->begin)
        ++r1;
      else if (r2->end <= r1->begin)
        ++r2;
      else
        {
          // the ranges must overlap somehow
          Assert(((r1->begin <= r2->begin) && (r1->end > r2->begin)) ||
                   ((r2->begin <= r1->begin) && (r2->end > r1->begin)),
                 ExcInternalError());

          // add the overlapping range to the result
          result.add_range(std::max(r1->begin, r2->begin),
                           std::min(r1->end, r2->end));

          // now move that iterator that ends earlier one up. note that it has
          // to be this one because a subsequent range may still have a chance
          // of overlapping with the range that ends later
          if (r1->end <= r2->end)
            ++r1;
          else
            ++r2;
        }
    }

  result.compress();
  return result;
}
#endif
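
// Usage sketch (editor's illustration, not part of the deal.II sources):
// the intersection keeps exactly the indices present in both sets, e.g.
//
//   IndexSet a(50), b(50);     // both must live in the same index space
//   a.add_range(0, 20);        // a = [0,20)
//   b.add_range(10, 30);       // b = [10,30)
//   const IndexSet c = a & b;  // c = [10,20)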



IndexSet
IndexSet::get_view(const size_type begin, const size_type end) const
{
  Assert(begin <= end,
         ExcMessage(
           "End index needs to be larger than or equal to begin index!"));
  Assert(end <= size(),
         ExcMessage("You are asking for a view into an IndexSet object "
                    "that would cover the sub-range [" +
                    std::to_string(begin) + ',' + std::to_string(end) +
                    "). But this is not a subset of the range "
                    "of the current object, which is [0," +
                    std::to_string(size()) + ")."));

  IndexSet result(end - begin);
  std::vector<Range>::const_iterator r1 = ranges.begin();

  while (r1 != ranges.end())
    {
      if ((r1->end > begin) && (r1->begin < end))
        {
          result.add_range(std::max(r1->begin, begin) - begin,
                           std::min(r1->end, end) - begin);
        }
      else if (r1->begin >= end)
        break;

      ++r1;
    }

  result.compress();
  return result;
}
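
// Usage sketch (editor's illustration, not part of the deal.II sources):
// the view is renumbered relative to 'begin', so it lives in the smaller
// index space [0, end-begin), e.g.
//
//   IndexSet set(100);
//   set.add_range(15, 25);                       // set contains [15,25)
//   const IndexSet view = set.get_view(10, 30);  // view of the window [10,30)
//   // view.size() == 20 and view contains [5,15)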



IndexSet
IndexSet::get_view(const IndexSet &mask) const
{
  Assert(size() == mask.size(),
         ExcMessage("The mask must have the same size index space "
                    "as the index set it is applied to."));

  // If the mask is an empty set, then the view is also empty:
  if (mask == IndexSet())
    return {};

  // For everything, it is more efficient to work on compressed sets:
  compress();
  mask.compress();

  // If the mask has a single range, then we can just defer to the
  // previous function
  if (mask.ranges.size() == 1)
    return get_view(mask.ranges[0].begin, mask.ranges[0].end);

  // For the general case where the mask is an arbitrary set,
  // the situation is slightly more complicated. We need to walk
  // the ranges of the two index sets in parallel and search for
  // overlaps, and then appropriately shift

  // we save all new ranges to our IndexSet in a temporary vector and
  // add all of them in one go at the end.
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it  = ranges.begin();
  std::vector<Range>::iterator mask_it = mask.ranges.begin();

  while ((own_it != ranges.end()) && (mask_it != mask.ranges.end()))
    {
      // If our own range lies completely ahead of the current
      // range in the mask, move forward and start the loop body
      // anew. If this was the last range, the 'while' loop above
      // will terminate, so we don't have to check for end iterators
      if (own_it->end <= mask_it->begin)
        {
          ++own_it;
          continue;
        }

      // Do the same if the current mask range lies completely ahead of
      // the current range of this object:
      if (mask_it->end <= own_it->begin)
        {
          ++mask_it;
          continue;
        }

      // Now own_it and mask_it overlap. Check that this is true by
      // enumerating the cases that can happen. This is
      // surprisingly tricky because the two intervals can intersect in
      // a number of different ways, but there really are only the four
      // following possibilities:

      // Case 1: our interval overlaps the left end of the mask's interval.
      //
      // So we need to add the elements from the first element of the mask's
      // interval to the end of our own interval. But we need to shift the
      // indices so that they correspond to the position of each element
      // within the mask; fortunately (because we compressed the mask), this
      // is recorded in the mask's ranges.
      if ((own_it->begin <= mask_it->begin) && (own_it->end <= mask_it->end))
        {
          new_ranges.emplace_back(mask_it->nth_index_in_set,
                                  mask_it->nth_index_in_set +
                                    (own_it->end - mask_it->begin));
        }
      else
        // Case 2: our interval overlaps the tail end of the mask's interval
        if ((mask_it->begin <= own_it->begin) && (mask_it->end <= own_it->end))
          {
            const size_type offset_within_mask_interval =
              own_it->begin - mask_it->begin;
            new_ranges.emplace_back(mask_it->nth_index_in_set +
                                      offset_within_mask_interval,
                                    mask_it->nth_index_in_set +
                                      (mask_it->end - mask_it->begin));
          }
        else
          // Case 3: our own interval completely encloses the mask's interval
          if ((own_it->begin <= mask_it->begin) &&
              (own_it->end >= mask_it->end))
            {
              new_ranges.emplace_back(mask_it->nth_index_in_set,
                                      mask_it->nth_index_in_set +
                                        (mask_it->end - mask_it->begin));
            }
          else
            // Case 4: the mask's interval completely encloses our own interval
            if ((mask_it->begin <= own_it->begin) &&
                (mask_it->end >= own_it->end))
              {
                const size_type offset_within_mask_interval =
                  own_it->begin - mask_it->begin;
                new_ranges.emplace_back(mask_it->nth_index_in_set +
                                          offset_within_mask_interval,
                                        mask_it->nth_index_in_set +
                                          offset_within_mask_interval +
                                          (own_it->end - own_it->begin));
              }
            else
              DEAL_II_ASSERT_UNREACHABLE();

      // We considered the overlap of these two intervals. It may of course
      // be that one of them overlaps with another one, but that can only
      // be the case for the interval that extends further to the right. So
      // we can safely move on from the interval that terminates earlier:
      if (own_it->end < mask_it->end)
        ++own_it;
      else if (mask_it->end < own_it->end)
        ++mask_it;
      else
        {
          // The intervals ended at the same point. We can move on from both.
          // (The algorithm would also work if we only moved on from one,
          // but we can micro-optimize here without too much effort.)
          ++own_it;
          ++mask_it;
        }
    }

  // Now turn the ranges of overlap we have accumulated into an IndexSet in
  // its own right:
  IndexSet result(mask.n_elements());
  for (const auto &range : new_ranges)
    result.add_range(range.begin, range.end);
  result.compress();

  return result;
}
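
// Usage sketch (editor's illustration, not part of the deal.II sources):
// with a mask, indices are renumbered by their position *within the mask*,
// not by a constant shift, e.g.
//
//   IndexSet set(100), mask(100);
//   set.add_range(12, 14);   // set  = {12, 13}
//   mask.add_range(0, 2);    // mask = {0, 1, 12, 13}: here 12 and 13 are
//   mask.add_range(12, 14);  //   the 3rd and 4th elements of the mask
//   const IndexSet view = set.get_view(mask);
//   // view.size() == 4 (the number of mask elements) and view = {2, 3}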



std::vector<IndexSet>
IndexSet::split_by_block(
  const std::vector<types::global_dof_index> &n_indices_per_block) const
{
  std::vector<IndexSet> partitioned;
  const unsigned int    n_blocks = n_indices_per_block.size();

  partitioned.reserve(n_blocks);
  types::global_dof_index start = 0;
  for (const auto n_block_indices : n_indices_per_block)
    {
      partitioned.push_back(this->get_view(start, start + n_block_indices));
      start += n_block_indices;
    }

  if constexpr (running_in_debug_mode())
    {
      types::global_dof_index sum = 0;
      for (const auto &partition : partitioned)
        {
          sum += partition.size();
        }
      AssertDimension(sum, this->size());
    }

  return partitioned;
}
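
// Usage sketch (editor's illustration, not part of the deal.II sources):
// the block sizes must add up to size(), and each returned set is a view
// into the corresponding contiguous block, e.g.
//
//   IndexSet set(10);
//   set.add_range(2, 8);                          // set = [2,8)
//   const std::vector<IndexSet> blocks =
//     set.split_by_block({4, 6});                 // blocks [0,4) and [4,10)
//   // blocks[0] = [2,4) in a size-4 space, blocks[1] = [0,4) in a size-6 space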



void
IndexSet::subtract_set(const IndexSet &other)
{
  compress();
  other.compress();
  is_compressed = false;


  // we save all new ranges to our IndexSet in a temporary vector and
  // add all of them in one go at the end.
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it   = ranges.begin();
  std::vector<Range>::iterator other_it = other.ranges.begin();

  while (own_it != ranges.end() && other_it != other.ranges.end())
    {
      // advance own iterator until we get an overlap
      if (own_it->end <= other_it->begin)
        {
          new_ranges.push_back(*own_it);
          ++own_it;
          continue;
        }
      // we are done with other_it, so advance
      if (own_it->begin >= other_it->end)
        {
          ++other_it;
          continue;
        }

      // Now own_it and other_it overlap. First save the part of own_it that
      // is before other_it (if not empty).
      if (own_it->begin < other_it->begin)
        {
          Range r(own_it->begin, other_it->begin);
          r.nth_index_in_set = 0; // fix warning of unused variable
          new_ranges.push_back(r);
        }
      // change own_it to the sub range behind other_it. Do not delete own_it
      // in any case. As removal would invalidate iterators, we just shrink
      // the range to an empty one.
      own_it->begin = other_it->end;
      if (own_it->begin > own_it->end)
        {
          own_it->begin = own_it->end;
          ++own_it;
        }

      // continue without advancing iterators, the right one will be advanced
      // next.
    }

  // make sure to take over the remaining ranges
  for (; own_it != ranges.end(); ++own_it)
    new_ranges.push_back(*own_it);

  ranges.clear();

  // done, now add the temporary ranges
  const std::vector<Range>::iterator end = new_ranges.end();
  for (std::vector<Range>::iterator it = new_ranges.begin(); it != end; ++it)
    add_range(it->begin, it->end);

  compress();
}
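
// Usage sketch (editor's illustration, not part of the deal.II sources):
// subtraction removes all elements of 'other' from the current set,
// splitting ranges where necessary, e.g.
//
//   IndexSet a(100), b(100);
//   a.add_range(0, 10);   // a = [0,10)
//   b.add_range(4, 6);    // b = [4,6)
//   a.subtract_set(b);    // a = [0,4) plus [6,10), i.e. 8 elements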



IndexSet
IndexSet::tensor_product(const IndexSet &other) const
{
  IndexSet set(this->size() * other.size());
  for (const auto el : *this)
    set.add_indices(other, el * other.size());
  set.compress();
  return set;
}
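
// Usage sketch (editor's illustration, not part of the deal.II sources):
// each element 'el' of the current set contributes a copy of 'other',
// shifted by el * other.size(), e.g.
//
//   IndexSet a(3), b(4);
//   a.add_index(1);      // a = {1}
//   a.add_index(2);      // a = {1, 2}
//   b.add_range(0, 2);   // b = {0, 1}
//   const IndexSet t = a.tensor_product(b);
//   // t.size() == 12 and t = {4, 5, 8, 9}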



IndexSet::size_type
IndexSet::pop_back()
{
  Assert(is_empty() == false,
         ExcMessage(
           "pop_back() failed, because this IndexSet contains no entries."));

  const size_type index = ranges.back().end - 1;
  --ranges.back().end;

  if (ranges.back().begin == ranges.back().end)
    ranges.pop_back();

  return index;
}



IndexSet::size_type
IndexSet::pop_front()
{
  Assert(is_empty() == false,
         ExcMessage(
           "pop_front() failed, because this IndexSet contains no entries."));

  const size_type index = ranges.front().begin;
  ++ranges.front().begin;

  if (ranges.front().begin == ranges.front().end)
    ranges.erase(ranges.begin());

  // We have to set this in any case, because nth_index_in_set is no longer
  // up to date for all but the first range
  is_compressed = false;

  return index;
}
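
// Usage sketch (editor's illustration, not part of the deal.II sources):
// both functions remove and return a single element from one end of the
// set, e.g.
//
//   IndexSet set(100);
//   set.add_range(10, 13);               // set = {10, 11, 12}
//   const auto last  = set.pop_back();   // last  == 12, set = {10, 11}
//   const auto first = set.pop_front();  // first == 10, set = {11}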



void
IndexSet::add_range_lower_bound(const Range &new_range)
{
  // if the inserted range is already within the range we find by lower_bound,
  // there is no need to do anything; we do not try to be clever here and
  // leave all other work to compress().
  const auto insert_position =
    Utilities::lower_bound(ranges.begin(), ranges.end(), new_range);
  if (insert_position == ranges.end() ||
      insert_position->begin > new_range.begin ||
      insert_position->end < new_range.end)
    ranges.insert(insert_position, new_range);
}



void
IndexSet::add_ranges_internal(
  boost::container::small_vector<std::pair<size_type, size_type>, 200>
            &tmp_ranges,
  const bool ranges_are_sorted)
{
  if (!ranges_are_sorted)
    std::sort(tmp_ranges.begin(), tmp_ranges.end());

  // if we have many ranges, we first construct a temporary index set (where
  // we add ranges in a consecutive way, so fast), otherwise, we work with
  // add_range(). the number 9 is chosen heuristically given the fact that
  // there are typically up to 8 independent ranges when adding the degrees of
  // freedom on a 3d cell or 9 when adding degrees of freedom of faces. if
  // doing cell-by-cell additions, we want to avoid repeated calls to
  // IndexSet::compress() which gets called upon merging two index sets, so we
  // want to be in the other branch then.
  if (tmp_ranges.size() > 9)
    {
      IndexSet tmp_set(size());
      tmp_set.ranges.reserve(tmp_ranges.size());
      for (const auto &i : tmp_ranges)
        tmp_set.add_range(i.first, i.second);

      // Case if we have zero or just one range: Add into the other set with
      // its indices, as that is cheaper
      if (this->ranges.size() <= 1)
        {
          if (this->ranges.size() == 1)
            tmp_set.add_range(ranges[0].begin, ranges[0].end);
          std::swap(*this, tmp_set);
        }
      else
        this->add_indices(tmp_set);
    }
  else
    for (const auto &i : tmp_ranges)
      add_range(i.first, i.second);
}



void
IndexSet::add_indices(const IndexSet &other, const size_type offset)
{
  if ((this == &other) && (offset == 0))
    return;

  if (other.ranges.size() != 0)
    {
      AssertIndexRange(other.ranges.back().end - 1, index_space_size);
    }

  compress();
  other.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = other.ranges.begin();

  std::vector<Range> new_ranges;
  // just get the start and end of the ranges right in this method, everything
  // else will be done in compress()
  while (r1 != ranges.end() || r2 != other.ranges.end())
    {
      // the two ranges do not overlap or we are at the end of one of the
      // ranges
      if (r2 == other.ranges.end() ||
          (r1 != ranges.end() && r1->end < (r2->begin + offset)))
        {
          new_ranges.push_back(*r1);
          ++r1;
        }
      else if (r1 == ranges.end() || (r2->end + offset) < r1->begin)
        {
          new_ranges.emplace_back(r2->begin + offset, r2->end + offset);
          ++r2;
        }
      else
        {
          // ok, we do overlap, so just take the combination of the current
          // range (do not bother to merge with subsequent ranges)
          Range next(std::min(r1->begin, r2->begin + offset),
                     std::max(r1->end, r2->end + offset));
          new_ranges.push_back(next);
          ++r1;
          ++r2;
        }
    }
  ranges.swap(new_ranges);

  is_compressed = false;
  compress();
}
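
// Usage sketch (editor's illustration, not part of the deal.II sources):
// the second argument shifts all added indices, which is useful when
// concatenating index sets, e.g.
//
//   IndexSet a(20), b(5);
//   a.add_range(0, 3);     // a = [0,3)
//   b.add_range(0, 2);     // b = [0,2)
//   a.add_indices(b, 10);  // a = [0,3) plus [10,12)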



bool
IndexSet::is_subset_of(const IndexSet &other) const
{
  Assert(size() == other.size(),
         ExcMessage("One index set can only be a subset of another if they "
                    "describe index spaces of the same size. The ones in "
                    "question here have sizes " +
                    std::to_string(size()) + " and " +
                    std::to_string(other.size()) + "."));

  // See whether there are indices in the current set that are not in 'other'.
  // If so, then this is clearly not a subset of 'other'.
  IndexSet A_minus_B = *this;
  A_minus_B.subtract_set(other);
  if (A_minus_B.n_elements() > 0)
    return false;
  else
    // Else, every index in 'this' is also in 'other', since we ended up
    // with an empty set upon subtraction. This means that we have a subset:
    return true;
}
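
// Usage sketch (editor's illustration, not part of the deal.II sources):
//
//   IndexSet a(50), b(50);  // the same index space size is required
//   a.add_range(5, 10);     // a = [5,10)
//   b.add_range(0, 20);     // b = [0,20)
//   // a.is_subset_of(b) == true, b.is_subset_of(a) == false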



void
IndexSet::write(std::ostream &out) const
{
  compress();
  out << size() << " ";
  out << ranges.size() << std::endl;
  std::vector<Range>::const_iterator r = ranges.begin();
  for (; r != ranges.end(); ++r)
    {
      out << r->begin << " " << r->end << std::endl;
    }
}



void
IndexSet::read(std::istream &in)
{
  AssertThrow(in.fail() == false, ExcIO());

  size_type    s;
  unsigned int n_ranges;

  in >> s >> n_ranges;
  ranges.clear();
  set_size(s);
  for (unsigned int i = 0; i < n_ranges; ++i)
    {
      AssertThrow(in.fail() == false, ExcIO());

      size_type b, e;
      in >> b >> e;
      add_range(b, e);
    }
}
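
// Usage sketch (editor's illustration, not part of the deal.II sources):
// write() and read() form a round trip through the plain-text format
// "<size> <n_ranges>" followed by one "<begin> <end>" pair per range, e.g.
//
//   IndexSet set(100);
//   set.add_range(3, 9);
//   std::stringstream stream;  // requires <sstream>
//   set.write(stream);         // stream now holds "100 1\n3 9\n"
//   IndexSet copy;
//   copy.read(stream);         // copy == set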


void
IndexSet::block_write(std::ostream &out) const
{
  AssertThrow(out.fail() == false, ExcIO());
  out.write(reinterpret_cast<const char *>(&index_space_size),
            sizeof(index_space_size));
  std::size_t n_ranges = ranges.size();
  out.write(reinterpret_cast<const char *>(&n_ranges), sizeof(n_ranges));
  if (ranges.empty() == false)
    out.write(reinterpret_cast<const char *>(&*ranges.begin()),
              ranges.size() * sizeof(Range));
  AssertThrow(out.fail() == false, ExcIO());
}

void
IndexSet::block_read(std::istream &in)
{
  size_type   size;
  std::size_t n_ranges;
  in.read(reinterpret_cast<char *>(&size), sizeof(size));
  in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));
  // we have to clear ranges first
  ranges.clear();
  set_size(size);
  ranges.resize(n_ranges, Range(0, 0));
  if (n_ranges != 0u)
    in.read(reinterpret_cast<char *>(&*ranges.begin()),
            ranges.size() * sizeof(Range));

  do_compress(); // needed so that largest_range can be recomputed
}



bool
IndexSet::is_element_binary_search(const size_type index) const
{
  // get the element after which we would have to insert a range that
  // consists of all elements from this element to the end of the index
  // range plus one. after this call we know that if p!=end() then
  // p->begin<=index unless there is no such range at all
  //
  // if the searched for element is an element of this range, then we're
  // done. otherwise, the element can't be in one of the following ranges
  // because otherwise p would be a different iterator
  //
  // since we already know the position relative to the largest range (we
  // called compress!), we can perform the binary search on ranges with
  // lower/higher number compared to the largest range
  std::vector<Range>::const_iterator p = std::upper_bound(
    ranges.begin() +
      (index < ranges[largest_range].begin ? 0 : largest_range + 1),
    index < ranges[largest_range].begin ? ranges.begin() + largest_range :
                                          ranges.end(),
    Range(index, size() + 1));

  if (p == ranges.begin())
    return ((index >= p->begin) && (index < p->end));

  Assert((p == ranges.end()) || (p->begin > index), ExcInternalError());

  // now move to that previous range
  --p;
  Assert(p->begin <= index, ExcInternalError());

  return (p->end > index);
}



IndexSet::size_type
IndexSet::nth_index_in_set_binary_search(const size_type n) const
{
  // find out which chunk the local index n belongs to by using a binary
  // search. the comparator is based on the end of the ranges.
  Range r(n, n + 1);
  r.nth_index_in_set = n;

  const std::vector<Range>::const_iterator p = Utilities::lower_bound(
    ranges.begin(), ranges.end(), r, Range::nth_index_compare);

  Assert(p != ranges.end(), ExcInternalError());
  return p->begin + (n - p->nth_index_in_set);
}



IndexSet::size_type
IndexSet::index_within_set_binary_search(const size_type n) const
{
  // we could try to use the main range for splitting up the search range, but
  // since we only come here when the largest range did not contain the index,
  // there is little gain from doing a first step manually.
  Range r(n, n);
  std::vector<Range>::const_iterator p = Utilities::lower_bound(
    ranges.begin(), ranges.end(), r, Range::end_compare);

  // if n is not in this set
  if (p == ranges.end() || p->end == n || p->begin > n)
    return numbers::invalid_dof_index;

  Assert(p != ranges.end(), ExcInternalError());
  Assert(p->begin <= n, ExcInternalError());
  Assert(n < p->end, ExcInternalError());
  return (n - p->begin) + p->nth_index_in_set;
}



IndexSet::ElementIterator
IndexSet::at(const size_type global_index) const
{
  compress();
  AssertIndexRange(global_index, size());

  if (ranges.empty())
    return end();

  std::vector<Range>::const_iterator main_range =
    ranges.begin() + largest_range;

  Range r(global_index, global_index + 1);
  // This optimization makes the bounds for lower_bound smaller by checking
  // the largest range first.
  std::vector<Range>::const_iterator range_begin, range_end;
  if (global_index < main_range->begin)
    {
      range_begin = ranges.begin();
      range_end   = main_range;
    }
  else
    {
      range_begin = main_range;
      range_end   = ranges.end();
    }

  // This will give us the first range p=[a,b[ with b>=global_index using
  // a binary search
  const std::vector<Range>::const_iterator p =
    Utilities::lower_bound(range_begin, range_end, r, Range::end_compare);

  // We couldn't find a range, which means we have no range that contains
  // global_index and also no range behind it, meaning we need to return end().
  if (p == ranges.end())
    return end();

  // Finally, we can have two cases: Either global_index is not in [a,b[,
  // which means we need to return an iterator to a because global_index, ...,
  // a-1 is not in the IndexSet (if branch). Alternatively, global_index is in
  // [a,b[ and we will return an iterator pointing directly at global_index
  // (else branch).
  if (global_index < p->begin)
    return {this, static_cast<size_type>(p - ranges.begin()), p->begin};
  else
    return {this, static_cast<size_type>(p - ranges.begin()), global_index};
}



std::vector<IndexSet::size_type>
IndexSet::get_index_vector() const
{
  compress();

  std::vector<size_type> indices;
  indices.reserve(n_elements());

  for (const auto &range : ranges)
    for (size_type entry = range.begin; entry < range.end; ++entry)
      indices.push_back(entry);

  Assert(indices.size() == n_elements(), ExcInternalError());

  return indices;
}



void
IndexSet::fill_index_vector(std::vector<size_type> &indices) const
{
  indices = get_index_vector();
}



#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_TRILINOS_WITH_TPETRA

template <typename NodeType>
Tpetra::Map<int, types::signed_global_dof_index, NodeType>
IndexSet::make_tpetra_map(const MPI_Comm communicator,
                          const bool     overlapping) const
{
  return *make_tpetra_map_rcp<NodeType>(communicator, overlapping);
}



template <typename NodeType>
Teuchos::RCP<Tpetra::Map<int, types::signed_global_dof_index, NodeType>>
IndexSet::make_tpetra_map_rcp(const MPI_Comm communicator,
                              const bool     overlapping) const
{
  compress();
  (void)communicator;

  if constexpr (running_in_debug_mode())
    {
      if (!overlapping)
        {
          const size_type n_global_elements =
            Utilities::MPI::sum(n_elements(), communicator);
          Assert(n_global_elements == size(),
                 ExcMessage("You are trying to create a Tpetra::Map object "
                            "that partitions elements of an index set "
                            "between processors. However, the union of the "
                            "index sets on different processors does not "
                            "contain all indices exactly once: the sum of "
                            "the number of entries the various processors "
                            "want to store locally is " +
                            std::to_string(n_global_elements) +
                            " whereas the total size of the object to be "
                            "allocated is " +
                            std::to_string(size()) +
                            ". In other words, there are "
                            "either indices that are not spoken for "
                            "by any processor, or there are indices that are "
                            "claimed by multiple processors."));
        }
    }

  // Find out if the IndexSet is ascending and 1:1. This corresponds to a
  // linear Tpetra::Map. Overlapping IndexSets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);
  if (linear)
    return Utilities::Trilinos::internal::make_rcp<
      Tpetra::Map<int, types::signed_global_dof_index, NodeType>>(
      size(),
      n_elements(),
      0,
#    ifdef DEAL_II_WITH_MPI
      Utilities::Trilinos::internal::make_rcp<Teuchos::MpiComm<int>>(
        communicator)
#    else
      Utilities::Trilinos::internal::make_rcp<Teuchos::Comm<int>>()
#    endif // DEAL_II_WITH_MPI
    );
  else
    {
      const std::vector<size_type> indices = get_index_vector();
      std::vector<types::signed_global_dof_index> int_indices(indices.size());
      std::copy(indices.begin(), indices.end(), int_indices.begin());
      const Teuchos::ArrayView<types::signed_global_dof_index> arr_view(
        int_indices);

      return Utilities::Trilinos::internal::make_rcp<
        Tpetra::Map<int, types::signed_global_dof_index, NodeType>>(
        size(),
        arr_view,
        0,
#    ifdef DEAL_II_WITH_MPI
        Utilities::Trilinos::internal::make_rcp<Teuchos::MpiComm<int>>(
          communicator)
#    else
        Utilities::Trilinos::internal::make_rcp<Teuchos::Comm<int>>()
#    endif // DEAL_II_WITH_MPI
      );
    }
}
#  endif



Epetra_Map
IndexSet::make_trilinos_map(const MPI_Comm communicator,
                            const bool     overlapping) const
{
  compress();
  (void)communicator;

  if constexpr (running_in_debug_mode())
    {
      if (!overlapping)
        {
          const size_type n_global_elements =
            Utilities::MPI::sum(n_elements(), communicator);
          Assert(n_global_elements == size(),
                 ExcMessage("You are trying to create an Epetra_Map object "
                            "that partitions elements of an index set "
                            "between processors. However, the union of the "
                            "index sets on different processors does not "
                            "contain all indices exactly once: the sum of "
                            "the number of entries the various processors "
                            "want to store locally is " +
                            std::to_string(n_global_elements) +
                            " whereas the total size of the object to be "
                            "allocated is " +
                            std::to_string(size()) +
                            ". In other words, there are "
                            "either indices that are not spoken for "
                            "by any processor, or there are indices that are "
                            "claimed by multiple processors."));
        }
    }

  // Find out if the IndexSet is ascending and 1:1. This corresponds to a
  // linear EpetraMap. Overlapping IndexSets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);

  if (linear)
    return Epetra_Map(TrilinosWrappers::types::int_type(size()),
                      TrilinosWrappers::types::int_type(n_elements()),
                      0,
#  ifdef DEAL_II_WITH_MPI
                      Epetra_MpiComm(communicator)
#  else
                      Epetra_SerialComm()
#  endif
    );
  else
    {
      const std::vector<size_type> indices = get_index_vector();
      return Epetra_Map(
        TrilinosWrappers::types::int_type(-1),
        TrilinosWrappers::types::int_type(n_elements()),
        (n_elements() > 0 ?
           reinterpret_cast<const TrilinosWrappers::types::int_type *>(
             indices.data()) :
           nullptr),
        0,
#  ifdef DEAL_II_WITH_MPI
        Epetra_MpiComm(communicator)
#  else
        Epetra_SerialComm()
#  endif
      );
    }
}
#endif


#ifdef DEAL_II_WITH_PETSC
IS
IndexSet::make_petsc_is(const MPI_Comm communicator) const
{
  std::vector<size_type> indices;
  fill_index_vector(indices);

  // If the size of the index set can be converted to a PetscInt then every
  // value can also be converted
  AssertThrowIntegerConversion(static_cast<PetscInt>(size()), size());
  const auto local_size = static_cast<PetscInt>(n_elements());
  AssertIntegerConversion(local_size, n_elements());

  size_type             i = 0;
  std::vector<PetscInt> petsc_indices(n_elements());
  for (const auto &index : *this)
    petsc_indices[i++] = static_cast<PetscInt>(index);

  IS             is;
  PetscErrorCode ierr = ISCreateGeneral(
    communicator, local_size, petsc_indices.data(), PETSC_COPY_VALUES, &is);
  AssertThrow(ierr == 0, ExcPETScError(ierr));

  return is;
}
#endif



bool
IndexSet::is_ascending_and_one_to_one(const MPI_Comm communicator) const
{
  // If the sum of local elements does not add up to the total size,
  // the IndexSet can't be complete.
  const size_type n_global_elements =
    Utilities::MPI::sum(n_elements(), communicator);
  if (n_global_elements != size())
    return false;

  if (n_global_elements == 0)
    return true;

#ifdef DEAL_II_WITH_MPI
  // Non-contiguous IndexSets can't be linear.
  const bool all_contiguous =
    (Utilities::MPI::min(is_contiguous() ? 1 : 0, communicator) == 1);
  if (!all_contiguous)
    return false;

  bool is_globally_ascending = true;
  // we know that there is only one interval
  types::global_dof_index first_local_dof = (n_elements() > 0) ?
                                              *(begin_intervals()->begin()) :
                                              numbers::invalid_dof_index;

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
  const std::vector<types::global_dof_index> global_dofs =
    Utilities::MPI::gather(communicator, first_local_dof, 0);

  if (my_rank == 0)
    {
      // find out if the received std::vector is ascending
      types::global_dof_index index = 0;
      while (global_dofs[index] == numbers::invalid_dof_index)
        ++index;
      types::global_dof_index old_dof = global_dofs[index++];
      for (; index < global_dofs.size(); ++index)
        {
          const types::global_dof_index new_dof = global_dofs[index];
          if (new_dof != numbers::invalid_dof_index)
            {
              if (new_dof <= old_dof)
                {
                  is_globally_ascending = false;
                  break;
                }
              else
                old_dof = new_dof;
            }
        }
    }

  // now broadcast the result
  int is_ascending = is_globally_ascending ? 1 : 0;
  int ierr         = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
  AssertThrowMPI(ierr);

  return (is_ascending == 1);
#else
  return true;
#endif // DEAL_II_WITH_MPI
}



std::size_t
IndexSet::memory_consumption() const
{
  return (MemoryConsumption::memory_consumption(ranges) +
          MemoryConsumption::memory_consumption(is_compressed) +
          MemoryConsumption::memory_consumption(index_space_size) +
          sizeof(compress_mutex));
}

// explicit template instantiations

#ifndef DOXYGEN
#  ifdef DEAL_II_WITH_TRILINOS
#    ifdef DEAL_II_TRILINOS_WITH_TPETRA

template IndexSet::IndexSet(
  const Teuchos::RCP<const Tpetra::Map<
    int,
    types::signed_global_dof_index,
    LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>>
    &);

#      if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
        defined(KOKKOS_ENABLE_SYCL)
template IndexSet::IndexSet(
  const Teuchos::RCP<const Tpetra::Map<
    int,
    types::signed_global_dof_index,
    LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<
      MemorySpace::Default>>> &);
#      endif

template Tpetra::Map<
  int,
  types::signed_global_dof_index,
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>
IndexSet::make_tpetra_map<
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>(
  const MPI_Comm,
  bool) const;

#      if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
        defined(KOKKOS_ENABLE_SYCL)
template Tpetra::Map<
  int,
  types::signed_global_dof_index,
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Default>>
IndexSet::make_tpetra_map<
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Default>>(
  const MPI_Comm,
  bool) const;
#      endif

template Teuchos::RCP<Tpetra::Map<
  int,
  types::signed_global_dof_index,
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>>
IndexSet::make_tpetra_map_rcp<
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>(
  const MPI_Comm,
  bool) const;

#      if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
        defined(KOKKOS_ENABLE_SYCL)
template Teuchos::RCP<Tpetra::Map<
  int,
  types::signed_global_dof_index,
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Default>>>
IndexSet::make_tpetra_map_rcp<
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Default>>(
  const MPI_Comm,
  bool) const;
#      endif

#    endif
#  endif
#endif

DEAL_II_NAMESPACE_CLOSE