index_set.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2009 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/index_set.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>

#include <deal.II/lac/exceptions.h>
#include <deal.II/lac/trilinos_tpetra_types.h>

#include <vector>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#  endif
#  include <Epetra_Map.h>
#  include <Epetra_SerialComm.h>
#  ifdef DEAL_II_TRILINOS_WITH_TPETRA
#    include <Tpetra_Map.hpp>
#  endif
#endif

DEAL_II_NAMESPACE_OPEN



#ifdef DEAL_II_WITH_TRILINOS

#  ifdef DEAL_II_TRILINOS_WITH_TPETRA

template <typename NodeType>
IndexSet::IndexSet(
  const Teuchos::RCP<
    const Tpetra::Map<int, types::signed_global_dof_index, NodeType>> &map)
  : is_compressed(true)
  , index_space_size(1 + map->getMaxAllGlobalIndex())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map->getMinAllGlobalIndex() == 0,
         ExcMessage(
           "The Tpetra::Map does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  // For a contiguous map, we do not need to go through the whole data...
  if (map->isContiguous())
    add_range(size_type(map->getMinGlobalIndex()),
              size_type(map->getMaxGlobalIndex() + 1));
  else
    {
#    if DEAL_II_TRILINOS_VERSION_GTE(13, 4, 0)
      const size_type n_indices = map->getLocalNumElements();
#    else
      const size_type n_indices = map->getNodeNumElements();
#    endif
      const types::signed_global_dof_index *indices =
        map->getMyGlobalIndices().data();
      add_indices(indices, indices + n_indices);
    }
  compress();
}

#  endif // DEAL_II_TRILINOS_WITH_TPETRA


// the 64-bit path uses a few different names, so put that into a separate
// implementation

#  ifdef DEAL_II_WITH_64BIT_INDICES

IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID64())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID64() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  // For a contiguous map, we do not need to go through the whole data...
  if (map.LinearMap())
    add_range(size_type(map.MinMyGID64()), size_type(map.MaxMyGID64() + 1));
  else
    {
      const size_type n_indices = map.NumMyElements();
      size_type      *indices =
        reinterpret_cast<size_type *>(map.MyGlobalElements64());
      add_indices(indices, indices + n_indices);
    }
  compress();
}

#  else

// this is the standard 32-bit implementation

IndexSet::IndexSet(const Epetra_BlockMap &map)
  : is_compressed(true)
  , index_space_size(1 + map.MaxAllGID())
  , largest_range(numbers::invalid_unsigned_int)
{
  Assert(map.MinAllGID() == 0,
         ExcMessage(
           "The Epetra_BlockMap does not contain the global index 0, "
           "which means some entries are not present on any processor."));

  // For a contiguous map, we do not need to go through the whole data...
  if (map.LinearMap())
    add_range(size_type(map.MinMyGID()), size_type(map.MaxMyGID() + 1));
  else
    {
      const size_type n_indices = map.NumMyElements();
      unsigned int   *indices =
        reinterpret_cast<unsigned int *>(map.MyGlobalElements());
      add_indices(indices, indices + n_indices);
    }
  compress();
}

#  endif

#endif // ifdef DEAL_II_WITH_TRILINOS



void
IndexSet::do_compress() const
{
  {
    // we will, in the following, modify mutable variables. this can only
    // work in multithreaded applications if we lock the data structures
    // via a mutex, so that users can call 'const' functions from threads
    // in parallel (and these 'const' functions can then call compress()
    // which itself calls the current function)
    std::lock_guard<std::mutex> lock(compress_mutex);

    // see if any of the contiguous ranges can be merged. do not use
    // std::vector::erase in-place as it is quadratic in the number of
    // ranges. since the ranges are sorted by their first index, determining
    // overlap isn't all that hard
    std::vector<Range>::iterator store = ranges.begin();
    for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();)
      {
        std::vector<Range>::iterator next = i;
        ++next;

        size_type first_index = i->begin;
        size_type last_index  = i->end;

        // see if we can merge any of the following ranges
        while (next != ranges.end() && (next->begin <= last_index))
          {
            last_index = std::max(last_index, next->end);
            ++next;
          }
        i = next;

        // store the new range in the slot we last occupied
        *store = Range(first_index, last_index);
        ++store;
      }
    // use a compact array with exactly the right amount of storage
    if (store != ranges.end())
      {
        std::vector<Range> new_ranges(ranges.begin(), store);
        ranges.swap(new_ranges);
      }

    // now compute indices within set and the range with most elements
    size_type next_index = 0, largest_range_size = 0;
    for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();
         ++i)
      {
        Assert(i->begin < i->end, ExcInternalError());

        i->nth_index_in_set = next_index;
        next_index += (i->end - i->begin);
        if (i->end - i->begin > largest_range_size)
          {
            largest_range_size = i->end - i->begin;
            largest_range      = i - ranges.begin();
          }
      }
    is_compressed = true;

    // check that next_index is correct. needs to be after the previous
    // statement because we otherwise will get into an endless loop
    Assert(next_index == n_elements(), ExcInternalError());
  }

#ifdef DEBUG
  // A consistency check: We should only ever have added indices
  // that are within the range of the index set. Instead of doing
  // this in every one of the many functions that add indices,
  // do this in the current, central location
  for (const auto &range : ranges)
    Assert((range.begin < index_space_size) && (range.end <= index_space_size),
           ExcMessage("In the process of creating the current IndexSet "
                      "object, you added indices beyond the size of the index "
                      "space. Specifically, you added elements that form the "
                      "range [" +
                      std::to_string(range.begin) + "," +
                      std::to_string(range.end) +
                      "), but the size of the index space is only " +
                      std::to_string(index_space_size) + "."));
#endif
}

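// Usage sketch (illustrative, not part of the library source): compress()
// merges overlapping and adjacent ranges into the minimal sorted
// representation, so adding [0,5), [3,8), and [8,10) leaves the single
// stored range [0,10).
//
// @code
//   IndexSet s(20);
//   s.add_range(0, 5);
//   s.add_range(3, 8);
//   s.add_range(8, 10);
//   s.compress();
//   // s.n_elements() == 10, s.n_intervals() == 1
// @endcode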


#ifndef DOXYGEN
IndexSet
IndexSet::operator&(const IndexSet &is) const
{
  Assert(size() == is.size(), ExcDimensionMismatch(size(), is.size()));

  compress();
  is.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = is.ranges.begin();
  IndexSet result(size());

  while ((r1 != ranges.end()) && (r2 != is.ranges.end()))
    {
      // if r1 and r2 do not overlap at all, then move the pointer that sits
      // to the left of the other up by one
      if (r1->end <= r2->begin)
        ++r1;
      else if (r2->end <= r1->begin)
        ++r2;
      else
        {
          // the ranges must overlap somehow
          Assert(((r1->begin <= r2->begin) && (r1->end > r2->begin)) ||
                   ((r2->begin <= r1->begin) && (r2->end > r1->begin)),
                 ExcInternalError());

          // add the overlapping range to the result
          result.add_range(std::max(r1->begin, r2->begin),
                           std::min(r1->end, r2->end));

          // now move that iterator that ends earlier one up. note that it has
          // to be this one because a subsequent range may still have a chance
          // of overlapping with the range that ends later
          if (r1->end <= r2->end)
            ++r1;
          else
            ++r2;
        }
    }

  result.compress();
  return result;
}
#endif
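
// Usage sketch (illustrative, not part of the library source): operator&
// computes the set intersection of two IndexSets defined over index spaces
// of the same size.
//
// @code
//   IndexSet a(100), b(100);
//   a.add_range(0, 50);        // a = {0,...,49}
//   b.add_range(25, 75);       // b = {25,...,74}
//   const IndexSet c = a & b;  // c = {25,...,49}
//   // c.n_elements() == 25
// @endcode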



IndexSet
IndexSet::get_view(const size_type begin, const size_type end) const
{
  Assert(begin <= end,
         ExcMessage(
           "End index needs to be larger than or equal to begin index!"));
  Assert(end <= size(),
         ExcMessage("You are asking for a view into an IndexSet object "
                    "that would cover the sub-range [" +
                    std::to_string(begin) + ',' + std::to_string(end) +
                    "). But this is not a subset of the range "
                    "of the current object, which is [0," +
                    std::to_string(size()) + ")."));

  IndexSet                           result(end - begin);
  std::vector<Range>::const_iterator r1 = ranges.begin();

  while (r1 != ranges.end())
    {
      if ((r1->end > begin) && (r1->begin < end))
        {
          result.add_range(std::max(r1->begin, begin) - begin,
                           std::min(r1->end, end) - begin);
        }
      else if (r1->begin >= end)
        break;

      ++r1;
    }

  result.compress();
  return result;
}
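
// Usage sketch (illustrative, not part of the library source): get_view()
// extracts the indices in [begin,end) and shifts them so the view lives in
// the index space [0,end-begin).
//
// @code
//   IndexSet s(100);
//   s.add_range(10, 20);
//   s.add_range(40, 50);
//   const IndexSet v = s.get_view(15, 45);
//   // v.size() == 30; v contains {0,...,4} (from 15..19)
//   // and {25,...,29} (from 40..44)
// @endcode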



IndexSet
IndexSet::get_view(const IndexSet &mask) const
{
  Assert(size() == mask.size(),
         ExcMessage("The mask must have the same size index space "
                    "as the index set it is applied to."));

  // If the mask is an empty set, then the view is also empty:
  if (mask == IndexSet())
    return {};

  // For everything, it is more efficient to work on compressed sets:
  compress();
  mask.compress();

  // If the mask has a single range, then we can just defer to the
  // previous function
  if (mask.ranges.size() == 1)
    return get_view(mask.ranges[0].begin, mask.ranges[0].end);

  // For the general case where the mask is an arbitrary set,
  // the situation is slightly more complicated. We need to walk
  // the ranges of the two index sets in parallel and search for
  // overlaps, and then appropriately shift

  // we save all new ranges to our IndexSet in a temporary vector and
  // add all of them in one go at the end.
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it  = ranges.begin();
  std::vector<Range>::iterator mask_it = mask.ranges.begin();

  while ((own_it != ranges.end()) && (mask_it != mask.ranges.end()))
    {
      // If our own range lies completely ahead of the current
      // range in the mask, move forward and start the loop body
      // anew. If this was the last range, the 'while' loop above
      // will terminate, so we don't have to check for end iterators
      if (own_it->end <= mask_it->begin)
        {
          ++own_it;
          continue;
        }

      // Do the same if the current mask range lies completely ahead of
      // the current range of this object:
      if (mask_it->end <= own_it->begin)
        {
          ++mask_it;
          continue;
        }

      // Now own_it and mask_it overlap. Check that this is true by
      // enumerating the cases that can happen. This is
      // surprisingly tricky because the two intervals can intersect in
      // a number of different ways, but there really are only the four
      // following possibilities:

      // Case 1: our interval overlaps the left end of the mask's interval
      //
      // So we need to add the elements from the first element of the mask's
      // interval to the end of our own interval. But we need to shift the
      // indices so that they correspond to the position of these elements
      // within the mask; fortunately (because we compressed the mask), this
      // is recorded in the mask's ranges.
      if ((own_it->begin <= mask_it->begin) && (own_it->end <= mask_it->end))
        {
          new_ranges.emplace_back(mask_it->nth_index_in_set,
                                  mask_it->nth_index_in_set +
                                    (own_it->end - mask_it->begin));
        }
      else
        // Case 2: our interval overlaps the tail end of the mask's interval
        if ((mask_it->begin <= own_it->begin) && (mask_it->end <= own_it->end))
          {
            const size_type offset_within_mask_interval =
              own_it->begin - mask_it->begin;
            new_ranges.emplace_back(mask_it->nth_index_in_set +
                                      offset_within_mask_interval,
                                    mask_it->nth_index_in_set +
                                      (mask_it->end - mask_it->begin));
          }
        else
          // Case 3: Our own interval completely encloses the mask's interval
          if ((own_it->begin <= mask_it->begin) &&
              (own_it->end >= mask_it->end))
            {
              new_ranges.emplace_back(mask_it->nth_index_in_set,
                                      mask_it->nth_index_in_set +
                                        (mask_it->end - mask_it->begin));
            }
          else
            // Case 4: The mask's interval completely encloses our own
            // interval
            if ((mask_it->begin <= own_it->begin) &&
                (mask_it->end >= own_it->end))
              {
                const size_type offset_within_mask_interval =
                  own_it->begin - mask_it->begin;
                new_ranges.emplace_back(mask_it->nth_index_in_set +
                                          offset_within_mask_interval,
                                        mask_it->nth_index_in_set +
                                          offset_within_mask_interval +
                                          (own_it->end - own_it->begin));
              }
            else
              DEAL_II_ASSERT_UNREACHABLE();

      // We considered the overlap of these two intervals. It may of course
      // be that one of them overlaps with another one, but that can only
      // be the case for the interval that extends further to the right. So
      // we can safely move on from the interval that terminates earlier:
      if (own_it->end < mask_it->end)
        ++own_it;
      else if (mask_it->end < own_it->end)
        ++mask_it;
      else
        {
          // The intervals ended at the same point. We can move on from both.
          // (The algorithm would also work if we only moved on from one,
          // but we can micro-optimize here without too much effort.)
          ++own_it;
          ++mask_it;
        }
    }

  // Now turn the ranges of overlap we have accumulated into an IndexSet in
  // its own right:
  IndexSet result(mask.n_elements());
  for (const auto &range : new_ranges)
    result.add_range(range.begin, range.end);
  result.compress();

  return result;
}

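// Usage sketch (illustrative, not part of the library source): with a
// multi-range mask, each selected index is renumbered by its position
// within the mask.
//
// @code
//   IndexSet s(100), mask(100);
//   s.add_range(10, 20);
//   mask.add_range(0, 5);    // mask positions 0..4
//   mask.add_range(10, 15);  // mask positions 5..9
//   const IndexSet v = s.get_view(mask);
//   // v.size() == mask.n_elements() == 10; v contains {5,...,9},
//   // i.e., the positions of the indices 10..14 within the mask
// @endcode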


std::vector<IndexSet>
IndexSet::split_by_block(
  const std::vector<types::global_dof_index> &n_indices_per_block) const
{
  std::vector<IndexSet> partitioned;
  const unsigned int    n_blocks = n_indices_per_block.size();

  partitioned.reserve(n_blocks);
  types::global_dof_index start = 0;
  for (const auto n_block_indices : n_indices_per_block)
    {
      partitioned.push_back(this->get_view(start, start + n_block_indices));
      start += n_block_indices;
    }

#ifdef DEBUG
  types::global_dof_index sum = 0;
  for (const auto &partition : partitioned)
    {
      sum += partition.size();
    }
  AssertDimension(sum, this->size());
#endif

  return partitioned;
}

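// Usage sketch (illustrative, not part of the library source):
// split_by_block() chops the index space into consecutive blocks of the
// given sizes and returns one shifted view per block.
//
// @code
//   IndexSet s(10);
//   s.add_range(2, 8);
//   const std::vector<IndexSet> blocks = s.split_by_block({4, 6});
//   // blocks[0]: size 4, contains {2,3} (view of [0,4))
//   // blocks[1]: size 6, contains {0,...,3} (view of [4,10), shifted by 4)
// @endcode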


void
IndexSet::subtract_set(const IndexSet &other)
{
  compress();
  other.compress();
  is_compressed = false;


  // we save all new ranges to our IndexSet in a temporary vector and
  // add all of them in one go at the end.
  std::vector<Range> new_ranges;

  std::vector<Range>::iterator own_it   = ranges.begin();
  std::vector<Range>::iterator other_it = other.ranges.begin();

  while (own_it != ranges.end() && other_it != other.ranges.end())
    {
      // advance own iterator until we get an overlap
      if (own_it->end <= other_it->begin)
        {
          new_ranges.push_back(*own_it);
          ++own_it;
          continue;
        }
      // we are done with other_it, so advance
      if (own_it->begin >= other_it->end)
        {
          ++other_it;
          continue;
        }

      // Now own_it and other_it overlap. First save the part of own_it that
      // is before other_it (if not empty).
      if (own_it->begin < other_it->begin)
        {
          Range r(own_it->begin, other_it->begin);
          r.nth_index_in_set = 0; // fix warning of unused variable
          new_ranges.push_back(r);
        }
      // change own_it to the sub range behind other_it. Do not delete own_it
      // in any case. As removal would invalidate iterators, we just shrink
      // the range to an empty one.
      own_it->begin = other_it->end;
      if (own_it->begin > own_it->end)
        {
          own_it->begin = own_it->end;
          ++own_it;
        }

      // continue without advancing iterators, the right one will be advanced
      // next.
    }

  // make sure to take over the remaining ranges
  for (; own_it != ranges.end(); ++own_it)
    new_ranges.push_back(*own_it);

  ranges.clear();

  // done, now add the temporary ranges
  const std::vector<Range>::iterator end = new_ranges.end();
  for (std::vector<Range>::iterator it = new_ranges.begin(); it != end; ++it)
    add_range(it->begin, it->end);

  compress();
}

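// Usage sketch (illustrative, not part of the library source):
// subtract_set() removes from the current set every index that is also in
// the argument.
//
// @code
//   IndexSet a(20), b(20);
//   a.add_range(0, 10);   // a = {0,...,9}
//   b.add_range(5, 15);   // b = {5,...,14}
//   a.subtract_set(b);    // a = {0,...,4}
//   // a.n_elements() == 5
// @endcode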


IndexSet
IndexSet::tensor_product(const IndexSet &other) const
{
  IndexSet set(this->size() * other.size());
  for (const auto el : *this)
    set.add_indices(other, el * other.size());
  set.compress();
  return set;
}

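// Usage sketch (illustrative, not part of the library source): the tensor
// product maps every pair (i, j) with i in this set and j in 'other' to the
// single index i * other.size() + j.
//
// @code
//   IndexSet a(2), b(3);
//   a.add_index(1);        // a = {1}
//   b.add_range(0, 2);     // b = {0,1}
//   const IndexSet t = a.tensor_product(b);
//   // t.size() == 6, t = {1*3+0, 1*3+1} = {3,4}
// @endcode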


IndexSet::size_type
IndexSet::pop_back()
{
  Assert(is_empty() == false,
         ExcMessage(
           "pop_back() failed, because this IndexSet contains no entries."));

  const size_type index = ranges.back().end - 1;
  --ranges.back().end;

  if (ranges.back().begin == ranges.back().end)
    ranges.pop_back();

  return index;
}



IndexSet::size_type
IndexSet::pop_front()
{
  Assert(is_empty() == false,
         ExcMessage(
           "pop_front() failed, because this IndexSet contains no entries."));

  const size_type index = ranges.front().begin;
  ++ranges.front().begin;

  if (ranges.front().begin == ranges.front().end)
    ranges.erase(ranges.begin());

  // We have to set this in any case, because nth_index_in_set is no longer
  // up to date for all but the first range
  is_compressed = false;

  return index;
}

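// Usage sketch (illustrative, not part of the library source): pop_front()
// and pop_back() remove and return the smallest and largest element of the
// set, respectively.
//
// @code
//   IndexSet s(10);
//   s.add_range(3, 6);             // s = {3,4,5}
//   const auto lo = s.pop_front(); // lo == 3, s = {4,5}
//   const auto hi = s.pop_back();  // hi == 5, s = {4}
// @endcode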


void
IndexSet::add_range_lower_bound(const Range &new_range)
{
  // if the inserted range is already within the range we find by lower_bound,
  // there is no need to do anything; we do not try to be clever here and
  // leave all other work to compress().
  const auto insert_position =
    Utilities::lower_bound(ranges.begin(), ranges.end(), new_range);
  if (insert_position == ranges.end() ||
      insert_position->begin > new_range.begin ||
      insert_position->end < new_range.end)
    ranges.insert(insert_position, new_range);
}



void
IndexSet::add_ranges_internal(
  boost::container::small_vector<std::pair<size_type, size_type>, 200>
    &tmp_ranges,
  const bool ranges_are_sorted)
{
  if (!ranges_are_sorted)
    std::sort(tmp_ranges.begin(), tmp_ranges.end());

  // if we have many ranges, we first construct a temporary index set (where
  // we add ranges in a consecutive way, so fast), otherwise, we work with
  // add_range(). the number 9 is chosen heuristically given the fact that
  // there are typically up to 8 independent ranges when adding the degrees of
  // freedom on a 3d cell or 9 when adding degrees of freedom of faces. if
  // doing cell-by-cell additions, we want to avoid repeated calls to
  // IndexSet::compress() which gets called upon merging two index sets, so we
  // want to be in the other branch then.
  if (tmp_ranges.size() > 9)
    {
      IndexSet tmp_set(size());
      tmp_set.ranges.reserve(tmp_ranges.size());
      for (const auto &i : tmp_ranges)
        tmp_set.add_range(i.first, i.second);

      // Case if we have zero or just one range: Add into the other set with
      // its indices, as that is cheaper
      if (this->ranges.size() <= 1)
        {
          if (this->ranges.size() == 1)
            tmp_set.add_range(ranges[0].begin, ranges[0].end);
          std::swap(*this, tmp_set);
        }
      else
        this->add_indices(tmp_set);
    }
  else
    for (const auto &i : tmp_ranges)
      add_range(i.first, i.second);
}



void
IndexSet::add_indices(const IndexSet &other, const size_type offset)
{
  if ((this == &other) && (offset == 0))
    return;

  if (other.ranges.size() != 0)
    {
      AssertIndexRange(other.ranges.back().end - 1, index_space_size);
    }

  compress();
  other.compress();

  std::vector<Range>::const_iterator r1 = ranges.begin(),
                                     r2 = other.ranges.begin();

  std::vector<Range> new_ranges;
  // just get the start and end of the ranges right in this method, everything
  // else will be done in compress()
  while (r1 != ranges.end() || r2 != other.ranges.end())
    {
      // the two ranges do not overlap or we are at the end of one of the
      // ranges
      if (r2 == other.ranges.end() ||
          (r1 != ranges.end() && r1->end < (r2->begin + offset)))
        {
          new_ranges.push_back(*r1);
          ++r1;
        }
      else if (r1 == ranges.end() || (r2->end + offset) < r1->begin)
        {
          new_ranges.emplace_back(r2->begin + offset, r2->end + offset);
          ++r2;
        }
      else
        {
          // ok, we do overlap, so just take the combination of the current
          // range (do not bother to merge with subsequent ranges)
          Range next(std::min(r1->begin, r2->begin + offset),
                     std::max(r1->end, r2->end + offset));
          new_ranges.push_back(next);
          ++r1;
          ++r2;
        }
    }
  ranges.swap(new_ranges);

  is_compressed = false;
  compress();
}

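// Usage sketch (illustrative, not part of the library source): add_indices()
// with an offset copies the elements of 'other', shifted by the offset, into
// the current set; this is what tensor_product() uses above.
//
// @code
//   IndexSet s(100), other(100);
//   other.add_range(0, 3);     // other = {0,1,2}
//   s.add_indices(other, 50);  // s now contains {50,51,52}
// @endcode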


bool
IndexSet::is_subset_of(const IndexSet &other) const
{
  Assert(size() == other.size(),
         ExcMessage("One index set can only be a subset of another if they "
                    "describe index spaces of the same size. The ones in "
                    "question here have sizes " +
                    std::to_string(size()) + " and " +
                    std::to_string(other.size()) + "."));

  // See whether there are indices in the current set that are not in 'other'.
  // If so, then this is clearly not a subset of 'other'.
  IndexSet A_minus_B = *this;
  A_minus_B.subtract_set(other);
  if (A_minus_B.n_elements() > 0)
    return false;
  else
    // Else, every index in 'this' is also in 'other', since we ended up
    // with an empty set upon subtraction. This means that we have a subset:
    return true;
}

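// Usage sketch (illustrative, not part of the library source): A is a subset
// of B exactly when A \ B is empty, which is how the function above decides.
//
// @code
//   IndexSet a(10), b(10);
//   a.add_range(2, 4);  // a = {2,3}
//   b.add_range(0, 5);  // b = {0,...,4}
//   // a.is_subset_of(b) == true, b.is_subset_of(a) == false
// @endcode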


void
IndexSet::write(std::ostream &out) const
{
  compress();
  out << size() << " ";
  out << ranges.size() << std::endl;
  std::vector<Range>::const_iterator r = ranges.begin();
  for (; r != ranges.end(); ++r)
    {
      out << r->begin << " " << r->end << std::endl;
    }
}

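// Usage sketch (illustrative, not part of the library source): write() emits
// a plain-text header "size n_ranges" followed by one "begin end" pair per
// range, which is exactly the format read() parses below. For instance, a
// set of size 100 with ranges [0,5) and [80,100) is written as
//
//   100 2
//   0 5
//   80 100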


void
IndexSet::read(std::istream &in)
{
  AssertThrow(in.fail() == false, ExcIO());

  size_type    s;
  unsigned int n_ranges;

  in >> s >> n_ranges;
  ranges.clear();
  set_size(s);
  for (unsigned int i = 0; i < n_ranges; ++i)
    {
      AssertThrow(in.fail() == false, ExcIO());

      size_type b, e;
      in >> b >> e;
      add_range(b, e);
    }
}


void
IndexSet::block_write(std::ostream &out) const
{
  AssertThrow(out.fail() == false, ExcIO());
  out.write(reinterpret_cast<const char *>(&index_space_size),
            sizeof(index_space_size));
  std::size_t n_ranges = ranges.size();
  out.write(reinterpret_cast<const char *>(&n_ranges), sizeof(n_ranges));
  if (ranges.empty() == false)
    out.write(reinterpret_cast<const char *>(&*ranges.begin()),
              ranges.size() * sizeof(Range));
  AssertThrow(out.fail() == false, ExcIO());
}

void
IndexSet::block_read(std::istream &in)
{
  size_type   size;
  std::size_t n_ranges;
  in.read(reinterpret_cast<char *>(&size), sizeof(size));
  in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));
  // we have to clear ranges first
  ranges.clear();
  set_size(size);
  ranges.resize(n_ranges, Range(0, 0));
  if (n_ranges != 0u)
    in.read(reinterpret_cast<char *>(&*ranges.begin()),
            ranges.size() * sizeof(Range));

  do_compress(); // needed so that largest_range can be recomputed
}



bool
IndexSet::is_element_binary_search(const size_type index) const
{
  // get the element after which we would have to insert a range that
  // consists of all elements from this element to the end of the index
  // range plus one. after this call we know that if p!=end() then
  // p->begin<=index unless there is no such range at all
  //
  // if the searched for element is an element of this range, then we're
  // done. otherwise, the element can't be in one of the following ranges
  // because otherwise p would be a different iterator
  //
  // since we already know the position relative to the largest range (we
  // called compress!), we can perform the binary search on ranges with
  // lower/higher number compared to the largest range
  std::vector<Range>::const_iterator p = std::upper_bound(
    ranges.begin() +
      (index < ranges[largest_range].begin ? 0 : largest_range + 1),
    index < ranges[largest_range].begin ? ranges.begin() + largest_range :
                                          ranges.end(),
    Range(index, size() + 1));

  if (p == ranges.begin())
    return ((index >= p->begin) && (index < p->end));

  Assert((p == ranges.end()) || (p->begin > index), ExcInternalError());

  // now move to that previous range
  --p;
  Assert(p->begin <= index, ExcInternalError());

  return (p->end > index);
}



IndexSet::size_type
IndexSet::nth_index_in_set_binary_search(const size_type n) const
{
  // find out which chunk the local index n belongs to by using a binary
  // search. the comparator is based on the end of the ranges.
  Range r(n, n + 1);
  r.nth_index_in_set = n;

  const std::vector<Range>::const_iterator p = Utilities::lower_bound(
    ranges.begin(), ranges.end(), r, Range::nth_index_compare);

  Assert(p != ranges.end(), ExcInternalError());
  return p->begin + (n - p->nth_index_in_set);
}



IndexSet::size_type
IndexSet::index_within_set_binary_search(const size_type n) const
{
  // we could try to use the main range for splitting up the search range, but
  // since we only come here when the largest range did not contain the index,
  // there is little gain from doing a first step manually.
  Range r(n, n);
  std::vector<Range>::const_iterator p =
    Utilities::lower_bound(ranges.begin(), ranges.end(), r, Range::end_compare);

  // if n is not in this set
  if (p == ranges.end() || p->end == n || p->begin > n)
    return numbers::invalid_dof_index;

  Assert(p != ranges.end(), ExcInternalError());
  Assert(p->begin <= n, ExcInternalError());
  Assert(n < p->end, ExcInternalError());
  return (n - p->begin) + p->nth_index_in_set;
}



IndexSet::ElementIterator
IndexSet::at(const size_type global_index) const
{
  compress();
  AssertIndexRange(global_index, size());

  if (ranges.empty())
    return end();

  std::vector<Range>::const_iterator main_range =
    ranges.begin() + largest_range;

  Range r(global_index, global_index + 1);
  // This optimization makes the bounds for lower_bound smaller by checking
  // the largest range first.
  std::vector<Range>::const_iterator range_begin, range_end;
  if (global_index < main_range->begin)
    {
      range_begin = ranges.begin();
      range_end   = main_range;
    }
  else
    {
      range_begin = main_range;
      range_end   = ranges.end();
    }

  // This will give us the first range p=[a,b[ with b>=global_index using
  // a binary search
  const std::vector<Range>::const_iterator p =
    Utilities::lower_bound(range_begin, range_end, r, Range::end_compare);

  // We couldn't find a range, which means we have no range that contains
  // global_index and also no range behind it, meaning we need to return end().
  if (p == ranges.end())
    return end();

  // Finally, we can have two cases: Either global_index is not in [a,b[,
  // which means we need to return an iterator to a because global_index, ...,
  // a-1 is not in the IndexSet (if branch). Alternatively, global_index is in
  // [a,b[ and we will return an iterator pointing directly at global_index
  // (else branch).
  if (global_index < p->begin)
    return {this, static_cast<size_type>(p - ranges.begin()), p->begin};
  else
    return {this, static_cast<size_type>(p - ranges.begin()), global_index};
}



std::vector<IndexSet::size_type>
IndexSet::get_index_vector() const
{
  compress();

  std::vector<size_type> indices;
  indices.reserve(n_elements());

  for (const auto &range : ranges)
    for (size_type entry = range.begin; entry < range.end; ++entry)
      indices.push_back(entry);

  Assert(indices.size() == n_elements(), ExcInternalError());

  return indices;
}

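// Usage sketch (illustrative, not part of the library source):
// get_index_vector() expands the compressed range representation into an
// explicit, sorted list of elements.
//
// @code
//   IndexSet s(10);
//   s.add_range(2, 4);
//   s.add_index(7);
//   const std::vector<IndexSet::size_type> v = s.get_index_vector();
//   // v == {2, 3, 7}
// @endcode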


void
IndexSet::fill_index_vector(std::vector<size_type> &indices) const
{
  indices = get_index_vector();
}



#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_TRILINOS_WITH_TPETRA

template <typename NodeType>
Tpetra::Map<int, types::signed_global_dof_index, NodeType>
IndexSet::make_tpetra_map(const MPI_Comm communicator,
                          const bool     overlapping) const
{
  return *make_tpetra_map_rcp<NodeType>(communicator, overlapping);
}



template <typename NodeType>
Teuchos::RCP<Tpetra::Map<int, types::signed_global_dof_index, NodeType>>
IndexSet::make_tpetra_map_rcp(const MPI_Comm communicator,
                              const bool     overlapping) const
{
  compress();
  (void)communicator;

#    ifdef DEBUG
  if (!overlapping)
    {
      const size_type n_global_elements =
        Utilities::MPI::sum(n_elements(), communicator);
      Assert(n_global_elements == size(),
             ExcMessage("You are trying to create a Tpetra::Map object "
                        "that partitions elements of an index set "
                        "between processors. However, the union of the "
                        "index sets on different processors does not "
                        "contain all indices exactly once: the sum of "
                        "the number of entries the various processors "
                        "want to store locally is " +
                        std::to_string(n_global_elements) +
                        " whereas the total size of the object to be "
                        "allocated is " +
                        std::to_string(size()) +
                        ". In other words, there are "
                        "either indices that are not spoken for "
                        "by any processor, or there are indices that are "
                        "claimed by multiple processors."));
    }
#    endif

  // Find out if the IndexSet is ascending and 1:1. This corresponds to a
  // linear Tpetra::Map. Overlapping IndexSets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);
  if (linear)
    return Utilities::Trilinos::internal::make_rcp<
      Tpetra::Map<int, types::signed_global_dof_index, NodeType>>(
      size(),
      n_elements(),
      0,
#    ifdef DEAL_II_WITH_MPI
      Utilities::Trilinos::internal::make_rcp<Teuchos::MpiComm<int>>(
        communicator)
#    else
      Utilities::Trilinos::internal::make_rcp<Teuchos::Comm<int>>()
#    endif // DEAL_II_WITH_MPI
    );
  else
    {
      const std::vector<size_type>                indices = get_index_vector();
      std::vector<types::signed_global_dof_index> int_indices(indices.size());
      std::copy(indices.begin(), indices.end(), int_indices.begin());
      const Teuchos::ArrayView<types::signed_global_dof_index> arr_view(
        int_indices);

      return Utilities::Trilinos::internal::make_rcp<
        Tpetra::Map<int, types::signed_global_dof_index, NodeType>>(
        size(),
        arr_view,
        0,
#    ifdef DEAL_II_WITH_MPI
        Utilities::Trilinos::internal::make_rcp<Teuchos::MpiComm<int>>(
          communicator)
#    else
        Utilities::Trilinos::internal::make_rcp<Teuchos::Comm<int>>()
#    endif // DEAL_II_WITH_MPI
      );
    }
}
#  endif



Epetra_Map
IndexSet::make_trilinos_map(const MPI_Comm communicator,
                            const bool     overlapping) const
{
  compress();
  (void)communicator;

#  ifdef DEBUG
  if (!overlapping)
    {
      const size_type n_global_elements =
        Utilities::MPI::sum(n_elements(), communicator);
      Assert(n_global_elements == size(),
             ExcMessage("You are trying to create an Epetra_Map object "
                        "that partitions elements of an index set "
                        "between processors. However, the union of the "
                        "index sets on different processors does not "
                        "contain all indices exactly once: the sum of "
                        "the number of entries the various processors "
                        "want to store locally is " +
                        std::to_string(n_global_elements) +
                        " whereas the total size of the object to be "
                        "allocated is " +
                        std::to_string(size()) +
                        ". In other words, there are "
                        "either indices that are not spoken for "
                        "by any processor, or there are indices that are "
                        "claimed by multiple processors."));
    }
#  endif

  // Find out if the IndexSet is ascending and 1:1. This corresponds to a
  // linear EpetraMap. Overlapping IndexSets are never 1:1.
  const bool linear =
    overlapping ? false : is_ascending_and_one_to_one(communicator);

  if (linear)
    return Epetra_Map(TrilinosWrappers::types::int_type(size()),
                      TrilinosWrappers::types::int_type(n_elements()),
                      0,
#  ifdef DEAL_II_WITH_MPI
                      Epetra_MpiComm(communicator)
#  else
                      Epetra_SerialComm()
#  endif
    );
  else
    {
      const std::vector<size_type> indices = get_index_vector();
      return Epetra_Map(
        TrilinosWrappers::types::int_type(size()),
        TrilinosWrappers::types::int_type(n_elements()),
        (n_elements() > 0 ?
           reinterpret_cast<const TrilinosWrappers::types::int_type *>(
             indices.data()) :
           nullptr),
        0,
#  ifdef DEAL_II_WITH_MPI
        Epetra_MpiComm(communicator)
#  else
        Epetra_SerialComm()
#  endif
      );
    }
}
#endif


#ifdef DEAL_II_WITH_PETSC
IS
IndexSet::make_petsc_is(const MPI_Comm communicator) const
{
  std::vector<size_type> indices;
  fill_index_vector(indices);

  // If the size of the index set can be converted to a PetscInt then every
  // value can also be converted
  AssertThrowIntegerConversion(static_cast<PetscInt>(size()), size());
  const auto local_size = static_cast<PetscInt>(n_elements());
  AssertIntegerConversion(local_size, n_elements());

  size_type             i = 0;
  std::vector<PetscInt> petsc_indices(n_elements());
  for (const auto &index : *this)
    petsc_indices[i++] = static_cast<PetscInt>(index);

  IS             is;
  PetscErrorCode ierr = ISCreateGeneral(
    communicator, local_size, petsc_indices.data(), PETSC_COPY_VALUES, &is);
  AssertThrow(ierr == 0, ExcPETScError(ierr));

  return is;
}
#endif



bool
IndexSet::is_ascending_and_one_to_one(const MPI_Comm communicator) const
{
  // If the sum of local elements does not add up to the total size,
  // the IndexSet can't be complete.
  const size_type n_global_elements =
    Utilities::MPI::sum(n_elements(), communicator);
  if (n_global_elements != size())
    return false;

  if (n_global_elements == 0)
    return true;

#ifdef DEAL_II_WITH_MPI
  // Non-contiguous IndexSets can't be linear.
  const bool all_contiguous =
    (Utilities::MPI::min(is_contiguous() ? 1 : 0, communicator) == 1);
  if (!all_contiguous)
    return false;

  bool is_globally_ascending = true;
  // we know that there is only one interval
  types::global_dof_index first_local_dof = (n_elements() > 0) ?
                                              *(begin_intervals()->begin()) :
                                              numbers::invalid_dof_index;

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
  const std::vector<types::global_dof_index> global_dofs =
    Utilities::MPI::gather(communicator, first_local_dof, 0);

  if (my_rank == 0)
    {
      // find out if the received std::vector is ascending
      types::global_dof_index index = 0;
      while (global_dofs[index] == numbers::invalid_dof_index)
        ++index;
      types::global_dof_index old_dof = global_dofs[index++];
      for (; index < global_dofs.size(); ++index)
        {
          const types::global_dof_index new_dof = global_dofs[index];
          if (new_dof != numbers::invalid_dof_index)
            {
              if (new_dof <= old_dof)
                {
                  is_globally_ascending = false;
                  break;
                }
              else
                old_dof = new_dof;
            }
        }
    }

  // now broadcast the result
  int is_ascending = is_globally_ascending ? 1 : 0;
  int ierr         = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
  AssertThrowMPI(ierr);

  return (is_ascending == 1);
#else
  return true;
#endif // DEAL_II_WITH_MPI
}

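// Usage sketch (illustrative, not part of the library source): an IndexSet
// is "ascending and one-to-one" if every index is owned by exactly one rank
// and ranks own contiguous, increasing blocks. For example, on two MPI
// ranks:
//
//   rank 0: {0,...,49},  rank 1: {50,...,99}  -> true
//   rank 0: {50,...,99}, rank 1: {0,...,49}   -> false (not ascending)
//   rank 0: {0,...,59},  rank 1: {50,...,99}  -> false (50..59 claimed twice)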


std::size_t
IndexSet::memory_consumption() const
{
  return (MemoryConsumption::memory_consumption(ranges) +
          MemoryConsumption::memory_consumption(is_compressed) +
          MemoryConsumption::memory_consumption(index_space_size) +
          sizeof(compress_mutex));
}

// explicit template instantiations

#ifndef DOXYGEN
#  ifdef DEAL_II_WITH_TRILINOS
#    ifdef DEAL_II_TRILINOS_WITH_TPETRA

template IndexSet::IndexSet(
  const Teuchos::RCP<const Tpetra::Map<
    int,
    types::signed_global_dof_index,
    LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>>
    &);

#      if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
        defined(KOKKOS_ENABLE_SYCL)
template IndexSet::IndexSet(
  const Teuchos::RCP<const Tpetra::Map<
    int,
    types::signed_global_dof_index,
    LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<
      MemorySpace::Default>>> &);
#      endif

template Tpetra::Map<
  int,
  types::signed_global_dof_index,
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>
IndexSet::make_tpetra_map(const MPI_Comm, bool) const;

#      if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
        defined(KOKKOS_ENABLE_SYCL)
template Tpetra::Map<
  int,
  types::signed_global_dof_index,
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Default>>
IndexSet::make_tpetra_map(const MPI_Comm, bool) const;
#      endif

template Teuchos::RCP<Tpetra::Map<
  int,
  types::signed_global_dof_index,
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>>
IndexSet::make_tpetra_map_rcp(const MPI_Comm, bool) const;

#      if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
        defined(KOKKOS_ENABLE_SYCL)
template Teuchos::RCP<Tpetra::Map<
  int,
  types::signed_global_dof_index,
  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Default>>>
IndexSet::make_tpetra_map_rcp(const MPI_Comm, bool) const;
#      endif

#    endif
#  endif
#endif

DEAL_II_NAMESPACE_CLOSE