Reference documentation for deal.II version 9.0.0
index_set.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/base/memory_consumption.h>
17 #include <deal.II/base/mpi.h>
18 #include <deal.II/base/index_set.h>
19 
20 #include <vector>
21 
22 #ifdef DEAL_II_WITH_TRILINOS
23 # ifdef DEAL_II_WITH_MPI
24 # include <Epetra_MpiComm.h>
25 # endif
26 # include <Epetra_SerialComm.h>
27 # include <Epetra_Map.h>
28 #endif
29 
30 DEAL_II_NAMESPACE_OPEN
31 
32 
33 
34 #ifdef DEAL_II_WITH_TRILINOS
35 
36 // the 64-bit path uses a few different names, so put that into a separate
37 // implementation
38 
39 #ifdef DEAL_II_WITH_64BIT_INDICES
40 
41 IndexSet::IndexSet (const Epetra_Map &map)
42  :
43  is_compressed (true),
44  index_space_size (1+map.MaxAllGID64()),
45  largest_range (numbers::invalid_unsigned_int)
46 {
47  Assert(
48  map.MinAllGID64() == 0,
49  ExcMessage("The Epetra_Map does not contain the global index 0, which "
50  "means some entries are not present on any processor."));
51 
52  // For a contiguous map, we do not need to go through the whole data...
53  if (map.LinearMap())
54  add_range(size_type(map.MinMyGID64()), size_type(map.MaxMyGID64()+1));
55  else
56  {
57  const size_type n_indices = map.NumMyElements();
58  size_type *indices = reinterpret_cast<size_type *>(map.MyGlobalElements64());
59  add_indices(indices, indices+n_indices);
60  }
61  compress();
62 }
63 
64 #else
65 
66 // this is the standard 32-bit implementation
67 
68 IndexSet::IndexSet (const Epetra_Map &map)
69  :
70  is_compressed (true),
71  index_space_size (1+map.MaxAllGID()),
72  largest_range (numbers::invalid_unsigned_int)
73 {
74  Assert(
75  map.MinAllGID() == 0,
76  ExcMessage("The Epetra_Map does not contain the global index 0, which "
77  "means some entries are not present on any processor."));
78 
79  // For a contiguous map, we do not need to go through the whole data...
80  if (map.LinearMap())
81  add_range(size_type(map.MinMyGID()), size_type(map.MaxMyGID()+1));
82  else
83  {
84  const size_type n_indices = map.NumMyElements();
85  unsigned int *indices = reinterpret_cast<unsigned int *>(map.MyGlobalElements());
86  add_indices(indices, indices+n_indices);
87  }
88  compress();
89 }
90 
91 #endif
92 
93 #endif // ifdef DEAL_II_WITH_TRILINOS
94 
95 
96 
97 void
98 IndexSet::add_range (const size_type begin,
99  const size_type end)
100 {
101  Assert ((begin < index_space_size)
102  ||
103  ((begin == index_space_size) && (end == index_space_size)),
104  ExcIndexRangeType<size_type> (begin, 0, index_space_size));
105  Assert (end <= index_space_size,
106  ExcIndexRangeType<size_type> (end, 0, index_space_size+1));
107  Assert (begin <= end,
108  ExcIndexRangeType<size_type> (begin, 0, end));
109 
110  if (begin != end)
111  {
112  const Range new_range(begin,end);
113 
114  // the new index might be larger than the last index present in the
115  // ranges. Then we can skip the binary search
116  if (ranges.size() == 0 || begin > ranges.back().end)
117  ranges.push_back(new_range);
118  else
119  ranges.insert (Utilities::lower_bound (ranges.begin(),
120  ranges.end(),
121  new_range),
122  new_range);
123  is_compressed = false;
124  }
125 }
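// As an illustration of the interface (values made up, not part of the
// library source): ranges are half-open, and overlapping ranges are only
// merged once compress() runs.
//
//   IndexSet set (100);        // index space {0,...,99}
//   set.add_range (10, 20);    // adds the indices 10..19
//   set.add_range (15, 30);    // adds 15..29; overlaps the previous range
//   set.compress ();           // merges the two ranges into [10,30)
//   // set.n_elements() == 20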
126 
127 
128 
129 void
130 IndexSet::do_compress () const
131 {
132  // we will, in the following, modify mutable variables. this can only
133  // work in multithreaded applications if we lock the data structures
134  // via a mutex, so that users can call 'const' functions from threads
135  // in parallel (and these 'const' functions can then call compress()
136  // which itself calls the current function)
137  Threads::Mutex::ScopedLock lock (compress_mutex);
138 
139  // see if any of the contiguous ranges can be merged. do not use
140  // std::vector::erase in-place as it is quadratic in the number of
141  // ranges. since the ranges are sorted by their first index, determining
142  // overlap isn't all that hard
143  std::vector<Range>::iterator store = ranges.begin();
144  for (std::vector<Range>::iterator i = ranges.begin();
145  i != ranges.end(); )
146  {
147  std::vector<Range>::iterator
148  next = i;
149  ++next;
150 
151  size_type first_index = i->begin;
152  size_type last_index = i->end;
153 
154  // see if we can merge any of the following ranges
155  while (next != ranges.end() &&
156  (next->begin <= last_index))
157  {
158  last_index = std::max (last_index, next->end);
159  ++next;
160  }
161  i = next;
162 
163  // store the new range in the slot we last occupied
164  *store = Range(first_index, last_index);
165  ++store;
166  }
167  // use a compact array with exactly the right amount of storage
168  if (store != ranges.end())
169  {
170  std::vector<Range> new_ranges(ranges.begin(), store);
171  ranges.swap(new_ranges);
172  }
173 
174  // now compute indices within set and the range with most elements
175  size_type next_index = 0, largest_range_size = 0;
176  for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();
177  ++i)
178  {
179  Assert(i->begin < i->end, ExcInternalError());
180 
181  i->nth_index_in_set = next_index;
182  next_index += (i->end - i->begin);
183  if (i->end - i->begin > largest_range_size)
184  {
185  largest_range_size = i->end - i->begin;
186  largest_range = i - ranges.begin();
187  }
188  }
189  is_compressed = true;
190 
191  // check that next_index is correct. needs to be after the previous
192  // statement because we otherwise will get into an endless loop
193  Assert (next_index == n_elements(), ExcInternalError());
194 }
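// Worked example of the merge above (illustrative, not part of the source):
// if 'ranges' holds [0,5), [3,8), [10,12) -- already sorted by 'begin' --
// the loop collapses the first two into [0,8) and leaves [10,12) alone;
// 'largest_range' then records the position of [0,8), which has 8 elements
// versus 2.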
195 
196 
197 
198 IndexSet
199 IndexSet::operator & (const IndexSet &is) const
200 {
201  Assert (size() == is.size(),
202  ExcDimensionMismatch (size(), is.size()));
203 
204  compress ();
205  is.compress ();
206 
207  std::vector<Range>::const_iterator r1 = ranges.begin(),
208  r2 = is.ranges.begin();
209  IndexSet result (size());
210 
211  while ((r1 != ranges.end())
212  &&
213  (r2 != is.ranges.end()))
214  {
215  // if r1 and r2 do not overlap at all, then move the pointer that sits
216  // to the left of the other up by one
217  if (r1->end <= r2->begin)
218  ++r1;
219  else if (r2->end <= r1->begin)
220  ++r2;
221  else
222  {
223  // the ranges must overlap somehow
224  Assert (((r1->begin <= r2->begin) &&
225  (r1->end > r2->begin))
226  ||
227  ((r2->begin <= r1->begin) &&
228  (r2->end > r1->begin)),
229  ExcInternalError());
230 
231  // add the overlapping range to the result
232  result.add_range (std::max (r1->begin,
233  r2->begin),
234  std::min (r1->end,
235  r2->end));
236 
237  // now move that iterator that ends earlier one up. note that it has
238  // to be this one because a subsequent range may still have a chance
239  // of overlapping with the range that ends later
240  if (r1->end <= r2->end)
241  ++r1;
242  else
243  ++r2;
244  }
245  }
246 
247  result.compress ();
248  return result;
249 }
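// Minimal usage sketch of the intersection (illustrative only; both sets
// must have the same size()):
//
//   IndexSet a (10), b (10);
//   a.add_range (0, 6);          // {0,...,5}
//   b.add_range (4, 10);         // {4,...,9}
//   const IndexSet c = a & b;    // c contains exactly {4,5}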
250 
251 
252 
253 IndexSet
254 IndexSet::get_view (const size_type begin,
255  const size_type end) const
256 {
257  Assert (begin <= end,
258  ExcMessage ("End index needs to be larger or equal to begin index!"));
259  Assert (end <= size(),
260  ExcMessage ("Given range exceeds index set dimension"));
261 
262  IndexSet result (end-begin);
263  std::vector<Range>::const_iterator r1 = ranges.begin();
264 
265  while (r1 != ranges.end())
266  {
267  if ((r1->end > begin)
268  &&
269  (r1->begin < end))
270  {
271  result.add_range (std::max(r1->begin, begin)-begin,
272  std::min(r1->end, end)-begin);
273 
274  }
275  else if (r1->begin >= end)
276  break;
277 
278  ++r1;
279  }
280 
281  result.compress();
282  return result;
283 }
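// Illustrative sketch (values made up): the view is an IndexSet of size
// end-begin whose elements are shifted so that the window starts at zero.
//
//   IndexSet set (100);
//   set.add_range (20, 40);
//   const IndexSet view = set.get_view (30, 60);
//   // view.size() == 30, and view contains {0,...,9}, i.e. the
//   // original indices 30..39 shifted down by 30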
284 
285 
286 
287 void
288 IndexSet::subtract_set (const IndexSet &other)
289 {
290  compress();
291  other.compress();
292  is_compressed = false;
293 
294 
295  // we save new ranges to be added to our IndexSet in a temporary vector and
296  // add all of them in one go at the end.
297  std::vector<Range> new_ranges;
298 
299  std::vector<Range>::iterator own_it = ranges.begin();
300  std::vector<Range>::iterator other_it = other.ranges.begin();
301 
302  while (own_it != ranges.end() && other_it != other.ranges.end())
303  {
304  //advance own iterator until we get an overlap
305  if (own_it->end <= other_it->begin)
306  {
307  ++own_it;
308  continue;
309  }
310  //we are done with other_it, so advance
311  if (own_it->begin >= other_it->end)
312  {
313  ++other_it;
314  continue;
315  }
316 
317  //Now own_it and other_it overlap. First save the part of own_it that
318  //is before other_it (if not empty).
319  if (own_it->begin < other_it->begin)
320  {
321  Range r(own_it->begin, other_it->begin);
322  r.nth_index_in_set = 0; //fix warning of unused variable
323  new_ranges.push_back(r);
324  }
325  // change own_it to the sub range behind other_it. Do not delete own_it
326  // in any case. As removal would invalidate iterators, we just shrink
327  // the range to an empty one.
328  own_it->begin = other_it->end;
329  if (own_it->begin > own_it->end)
330  {
331  own_it->begin = own_it->end;
332  ++own_it;
333  }
334 
335  // continue without advancing iterators, the right one will be advanced
336  // next.
337  }
338 
339  // Now delete all empty ranges we might
340  // have created.
341  for (std::vector<Range>::iterator it = ranges.begin();
342  it != ranges.end(); )
343  {
344  if (it->begin >= it->end)
345  it = ranges.erase(it);
346  else
347  ++it;
348  }
349 
350  // done, now add the temporary ranges
351  const std::vector<Range>::iterator end = new_ranges.end();
352  for (std::vector<Range>::iterator it = new_ranges.begin();
353  it != end; ++it)
354  add_range(it->begin, it->end);
355 
356  compress();
357 }
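// Illustrative sketch (values made up):
//
//   IndexSet a (20), b (20);
//   a.add_range (0, 10);         // {0,...,9}
//   b.add_range (5, 15);         // {5,...,14}
//   a.subtract_set (b);          // a now contains exactly {0,...,4}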
358 
359 
360 
361 IndexSet::size_type
362 IndexSet::pop_back ()
363 {
364  Assert(is_empty() == false,
365  ExcMessage("pop_back() failed, because this IndexSet contains no entries."));
366 
367  const size_type index = ranges.back().end-1;
368  --ranges.back().end;
369 
370  if (ranges.back().begin == ranges.back().end)
371  ranges.pop_back();
372 
373  return index;
374 }
375 
376 
377 
378 IndexSet::size_type
379 IndexSet::pop_front ()
380 {
381  Assert(is_empty() == false,
382  ExcMessage("pop_front() failed, because this IndexSet contains no entries."));
383 
384  const size_type index = ranges.front().begin;
385  ++ranges.front().begin;
386 
387  if (ranges.front().begin == ranges.front().end)
388  ranges.erase(ranges.begin());
389 
390  // We have to set this in any case, because nth_index_in_set is no longer
391  // up to date for all but the first range
392  is_compressed = false;
393 
394  return index;
395 }
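// Illustrative sketch (values made up): pop_back() and pop_front() remove
// and return the largest and smallest element, respectively.
//
//   IndexSet set (10);
//   set.add_range (2, 5);                                // {2,3,4}
//   const IndexSet::size_type last  = set.pop_back ();   // 4; set == {2,3}
//   const IndexSet::size_type first = set.pop_front ();  // 2; set == {3}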
396 
397 
398 
399 void
400 IndexSet::add_indices (const IndexSet &other,
401  const unsigned int offset)
402 {
403  if ((this == &other) && (offset == 0))
404  return;
405 
406  Assert (other.ranges.size() == 0
407  || other.ranges.back().end-1 < index_space_size,
408  ExcIndexRangeType<size_type> (other.ranges.back().end-1,
409  0, index_space_size));
410 
411  compress();
412  other.compress();
413 
414  std::vector<Range>::const_iterator r1 = ranges.begin(),
415  r2 = other.ranges.begin();
416 
417  std::vector<Range> new_ranges;
418  // just get the start and end of the ranges right in this method, everything
419  // else will be done in compress()
420  while (r1 != ranges.end() || r2 != other.ranges.end())
421  {
422  // the two ranges do not overlap or we are at the end of one of the
423  // ranges
424  if (r2 == other.ranges.end() ||
425  (r1 != ranges.end() && r1->end < (r2->begin+offset)))
426  {
427  new_ranges.push_back(*r1);
428  ++r1;
429  }
430  else if (r1 == ranges.end() || (r2->end+offset) < r1->begin)
431  {
432  new_ranges.emplace_back(r2->begin+offset, r2->end+offset);
433  ++r2;
434  }
435  else
436  {
437  // ok, we do overlap, so just take the combination of the current
438  // range (do not bother to merge with subsequent ranges)
439  Range next(std::min(r1->begin, r2->begin+offset),
440  std::max(r1->end, r2->end+offset));
441  new_ranges.push_back(next);
442  ++r1;
443  ++r2;
444  }
445  }
446  ranges.swap(new_ranges);
447 
448  is_compressed = false;
449  compress();
450 }
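// Illustrative sketch (values made up): the elements of 'other' are added
// after being shifted by 'offset'.
//
//   IndexSet a (20), b (20);
//   a.add_range (0, 3);          // {0,1,2}
//   b.add_range (0, 2);          // {0,1}
//   a.add_indices (b, 10);       // a now contains {0,1,2,10,11}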
451 
452 
453 
454 void
455 IndexSet::write(std::ostream &out) const
456 {
457  compress();
458  out << size() << " ";
459  out << ranges.size() << std::endl;
460  std::vector<Range>::const_iterator r = ranges.begin();
461  for ( ; r!=ranges.end(); ++r)
462  {
463  out << r->begin << " " << r->end << std::endl;
464  }
465 }
466 
467 
468 
469 void
470 IndexSet::read(std::istream &in)
471 {
472  AssertThrow (in, ExcIO());
473 
474  size_type s;
475  unsigned int n_ranges;
476 
477  in >> s >> n_ranges;
478  ranges.clear();
479  set_size(s);
480  for (unsigned int i=0; i<n_ranges; ++i)
481  {
482  AssertThrow (in, ExcIO());
483 
484  size_type b, e;
485  in >> b >> e;
486  add_range(b,e);
487  }
488 }
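// Illustrative round-trip sketch (file name made up): write() stores the
// set as plain text -- the size, the number of ranges, then one
// "begin end" pair per range -- and read() expects exactly that format.
//
//   std::ofstream out ("indexset.txt");
//   set.write (out);
//   ...
//   std::ifstream in ("indexset.txt");
//   IndexSet other;
//   other.read (in);             // 'other' now equals 'set'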
489 
490 
491 void
492 IndexSet::block_write(std::ostream &out) const
493 {
494  AssertThrow (out, ExcIO());
495  out.write(reinterpret_cast<const char *>(&index_space_size),
496  sizeof(index_space_size));
497  size_t n_ranges = ranges.size();
498  out.write(reinterpret_cast<const char *>(&n_ranges),
499  sizeof(n_ranges));
500  if (ranges.empty() == false)
501  out.write (reinterpret_cast<const char *>(&*ranges.begin()),
502  ranges.size() * sizeof(Range));
503  AssertThrow (out, ExcIO());
504 }
505 
506 void
507 IndexSet::block_read(std::istream &in)
508 {
509  size_type size;
510  size_t n_ranges;
511  in.read(reinterpret_cast<char *>(&size), sizeof(size));
512  in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));
513  // we have to clear ranges first
514  ranges.clear();
515  set_size(size);
516  ranges.resize(n_ranges, Range(0,0));
517  if (n_ranges)
518  in.read(reinterpret_cast<char *>(&*ranges.begin()),
519  ranges.size() * sizeof(Range));
520 
521  do_compress(); // needed so that largest_range can be recomputed
522 }
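// Illustrative sketch (file name made up): block_write() stores the index
// space size, the number of ranges, and the raw Range array in binary
// form, which is exactly what block_read() reconstructs.
//
//   std::ofstream out ("indexset.bin", std::ios::binary);
//   set.block_write (out);
//   ...
//   std::ifstream in ("indexset.bin", std::ios::binary);
//   IndexSet other;
//   other.block_read (in);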
523 
524 
525 
526 void IndexSet::fill_index_vector(std::vector<size_type> &indices) const
527 {
528  compress();
529 
530  indices.clear();
531  indices.reserve(n_elements());
532 
533  for (std::vector<Range>::iterator it = ranges.begin();
534  it != ranges.end();
535  ++it)
536  for (size_type i=it->begin; i<it->end; ++i)
537  indices.push_back (i);
538 
539  Assert (indices.size() == n_elements(), ExcInternalError());
540 }
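// Illustrative sketch (values made up): the vector ends up listing every
// element of the set in ascending order.
//
//   IndexSet set (10);
//   set.add_range (2, 4);
//   set.add_index (7);
//   std::vector<IndexSet::size_type> indices;
//   set.fill_index_vector (indices);   // indices == {2, 3, 7}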
541 
542 
543 
544 
545 
546 #ifdef DEAL_II_WITH_TRILINOS
547 
548 Epetra_Map
549 IndexSet::make_trilinos_map (const MPI_Comm &communicator,
550  const bool overlapping) const
551 {
552  compress ();
553  (void)communicator;
554 
555 #ifdef DEBUG
556  if (!overlapping)
557  {
558  const size_type n_global_elements
559  = Utilities::MPI::sum (n_elements(), communicator);
560  Assert (n_global_elements == size(),
561  ExcMessage ("You are trying to create an Epetra_Map object "
562  "that partitions elements of an index set "
563  "between processors. However, the union of the "
564  "index sets on different processors does not "
565  "contain all indices exactly once: the sum of "
566  "the number of entries the various processors "
567  "want to store locally is "
568  + Utilities::to_string (n_global_elements) +
569  " whereas the total size of the object to be "
570  "allocated is "
571  + Utilities::to_string (size()) +
572  ". In other words, there are "
573  "either indices that are not spoken for "
574  "by any processor, or there are indices that are "
575  "claimed by multiple processors."));
576  }
577 #endif
578 
579  // Find out if the IndexSet is ascending and 1:1. This corresponds to a
580  // linear EpetraMap. Overlapping IndexSets are never 1:1.
581  const bool linear = overlapping ? false : is_ascending_and_one_to_one(communicator);
582 
583  if (linear)
584  return Epetra_Map (TrilinosWrappers::types::int_type(size()),
585  TrilinosWrappers::types::int_type(n_elements()),
586  0,
587 #ifdef DEAL_II_WITH_MPI
588  Epetra_MpiComm(communicator)
589 #else
590  Epetra_SerialComm()
591 #endif
592  );
593  else
594  {
595  std::vector<size_type> indices;
596  fill_index_vector(indices);
597  return Epetra_Map (TrilinosWrappers::types::int_type(-1),
598  TrilinosWrappers::types::int_type(n_elements()),
599  (n_elements() > 0
600  ?
601  reinterpret_cast<TrilinosWrappers::types::int_type *>(indices.data())
602  :
603  nullptr),
604  0,
605 #ifdef DEAL_II_WITH_MPI
606  Epetra_MpiComm(communicator)
607 #else
608  Epetra_SerialComm()
609 #endif
610  );
611  }
612 }
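// Illustrative sketch (assumes deal.II was configured with Trilinos; the
// IndexSet 'locally_owned' is a placeholder): each process passes the
// indices it owns, and the resulting Epetra_Map partitions them.
//
//   IndexSet locally_owned = ...;   // e.g. the locally owned DoFs
//   Epetra_Map map
//     = locally_owned.make_trilinos_map (MPI_COMM_WORLD,
//                                        /*overlapping=*/ false);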
613 #endif
614 
615 
616 
617 bool
618 IndexSet::is_ascending_and_one_to_one (const MPI_Comm &communicator) const
619 {
620  // If the sum of local elements does not add up to the total size,
621  // the IndexSet can't be complete.
622  const size_type n_global_elements
623  = Utilities::MPI::sum (n_elements(), communicator);
624  if (n_global_elements != size())
625  return false;
626 
627 #ifdef DEAL_II_WITH_MPI
628  // Non-contiguous IndexSets can't be linear.
629  const bool all_contiguous = (Utilities::MPI::min (is_contiguous() ? 1 : 0, communicator) == 1);
630  if (!all_contiguous)
631  return false;
632 
633  bool is_globally_ascending = true;
634  // we know that there is only one interval
635  types::global_dof_index first_local_dof
636  = (n_elements()>0) ? *(begin_intervals()->begin())
637  : numbers::invalid_dof_index;
638 
639  const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
640  const std::vector<types::global_dof_index> global_dofs = Utilities::MPI::gather(communicator,first_local_dof,0);
641 
642  if (my_rank == 0)
643  {
644  // find out if the received std::vector is ascending
645  types::global_dof_index index = 0;
646  while (global_dofs[index] == numbers::invalid_dof_index)
647  ++index;
648  types::global_dof_index old_dof = global_dofs[index++];
649  for (; index<global_dofs.size(); ++index)
650  {
651  const types::global_dof_index new_dof = global_dofs[index];
652  if (new_dof != numbers::invalid_dof_index)
653  {
654  if (new_dof <= old_dof)
655  {
656  is_globally_ascending = false;
657  break;
658  }
659  else
660  old_dof = new_dof;
661  }
662  }
663  }
664 
665  // now broadcast the result
666  int is_ascending = is_globally_ascending ? 1 : 0;
667  int ierr = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
668  AssertThrowMPI(ierr);
669 
670  return (is_ascending==1);
671 #else
672  return true;
673 #endif //DEAL_II_WITH_MPI
674 }
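// Worked example (illustrative): with two MPI processes owning {0,...,4}
// and {5,...,9} of an IndexSet of size 10 -- in rank order -- the function
// returns true on every process. If the two ownership ranges were swapped
// between the ranks, the gathered first elements would be 5, 0, which is
// not ascending, and the broadcast result would be false everywhere.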
675 
676 
677 
678 std::size_t
679 IndexSet::memory_consumption () const
680 {
681  return (MemoryConsumption::memory_consumption (ranges) +
682  MemoryConsumption::memory_consumption (is_compressed) +
683  MemoryConsumption::memory_consumption (index_space_size) +
684  sizeof (compress_mutex));
685 }
686 
687 
688 
689 DEAL_II_NAMESPACE_CLOSE