Reference documentation for deal.II version GIT 3779fa9eb4 2023-09-28 13:00:02+00:00
mpi_compute_index_owner_internal.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2019 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/config.h>

#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_compute_index_owner_internal.h>

#include <boost/serialization/utility.hpp>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    namespace internal
    {
      namespace ComputeIndexOwner
      {

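        // FlexibleIndexStorage maps each index of the local dictionary range
        // to the rank that owns it. The caller selects, via the use_vector
        // flag passed to reinit(), whether the data lives in a plain
        // std::vector (dense storage) or in a std::map (sparse storage).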
        FlexibleIndexStorage::FlexibleIndexStorage(const bool use_vector)
          : use_vector(use_vector)
          , size(0)
        {}



        void
        FlexibleIndexStorage::reinit(const bool        use_vector,
                                     const bool        index_range_contiguous,
                                     const std::size_t size)
        {
          this->use_vector = use_vector;
          this->size       = size;

          data = {};
          data_map.clear();

          // in case we have contiguous indices, only fill the vector upon
          // first request in `fill`
          if (!index_range_contiguous)
            data.resize(size, invalid_index_value);
        }



        void
        FlexibleIndexStorage::fill(const std::size_t start,
                                   const std::size_t end,
                                   const index_type &value)
        {
          AssertIndexRange(start, size);
          AssertIndexRange(end, size + 1);

          if (use_vector)
            {
              if (data.empty() && end > start)
                {
                  // in debug mode, we want to track whether we set all
                  // indices, so we first fill an invalid index and only later
                  // the actual ones, whereas we simply assign the given rank
                  // to the complete vector the first time we pass around in
                  // this function in release mode to avoid touching data
                  // unnecessarily (and overwrite the smaller pieces), as the
                  // locally owned part comes first
#ifdef DEBUG
                  data.resize(size, invalid_index_value);
                  std::fill(data.begin() + start, data.begin() + end, value);
#else
                  data.resize(size, value);
#endif
                }
              else
                {
                  AssertDimension(data.size(), size);
                  std::fill(data.begin() + start, data.begin() + end, value);
                }
            }
          else
            {
              for (auto i = start; i < end; ++i)
                data_map[i] = value;
            }
        }



        FlexibleIndexStorage::index_type &
        FlexibleIndexStorage::operator[](const std::size_t index)
        {
          AssertIndexRange(index, size);

          if (use_vector)
            {
              AssertDimension(data.size(), size);
              return data[index];
            }
          else
            {
              if (data_map.find(index) == data_map.end())
                data_map[index] = invalid_index_value;

              return data_map[index];
            }
        }



        FlexibleIndexStorage::index_type
        FlexibleIndexStorage::operator[](const std::size_t index) const
        {
          AssertIndexRange(index, size);

          if (use_vector)
            {
              AssertDimension(data.size(), size);
              return data[index];
            }
          else
            {
              if (data_map.find(index) == data_map.end())
                return invalid_index_value;

              return data_map.at(index);
            }
        }



        bool
        FlexibleIndexStorage::entry_has_been_set(const std::size_t index) const
        {
          AssertIndexRange(index, size);

          if (use_vector)
            {
              if (data.empty())
                return false;

              AssertDimension(data.size(), size);
              return data[index] != invalid_index_value;
            }
          else
            return data_map.find(index) != data_map.end();
        }


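        // Set up the dictionary: every rank is assigned a fixed, contiguous
        // slice of the global index range and records, for each index in that
        // slice, which rank reported it as locally owned. The owner
        // information is communicated either with plain point-to-point
        // messages (if the index space has no gaps) or via a consensus
        // algorithm.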
        void
        Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm comm)
        {
          // 1) set up the partition
          this->partition(owned_indices, comm);

          unsigned int my_rank = this_mpi_process(comm);

          types::global_dof_index dic_local_received = 0;
          std::map<unsigned int,
                   std::vector<std::pair<types::global_dof_index,
                                         types::global_dof_index>>>
            buffers;

          const auto owned_indices_size_actual =
            Utilities::MPI::sum(owned_indices.n_elements(), comm);

          actually_owning_ranks.reinit((owned_indices_size_actual *
                                        sparsity_factor) > owned_indices.size(),
                                       owned_indices_size_actual ==
                                         owned_indices.size(),
                                       locally_owned_size);

          // 2) collect relevant processes and process local dict entries
          for (auto interval = owned_indices.begin_intervals();
               interval != owned_indices.end_intervals();
               ++interval)
            {
              // Due to the granularity of the dictionary, the interval
              // might be split into several ranges of processor owner
              // ranks. Here, we process the interval by breaking into
              // smaller pieces in terms of the dictionary number.
              std::pair<types::global_dof_index, types::global_dof_index>
                index_range(*interval->begin(), interval->last() + 1);

              AssertThrow(index_range.second <= size, ExcInternalError());

              while (index_range.first != index_range.second)
                {
                  Assert(index_range.first < index_range.second,
                         ExcInternalError());

                  const unsigned int owner =
                    dof_to_dict_rank(index_range.first);

                  // this explicitly picks up the formula of
                  // dof_to_dict_rank, so the two places must be in sync
                  const types::global_dof_index next_index =
                    std::min(get_index_offset(owner + 1), index_range.second);

                  Assert(next_index > index_range.first, ExcInternalError());

#ifdef DEBUG
                  // make sure that the owner is the same on the current
                  // interval
                  for (types::global_dof_index i = index_range.first + 1;
                       i < next_index;
                       ++i)
                    AssertDimension(owner, dof_to_dict_rank(i));
#endif

                  // add the interval, either to the local range or into a
                  // buffer to be sent to another processor
                  if (owner == my_rank)
                    {
                      actually_owning_ranks.fill(index_range.first -
                                                   local_range.first,
                                                 next_index - local_range.first,
                                                 my_rank);
                      dic_local_received += next_index - index_range.first;
                      if (actually_owning_rank_list.empty())
                        actually_owning_rank_list.push_back(my_rank);
                    }
                  else
                    buffers[owner].emplace_back(index_range.first, next_index);

                  index_range.first = next_index;
                }
            }

#ifdef DEAL_II_WITH_MPI
          n_dict_procs_in_owned_indices = buffers.size();
          std::vector<MPI_Request> request;

          // Check if index set space is partitioned globally without gaps.
          if (owned_indices_size_actual == owned_indices.size())
            {
              // no gaps: setup is simple! Processes send their locally owned
              // indices to the dictionary. The dictionary stores the sending
              // rank for each index. The dictionary knows exactly
              // when it is set up when all indices it is responsible for
              // have been processed.

              request.reserve(n_dict_procs_in_owned_indices);

              // protect the following communication steps using a mutex:
              static CollectiveMutex mutex;
              CollectiveMutex::ScopedLock lock(mutex, comm);

              const int mpi_tag =
                Utilities::MPI::internal::Tags::dictionary_reinit;

              // 3) send messages with local dofs to the right dict process
              for (const auto &rank_pair : buffers)
                {
                  request.push_back(MPI_Request());
                  const int ierr = MPI_Isend(rank_pair.second.data(),
                                             rank_pair.second.size() * 2,
                                             DEAL_II_DOF_INDEX_MPI_TYPE,
                                             rank_pair.first,
                                             mpi_tag,
                                             comm,
                                             &request.back());
                  AssertThrowMPI(ierr);
                }

              // 4) receive messages until all dofs in dict are processed
              while (this->locally_owned_size != dic_local_received)
                {
                  // wait for an incoming message
                  MPI_Status status;
                  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
                  AssertThrowMPI(ierr);

                  // retrieve size of incoming message
                  int number_amount;
                  ierr = MPI_Get_count(&status,
                                       DEAL_II_DOF_INDEX_MPI_TYPE,
                                       &number_amount);
                  AssertThrowMPI(ierr);

                  const auto other_rank = status.MPI_SOURCE;
                  actually_owning_rank_list.push_back(other_rank);

                  // receive message
                  Assert(number_amount % 2 == 0, ExcInternalError());
                  std::vector<
                    std::pair<types::global_dof_index, types::global_dof_index>>
                    buffer(number_amount / 2);
                  ierr = MPI_Recv(buffer.data(),
                                  number_amount,
                                  DEAL_II_DOF_INDEX_MPI_TYPE,
                                  status.MPI_SOURCE,
                                  status.MPI_TAG,
                                  comm,
                                  MPI_STATUS_IGNORE);
                  AssertThrowMPI(ierr);
                  // process message: loop over all intervals
                  for (auto interval : buffer)
                    {
# ifdef DEBUG
                      for (types::global_dof_index i = interval.first;
                           i < interval.second;
                           i++)
                        Assert(actually_owning_ranks.entry_has_been_set(
                                 i - local_range.first) == false,
                               ExcInternalError());
                      Assert(interval.first >= local_range.first &&
                               interval.first < local_range.second,
                             ExcInternalError());
                      Assert(interval.second > local_range.first &&
                               interval.second <= local_range.second,
                             ExcInternalError());
# endif

                      actually_owning_ranks.fill(interval.first -
                                                   local_range.first,
                                                 interval.second -
                                                   local_range.first,
                                                 other_rank);
                      dic_local_received += interval.second - interval.first;
                    }
                }
            }
          else
            {
              // with gap: use a ConsensusAlgorithm to determine when all
              // dictionaries have been set up.

              // 3/4) use a ConsensusAlgorithm to send messages with local
              // dofs to the right dict process

              using RequestType = std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>;

              ConsensusAlgorithms::selector<RequestType>(
                /* targets = */
                [&buffers]() {
                  std::vector<unsigned int> targets;
                  targets.reserve(buffers.size());
                  for (const auto &rank_pair : buffers)
                    targets.emplace_back(rank_pair.first);

                  return targets;
                }(),

                /* create_request = */
                [&buffers](const unsigned int target_rank) -> RequestType {
                  return buffers.at(target_rank);
                },

                /* process_request = */
                [&](const unsigned int source_rank,
                    const RequestType &request) -> void {
                  // process message: loop over all intervals
                  for (auto interval : request)
                    {
# ifdef DEBUG
                      for (types::global_dof_index i = interval.first;
                           i < interval.second;
                           i++)
                        Assert(
                          actually_owning_ranks.entry_has_been_set(
                            i - local_range.first) == false,
                          ExcMessage(
                            "Multiple processes seem to own the same global index. "
                            "A possible reason is that the sets of locally owned "
                            "indices are not distinct."));
                      Assert(interval.first < interval.second,
                             ExcInternalError());
                      Assert(
                        local_range.first <= interval.first &&
                          interval.second <= local_range.second,
                        ExcMessage(
                          "The specified interval is not handled by the current process."));
# endif
                      actually_owning_ranks.fill(interval.first -
                                                   local_range.first,
                                                 interval.second -
                                                   local_range.first,
                                                 source_rank);
                    }
                  actually_owning_rank_list.push_back(source_rank);
                },

                comm);
            }

          std::sort(actually_owning_rank_list.begin(),
                    actually_owning_rank_list.end());

          for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
            Assert(actually_owning_rank_list[i] >
                     actually_owning_rank_list[i - 1],
                   ExcInternalError());

          // 5) make sure that all messages have been sent
          if (request.size() > 0)
            {
              const int ierr = MPI_Waitall(request.size(),
                                           request.data(),
                                           MPI_STATUSES_IGNORE);
              AssertThrowMPI(ierr);
            }

#else
          Assert(buffers.empty(), ExcInternalError());
          (void)comm;
          (void)dic_local_received;
#endif
        }


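        // Split the global index range [0, size) into contiguous chunks of
        // roughly equal size and store the chunk assigned to the present rank
        // in local_range.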
        void
        Dictionary::partition(const IndexSet &owned_indices,
                              const MPI_Comm  comm)
        {
          const unsigned int n_procs = n_mpi_processes(comm);
          const unsigned int my_rank = this_mpi_process(comm);

          size = owned_indices.size();

          Assert(size > 0, ExcNotImplemented());

          dofs_per_process = (size + n_procs - 1) / n_procs;
          if (dofs_per_process < range_minimum_grain_size)
            {
              dofs_per_process  = range_minimum_grain_size;
              stride_small_size = dofs_per_process * n_procs / size;
            }
          else
            stride_small_size = 1;
          local_range.first  = get_index_offset(my_rank);
          local_range.second = get_index_offset(my_rank + 1);

          locally_owned_size = local_range.second - local_range.first;
        }


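        // The payload object for the consensus algorithm that answers the
        // question "who owns these indices?": it builds the dictionary in its
        // constructor and later translates requests and answers between the
        // requesting ranks and the dictionary ranks.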
        ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
          const IndexSet            &owned_indices,
          const IndexSet            &indices_to_look_up,
          const MPI_Comm             comm,
          std::vector<unsigned int> &owning_ranks,
          const bool                 track_index_requests)
          : owned_indices(owned_indices)
          , indices_to_look_up(indices_to_look_up)
          , comm(comm)
          , my_rank(this_mpi_process(comm))
          , n_procs(n_mpi_processes(comm))
          , track_index_requests(track_index_requests)
          , owning_ranks(owning_ranks)
        {
          dict.reinit(owned_indices, comm);
          requesters.resize(dict.actually_owning_rank_list.size());
        }


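        // Called on the dictionary side: for every index interval received
        // from other_rank, look up the owning rank in the local dictionary
        // slice and append it to request_buffer; optionally record who asked
        // for which index.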
        void
        ConsensusAlgorithmsPayload::answer_request(
          const unsigned int                                     other_rank,
          const std::vector<std::pair<types::global_dof_index,
                                      types::global_dof_index>> &buffer_recv,
          std::vector<unsigned int>                             &request_buffer)
        {
          unsigned int owner_index_guess = 0;
          for (const auto &interval : buffer_recv)
            for (auto i = interval.first; i < interval.second; ++i)
              {
                const unsigned int actual_owner =
                  dict.actually_owning_ranks[i - dict.local_range.first];
                request_buffer.push_back(actual_owner);

                if (track_index_requests)
                  append_index_origin(i - dict.local_range.first,
                                      other_rank,
                                      actual_owner,
                                      owner_index_guess);
              }
        }


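        // Determine which dictionary ranks need to be contacted for the
        // indices in indices_to_look_up. Queries that the present rank can
        // answer from its own dictionary slice are resolved immediately; all
        // others are grouped per dictionary rank for the later requests.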
        std::vector<unsigned int>
        ConsensusAlgorithmsPayload::compute_targets()
        {
          std::vector<unsigned int> targets;

          indices_to_look_up_by_dict_rank.clear();
          unsigned int index             = 0;
          unsigned int owner_index_guess = 0;
          for (auto i : indices_to_look_up)
            {
              unsigned int other_rank = dict.dof_to_dict_rank(i);
              if (other_rank == my_rank)
                {
                  owning_ranks[index] =
                    dict.actually_owning_ranks[i - dict.local_range.first];
                  if (track_index_requests)
                    append_index_origin(i - dict.local_range.first,
                                        my_rank,
                                        owning_ranks[index],
                                        owner_index_guess);
                }
              else
                {
                  if (targets.empty() || targets.back() != other_rank)
                    targets.push_back(other_rank);
                  auto &indices = indices_to_look_up_by_dict_rank[other_rank];
                  indices.first.push_back(i);
                  indices.second.push_back(index);
                }
              index++;
            }

          Assert(targets.size() == indices_to_look_up_by_dict_rank.size(),
                 ExcMessage("Size does not match!"));

          return targets;
        }


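        // Compress the indices destined for other_rank into contiguous
        // [begin, end) intervals by means of an IndexSet, which keeps the
        // message size small.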
        void
        ConsensusAlgorithmsPayload::create_request(
          const unsigned int                              other_rank,
          std::vector<std::pair<types::global_dof_index,
                                types::global_dof_index>> &send_buffer)
        {
          // create index set and compress data to be sent
          auto &indices_i = indices_to_look_up_by_dict_rank[other_rank].first;
          IndexSet is(dict.size);
          is.add_indices(indices_i.begin(), indices_i.end());
          is.compress();

          for (auto interval = is.begin_intervals();
               interval != is.end_intervals();
               ++interval)
            send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
        }


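        // Scatter the owner ranks returned by other_rank back into the
        // positions of the original queries recorded in compute_targets().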
        void
        ConsensusAlgorithmsPayload::read_answer(
          const unsigned int               other_rank,
          const std::vector<unsigned int> &recv_buffer)
        {
          const auto &recv_indices =
            indices_to_look_up_by_dict_rank[other_rank].second;
          AssertDimension(recv_indices.size(), recv_buffer.size());
          for (unsigned int j = 0; j < recv_indices.size(); ++j)
            owning_ranks[recv_indices[j]] = recv_buffer[j];
        }


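        // Invert the ownership information: return, for each rank that
        // requested indices owned by the present rank, the set of those
        // indices. Only available if track_index_requests was enabled.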
        std::map<unsigned int, IndexSet>
        ConsensusAlgorithmsPayload::get_requesters()
        {
          Assert(track_index_requests,
                 ExcMessage("Must enable index range tracking in "
                            "constructor of ConsensusAlgorithmProcess"));

          std::map<unsigned int, IndexSet> requested_indices;

#ifdef DEAL_II_WITH_MPI

          static CollectiveMutex mutex;
          CollectiveMutex::ScopedLock lock(mutex, comm);

          const int mpi_tag = Utilities::MPI::internal::Tags::
            consensus_algorithm_payload_get_requesters;

          // reserve enough slots for the requests ahead; depending on
          // whether the owning rank is one of the requesters or not, we
          // might have one less requests to execute, so fill the requests
          // on demand.
          std::vector<MPI_Request> send_requests;
          send_requests.reserve(requesters.size());

          // We use an integer vector for the data exchange. Since we send
          // data associated to intervals with different requesters, we will
          // need to send (a) the MPI rank of the requester, (b) the number
          // of intervals directed to this requester, and (c) a list of
          // intervals, i.e., two integers per interval. The number of items
          // sent in total can be deduced both via the MPI status message at
          // the receiver site as well as be counting the buckets from
          // different requesters.
          std::vector<std::vector<unsigned int>> send_data(requesters.size());
          for (unsigned int i = 0; i < requesters.size(); ++i)
            {
              // special code for our own indices
              if (dict.actually_owning_rank_list[i] == my_rank)
                {
                  for (const auto &j : requesters[i])
                    {
                      const types::global_dof_index index_offset =
                        dict.get_index_offset(my_rank);
                      IndexSet &my_index_set = requested_indices[j.first];
                      my_index_set.set_size(owned_indices.size());
                      for (const auto &interval : j.second)
                        my_index_set.add_range(index_offset + interval.first,
                                               index_offset + interval.second);
                    }
                }
              else
                {
                  for (const auto &j : requesters[i])
                    {
                      send_data[i].push_back(j.first);
                      send_data[i].push_back(j.second.size());
                      for (const auto &interval : j.second)
                        {
                          send_data[i].push_back(interval.first);
                          send_data[i].push_back(interval.second);
                        }
                    }
                  send_requests.push_back(MPI_Request());
                  const int ierr = MPI_Isend(send_data[i].data(),
                                             send_data[i].size(),
                                             MPI_UNSIGNED,
                                             dict.actually_owning_rank_list[i],
                                             mpi_tag,
                                             comm,
                                             &send_requests.back());
                  AssertThrowMPI(ierr);
                }
            }

          // receive the data
          for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices; ++c)
            {
              // wait for an incoming message
              MPI_Status status;
              int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
              AssertThrowMPI(ierr);

              // retrieve size of incoming message
              int number_amount;
              ierr = MPI_Get_count(&status, MPI_UNSIGNED, &number_amount);
              AssertThrowMPI(ierr);

              // receive message
              Assert(number_amount % 2 == 0, ExcInternalError());
              std::vector<std::pair<unsigned int, unsigned int>> buffer(
                number_amount / 2);
              ierr = MPI_Recv(buffer.data(),
                              number_amount,
                              MPI_UNSIGNED,
                              status.MPI_SOURCE,
                              status.MPI_TAG,
                              comm,
                              &status);
              AssertThrowMPI(ierr);

              // unpack the message and translate the dictionary-local
              // indices coming via MPI to the global index range
              const types::global_dof_index index_offset =
                dict.get_index_offset(status.MPI_SOURCE);
              unsigned int offset = 0;
              while (offset < buffer.size())
                {
                  AssertIndexRange(offset + buffer[offset].second,
                                   buffer.size());

                  IndexSet my_index_set(owned_indices.size());
                  for (unsigned int i = offset + 1;
                       i < offset + buffer[offset].second + 1;
                       ++i)
                    my_index_set.add_range(index_offset + buffer[i].first,
                                           index_offset + buffer[i].second);

                  // the underlying index set is able to merge ranges coming
                  // from different ranks due to the partitioning in the
                  // dictionary
                  IndexSet &index_set = requested_indices[buffer[offset].first];
                  if (index_set.size() == 0)
                    index_set.set_size(owned_indices.size());
                  index_set.add_indices(my_index_set);

                  offset += buffer[offset].second + 1;
                }
              AssertDimension(offset, buffer.size());
            }

          if (send_requests.size() > 0)
            {
              const auto ierr = MPI_Waitall(send_requests.size(),
                                            send_requests.data(),
                                            MPI_STATUSES_IGNORE);
              AssertThrowMPI(ierr);
            }


# ifdef DEBUG
          for (const auto &it : requested_indices)
            {
              IndexSet copy_set = it.second;
              copy_set.subtract_set(owned_indices);
              Assert(copy_set.n_elements() == 0,
                     ExcMessage("The indices requested from the current "
                                "MPI rank should be locally owned here!"));
            }
# endif

#endif // DEAL_II_WITH_MPI

          return requested_indices;
        }


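        // Record that rank_of_request asked for the dictionary-local index
        // index_within_dict owned by rank_of_owner, extending the last stored
        // interval when the indices are consecutive.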
        void
        ConsensusAlgorithmsPayload::append_index_origin(
          const unsigned int index_within_dict,
          const unsigned int rank_of_request,
          const unsigned int rank_of_owner,
          unsigned int      &owner_index_guess)
        {
          // remember who requested which index. We want to use an
          // std::vector with simple addressing, via a good guess from the
          // preceding index, rather than std::map, because this is an inner
          // loop and it avoids the map lookup in every iteration
          owner_index_guess =
            dict.get_owning_rank_index(rank_of_owner, owner_index_guess);

          auto &request = requesters[owner_index_guess];
          if (request.empty() || request.back().first != rank_of_request)
            request.emplace_back(
              rank_of_request,
              std::vector<std::pair<unsigned int, unsigned int>>());

          auto &intervals = request.back().second;
          if (intervals.empty() || intervals.back().second != index_within_dict)
            intervals.emplace_back(index_within_dict, index_within_dict + 1);
          else
            ++intervals.back().second;
        }
      } // namespace ComputeIndexOwner
    }   // namespace internal
  }     // namespace MPI
} // namespace Utilities

DEAL_II_NAMESPACE_CLOSE