Reference documentation for deal.II version GIT relicensing-1062-gc06da148b8 2024-07-15 19:20:02+00:00
mpi_compute_index_owner_internal.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2022 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/config.h>

#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_compute_index_owner_internal.h>

#include <boost/serialization/utility.hpp>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    namespace internal
    {
      namespace ComputeIndexOwner
      {


        FlexibleIndexStorage::FlexibleIndexStorage(const bool use_vector)
          : use_vector(use_vector)
          , size(0)
        {}


        void
        FlexibleIndexStorage::reinit(const bool        use_vector,
                                     const bool        index_range_contiguous,
                                     const std::size_t size)
        {
          this->use_vector = use_vector;
          this->size       = size;

          data = {};
          data_map.clear();

          // in case we have contiguous indices, only fill the vector upon
          // first request in `fill`
          if (use_vector && !index_range_contiguous)
            data.resize(size, invalid_index_value);
        }


        void
        FlexibleIndexStorage::fill(const std::size_t start,
                                   const std::size_t end,
                                   const index_type &value)
        {
          AssertIndexRange(start, size);
          AssertIndexRange(end, size + 1);

          if (use_vector)
            {
              if (data.empty() && end > start)
                {
                  // in debug mode, we want to track whether we set all
                  // indices, so we first fill an invalid index and only later
                  // the actual ones, whereas we simply assign the given rank
                  // to the complete vector the first time we pass around in
                  // this function in release mode to avoid touching data
                  // unnecessarily (and overwrite the smaller pieces), as the
                  // locally owned part comes first
#ifdef DEBUG
                  data.resize(size, invalid_index_value);
                  std::fill(data.begin() + start, data.begin() + end, value);
#else
                  data.resize(size, value);
#endif
                }
              else
                {
                  AssertDimension(data.size(), size);
                  std::fill(data.begin() + start, data.begin() + end, value);
                }
            }
          else
            {
              for (auto i = start; i < end; ++i)
                data_map[i] = value;
            }
        }


        FlexibleIndexStorage::index_type &
        FlexibleIndexStorage::operator[](const std::size_t index)
        {
          AssertIndexRange(index, size);

          if (use_vector)
            {
              AssertDimension(data.size(), size);
              return data[index];
            }
          else
            {
              return data_map.try_emplace(index, invalid_index_value)
                .first->second;
            }
        }


        FlexibleIndexStorage::index_type
        FlexibleIndexStorage::operator[](const std::size_t index) const
        {
          AssertIndexRange(index, size);

          if (use_vector)
            {
              AssertDimension(data.size(), size);
              return data[index];
            }
          else
            {
              if (data_map.find(index) == data_map.end())
                return invalid_index_value;

              return data_map.at(index);
            }
        }
142
143
144
145 bool
146 FlexibleIndexStorage::entry_has_been_set(const std::size_t index) const
147 {
148 AssertIndexRange(index, size);
149
150 if (use_vector)
151 {
152 if (data.empty())
153 return false;
154
155 AssertDimension(data.size(), size);
156 return data[index] != invalid_index_value;
157 }
158 else
159 return data_map.find(index) != data_map.end();
160 }
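
        // The commented-out snippet below is not part of this file; it is a
        // minimal sketch of how the two storage back-ends of
        // FlexibleIndexStorage behave: with `use_vector == true` entries live
        // in a std::vector and are addressed directly, otherwise they live in
        // a std::map that is populated on demand.
        //
        //   FlexibleIndexStorage storage(/* use_vector = */ true);
        //   storage.reinit(/* use_vector             = */ true,
        //                  /* index_range_contiguous = */ false,
        //                  /* size                   = */ 100);
        //   storage.fill(10, 20, /* value = */ 3); // entries [10,20) hold 3
        //   storage.entry_has_been_set(15);        // -> true
        //   storage.entry_has_been_set(50);        // -> false
        //   storage[15];                           // -> 3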


        void
        Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm comm)
        {
          // 1) set up the partition
          this->partition(owned_indices, comm);

          unsigned int my_rank = this_mpi_process(comm);

          types::global_dof_index dic_local_received = 0;
          std::map<unsigned int,
                   std::vector<std::pair<types::global_dof_index,
                                         types::global_dof_index>>>
            buffers;

          const auto owned_indices_size_actual =
            Utilities::MPI::sum(owned_indices.n_elements(), comm);

          actually_owning_ranks.reinit((owned_indices_size_actual *
                                        sparsity_factor) > owned_indices.size(),
                                       owned_indices_size_actual ==
                                         owned_indices.size(),
                                       locally_owned_size);

          // 2) collect relevant processes and process local dict entries
          for (auto interval = owned_indices.begin_intervals();
               interval != owned_indices.end_intervals();
               ++interval)
            {
              // Due to the granularity of the dictionary, the interval
              // might be split into several ranges of processor owner
              // ranks. Here, we process the interval by breaking it into
              // smaller pieces in terms of the dictionary number.
              std::pair<types::global_dof_index, types::global_dof_index>
                index_range(*interval->begin(), interval->last() + 1);

              AssertThrow(index_range.second <= size, ExcInternalError());

              while (index_range.first != index_range.second)
                {
                  Assert(index_range.first < index_range.second,
                         ExcInternalError());

                  const unsigned int owner =
                    dof_to_dict_rank(index_range.first);

                  // this explicitly picks up the formula of
                  // dof_to_dict_rank, so the two places must be in sync
                  const types::global_dof_index next_index =
                    std::min(get_index_offset(owner + 1), index_range.second);

                  Assert(next_index > index_range.first, ExcInternalError());

#ifdef DEBUG
                  // make sure that the owner is the same on the current
                  // interval
                  for (types::global_dof_index i = index_range.first + 1;
                       i < next_index;
                       ++i)
                    AssertDimension(owner, dof_to_dict_rank(i));
#endif

                  // add the interval, either to the local range or into a
                  // buffer to be sent to another processor
                  if (owner == my_rank)
                    {
                      actually_owning_ranks.fill(index_range.first -
                                                   local_range.first,
                                                 next_index - local_range.first,
                                                 my_rank);
                      dic_local_received += next_index - index_range.first;
                      if (actually_owning_rank_list.empty())
                        actually_owning_rank_list.push_back(my_rank);
                    }
                  else
                    buffers[owner].emplace_back(index_range.first, next_index);

                  index_range.first = next_index;
                }
            }

#ifdef DEAL_II_WITH_MPI
          n_dict_procs_in_owned_indices = buffers.size();
          std::vector<MPI_Request> request;

          // Check if index set space is partitioned globally without gaps.
          if (owned_indices_size_actual == owned_indices.size())
            {
              // no gaps: the setup is simple! Processes send their locally
              // owned indices to the dictionary. The dictionary stores the
              // sending rank for each index. The dictionary knows that it is
              // completely set up once all indices it is responsible for
              // have been processed.

              request.reserve(n_dict_procs_in_owned_indices);

              // protect the following communication steps using a mutex:
              static CollectiveMutex      mutex;
              CollectiveMutex::ScopedLock lock(mutex, comm);

              const int mpi_tag =
                Utilities::MPI::internal::Tags::dictionary_reinit;

              // 3) send messages with local dofs to the right dict process
              for (const auto &rank_pair : buffers)
                {
                  request.push_back(MPI_Request());
                  const int ierr = MPI_Isend(rank_pair.second.data(),
                                             rank_pair.second.size() * 2,
                                             DEAL_II_DOF_INDEX_MPI_TYPE,
                                             rank_pair.first,
                                             mpi_tag,
                                             comm,
                                             &request.back());
                  AssertThrowMPI(ierr);
                }

              // 4) receive messages until all dofs in dict are processed
              while (this->locally_owned_size != dic_local_received)
                {
                  // wait for an incoming message
                  MPI_Status status;
                  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
                  AssertThrowMPI(ierr);

                  // retrieve size of incoming message
                  int number_amount;
                  ierr = MPI_Get_count(&status,
                                       DEAL_II_DOF_INDEX_MPI_TYPE,
                                       &number_amount);
                  AssertThrowMPI(ierr);

                  const auto other_rank = status.MPI_SOURCE;
                  actually_owning_rank_list.push_back(other_rank);

                  // receive message
                  Assert(number_amount % 2 == 0, ExcInternalError());
                  std::vector<
                    std::pair<types::global_dof_index, types::global_dof_index>>
                    buffer(number_amount / 2);
                  ierr = MPI_Recv(buffer.data(),
                                  number_amount,
                                  DEAL_II_DOF_INDEX_MPI_TYPE,
                                  status.MPI_SOURCE,
                                  status.MPI_TAG,
                                  comm,
                                  MPI_STATUS_IGNORE);
                  AssertThrowMPI(ierr);
                  // process message: loop over all intervals
                  for (auto interval : buffer)
                    {
# ifdef DEBUG
                      for (types::global_dof_index i = interval.first;
                           i < interval.second;
                           i++)
                        Assert(actually_owning_ranks.entry_has_been_set(
                                 i - local_range.first) == false,
                               ExcInternalError());
                      Assert(interval.first >= local_range.first &&
                               interval.first < local_range.second,
                             ExcInternalError());
                      Assert(interval.second > local_range.first &&
                               interval.second <= local_range.second,
                             ExcInternalError());
# endif

                      actually_owning_ranks.fill(interval.first -
                                                   local_range.first,
                                                 interval.second -
                                                   local_range.first,
                                                 other_rank);
                      dic_local_received += interval.second - interval.first;
                    }
                }
            }
          else
            {
              // with gaps: use a ConsensusAlgorithm to determine when all
              // dictionaries have been set up.

              // 3/4) use a ConsensusAlgorithm to send messages with local
              // dofs to the right dict process

              using RequestType = std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>;

              ConsensusAlgorithms::selector<RequestType>(
                /* targets = */
                [&buffers]() {
                  std::vector<unsigned int> targets;
                  targets.reserve(buffers.size());
                  for (const auto &rank_pair : buffers)
                    targets.emplace_back(rank_pair.first);

                  return targets;
                }(),

                /* create_request = */
                [&buffers](const unsigned int target_rank) -> RequestType {
                  return buffers.at(target_rank);
                },

                /* process_request = */
                [&](const unsigned int source_rank,
                    const RequestType &request) -> void {
                  // process message: loop over all intervals
                  for (auto interval : request)
                    {
# ifdef DEBUG
                      for (types::global_dof_index i = interval.first;
                           i < interval.second;
                           i++)
                        Assert(
                          actually_owning_ranks.entry_has_been_set(
                            i - local_range.first) == false,
                          ExcMessage(
                            "Multiple processes seem to own the same global index. "
                            "A possible reason is that the sets of locally owned "
                            "indices are not distinct."));
                      Assert(interval.first < interval.second,
                             ExcInternalError());
                      Assert(
                        local_range.first <= interval.first &&
                          interval.second <= local_range.second,
                        ExcMessage(
                          "The specified interval is not handled by the current process."));
# endif
                      actually_owning_ranks.fill(interval.first -
                                                   local_range.first,
                                                 interval.second -
                                                   local_range.first,
                                                 source_rank);
                    }
                  actually_owning_rank_list.push_back(source_rank);
                },

                comm);
            }

          std::sort(actually_owning_rank_list.begin(),
                    actually_owning_rank_list.end());

          for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
            Assert(actually_owning_rank_list[i] >
                     actually_owning_rank_list[i - 1],
                   ExcInternalError());

          // 5) make sure that all messages have been sent
          if (request.size() > 0)
            {
              const int ierr = MPI_Waitall(request.size(),
                                           request.data(),
                                           MPI_STATUSES_IGNORE);
              AssertThrowMPI(ierr);
            }

#else
          Assert(buffers.empty(), ExcInternalError());
          (void)comm;
          (void)dic_local_received;
#endif
        }
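
        // Illustrative sketch (not part of this file): with a global index
        // space of size 16 on 4 MPI ranks, the dictionary splits the space
        // into equally sized, consecutive chunks, so dictionary rank r is
        // responsible for indices [4*r, 4*(r+1)). During reinit(), every
        // process sends the intervals it owns to the dictionary ranks that
        // cover them; e.g. a process owning {2, 3, 4, 5} sends [2,4) to
        // dictionary rank 0 and [4,6) to dictionary rank 1, and each
        // dictionary rank records the sending rank in actually_owning_ranks.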


        void
        Dictionary::partition(const IndexSet &owned_indices,
                              const MPI_Comm  comm)
        {
          const unsigned int n_procs = n_mpi_processes(comm);
          const unsigned int my_rank = this_mpi_process(comm);

          size = owned_indices.size();

          Assert(size > 0, ExcNotImplemented());

          dofs_per_process = (size + n_procs - 1) / n_procs;
          if (dofs_per_process < range_minimum_grain_size)
            {
              dofs_per_process  = range_minimum_grain_size;
              stride_small_size = dofs_per_process * n_procs / size;
            }
          else
            stride_small_size = 1;
          local_range.first  = get_index_offset(my_rank);
          local_range.second = get_index_offset(my_rank + 1);

          locally_owned_size = local_range.second - local_range.first;
        }
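
        // A small worked example (not part of this file), ignoring the
        // grain-size correction: for size == 1000 and n_procs == 4 we get
        // dofs_per_process == (1000 + 3) / 4 == 250, so rank 2 is assigned
        // local_range == [500, 750) and locally_owned_size == 250.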


        ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
          const IndexSet            &owned_indices,
          const IndexSet            &indices_to_look_up,
          const MPI_Comm             comm,
          std::vector<unsigned int> &owning_ranks,
          const bool                 track_index_requests)
          : owned_indices(owned_indices)
          , indices_to_look_up(indices_to_look_up)
          , comm(comm)
          , my_rank(this_mpi_process(comm))
          , n_procs(n_mpi_processes(comm))
          , track_index_requests(track_index_requests)
          , owning_ranks(owning_ranks)
        {
          dict.reinit(owned_indices, comm);
          requesters.resize(dict.actually_owning_rank_list.size());
        }
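
        // Data-flow sketch (not part of this file): a consensus algorithm
        // drives the overrides below. For every dictionary rank returned by
        // compute_targets(), create_request() packs the indices this process
        // wants to look up into intervals; the dictionary rank answers in
        // answer_request() with one owner rank per requested index; and
        // read_answer() finally scatters those owner ranks back into the
        // `owning_ranks` result vector.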


        void
        ConsensusAlgorithmsPayload::answer_request(
          const unsigned int other_rank,
          const std::vector<std::pair<types::global_dof_index,
                                      types::global_dof_index>> &buffer_recv,
          std::vector<unsigned int> &request_buffer)
        {
          unsigned int owner_index_guess = 0;
          for (const auto &interval : buffer_recv)
            for (auto i = interval.first; i < interval.second; ++i)
              {
                const unsigned int actual_owner =
                  dict.actually_owning_ranks[i - dict.local_range.first];
                request_buffer.push_back(actual_owner);

                if (track_index_requests)
                  append_index_origin(i - dict.local_range.first,
                                      other_rank,
                                      actual_owner,
                                      owner_index_guess);
              }
        }


        std::vector<unsigned int>
        ConsensusAlgorithmsPayload::compute_targets()
        {
          std::vector<unsigned int> targets;

          unsigned int index             = 0;
          unsigned int owner_index_guess = 0;
          for (auto i : indices_to_look_up)
            {
              unsigned int other_rank = dict.dof_to_dict_rank(i);
              if (other_rank == my_rank)
                {
                  owning_ranks[index] =
                    dict.actually_owning_ranks[i - dict.local_range.first];
                  if (track_index_requests)
                    append_index_origin(i - dict.local_range.first,
                                        my_rank,
                                        owning_ranks[index],
                                        owner_index_guess);
                }
              else
                {
                  if (targets.empty() || targets.back() != other_rank)
                    targets.push_back(other_rank);
                  auto &indices = indices_to_look_up_by_dict_rank[other_rank];
                  indices.first.push_back(i);
                  indices.second.push_back(index);
                }
              ++index;
            }

          Assert(targets.size() == indices_to_look_up_by_dict_rank.size(),
                 ExcMessage("Size does not match!"));

          return targets;
        }
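
        // Worked example (not part of this file): with dictionary chunks of
        // size 4, a process that is dictionary rank 0 and wants to look up
        // the global indices {1, 9, 10} resolves index 1 from its own
        // dictionary slice, records 9 and 10 under
        // indices_to_look_up_by_dict_rank[2], and returns {2} as the only
        // target; create_request() later compresses {9, 10} into the single
        // interval [9, 11).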


        void
        ConsensusAlgorithmsPayload::create_request(
          const unsigned int other_rank,
          std::vector<std::pair<types::global_dof_index,
                                types::global_dof_index>> &send_buffer)
        {
          // create index set and compress data to be sent
          auto &indices_i = indices_to_look_up_by_dict_rank[other_rank].first;
          IndexSet is(dict.size);
          is.add_indices(indices_i.begin(), indices_i.end());
          is.compress();

          for (auto interval = is.begin_intervals();
               interval != is.end_intervals();
               ++interval)
            send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
        }
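
        // For instance (illustration only): if the indices collected for
        // `other_rank` are {3, 4, 5, 9}, the IndexSet compresses them into
        // the two contiguous ranges [3, 6) and [9, 10), and exactly these
        // two pairs end up in `send_buffer`.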


        void
        ConsensusAlgorithmsPayload::read_answer(
          const unsigned int               other_rank,
          const std::vector<unsigned int> &recv_buffer)
        {
          const auto &recv_indices =
            indices_to_look_up_by_dict_rank[other_rank].second;
          AssertDimension(recv_indices.size(), recv_buffer.size());
          for (unsigned int j = 0; j < recv_indices.size(); ++j)
            owning_ranks[recv_indices[j]] = recv_buffer[j];
        }


        std::map<unsigned int, IndexSet>
        ConsensusAlgorithmsPayload::get_requesters()
        {
          Assert(track_index_requests,
                 ExcMessage("Must enable index range tracking in "
                            "constructor of ConsensusAlgorithmProcess"));

          std::map<unsigned int, IndexSet> requested_indices;

#ifdef DEAL_II_WITH_MPI

          static CollectiveMutex      mutex;
          CollectiveMutex::ScopedLock lock(mutex, comm);

          const int mpi_tag = Utilities::MPI::internal::Tags::
            consensus_algorithm_payload_get_requesters;

          // reserve enough slots for the requests ahead; depending on
          // whether the owning rank is one of the requesters or not, we
          // might have one request fewer to execute, so fill the requests
          // on demand.
          std::vector<MPI_Request> send_requests;
          send_requests.reserve(requesters.size());

          // We use an integer vector for the data exchange. Since we send
          // data associated with intervals for different requesters, we will
          // need to send (a) the MPI rank of the requester, (b) the number
          // of intervals directed to this requester, and (c) a list of
          // intervals, i.e., two integers per interval. The number of items
          // sent in total can be deduced both via the MPI status message at
          // the receiver side as well as by counting the buckets from
          // different requesters.
          std::vector<std::vector<types::global_dof_index>> send_data(
            requesters.size());
          for (unsigned int i = 0; i < requesters.size(); ++i)
            {
              // special code for our own indices
              if (dict.actually_owning_rank_list[i] == my_rank)
                {
                  for (const auto &j : requesters[i])
                    {
                      const types::global_dof_index index_offset =
                        dict.get_index_offset(my_rank);
                      IndexSet &my_index_set = requested_indices[j.first];
                      my_index_set.set_size(owned_indices.size());
                      for (const auto &interval : j.second)
                        my_index_set.add_range(index_offset + interval.first,
                                               index_offset + interval.second);
                    }
                }
              else
                {
                  for (const auto &j : requesters[i])
                    {
                      send_data[i].push_back(j.first);
                      send_data[i].push_back(j.second.size());
                      for (const auto &interval : j.second)
                        {
                          send_data[i].push_back(interval.first);
                          send_data[i].push_back(interval.second);
                        }
                    }
                  send_requests.push_back(MPI_Request());
                  const int ierr =
                    MPI_Isend(send_data[i].data(),
                              send_data[i].size(),
                              Utilities::MPI::mpi_type_id_for_type<
                                types::global_dof_index>,
                              dict.actually_owning_rank_list[i],
                              mpi_tag,
                              comm,
                              &send_requests.back());
                  AssertThrowMPI(ierr);
                }
            }

          // receive the data
          for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices; ++c)
            {
              // wait for an incoming message
              MPI_Status status;
              int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
              AssertThrowMPI(ierr);

              // retrieve size of incoming message
              int number_amount;
              ierr = MPI_Get_count(
                &status,
                Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
                &number_amount);
              AssertThrowMPI(ierr);

              // receive message
              Assert(number_amount % 2 == 0, ExcInternalError());
              std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>
                buffer(number_amount / 2);
              ierr = MPI_Recv(
                buffer.data(),
                number_amount,
                Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
                status.MPI_SOURCE,
                status.MPI_TAG,
                comm,
                &status);
              AssertThrowMPI(ierr);

              // unpack the message and translate the dictionary-local
              // indices coming via MPI to the global index range
              const types::global_dof_index index_offset =
                dict.get_index_offset(status.MPI_SOURCE);
              unsigned int offset = 0;
              while (offset < buffer.size())
                {
                  AssertIndexRange(offset + buffer[offset].second,
                                   buffer.size());

                  IndexSet my_index_set(owned_indices.size());
                  for (unsigned int i = offset + 1;
                       i < offset + buffer[offset].second + 1;
                       ++i)
                    my_index_set.add_range(index_offset + buffer[i].first,
                                           index_offset + buffer[i].second);

                  // the underlying index set is able to merge ranges coming
                  // from different ranks due to the partitioning in the
                  // dictionary
                  IndexSet &index_set = requested_indices[buffer[offset].first];
                  if (index_set.size() == 0)
                    index_set.set_size(owned_indices.size());
                  index_set.add_indices(my_index_set);

                  offset += buffer[offset].second + 1;
                }
              AssertDimension(offset, buffer.size());
            }

          if (send_requests.size() > 0)
            {
              const auto ierr = MPI_Waitall(send_requests.size(),
                                            send_requests.data(),
                                            MPI_STATUSES_IGNORE);
              AssertThrowMPI(ierr);
            }


# ifdef DEBUG
          for (const auto &it : requested_indices)
            {
              IndexSet copy_set = it.second;
              copy_set.subtract_set(owned_indices);
              Assert(copy_set.n_elements() == 0,
                     ExcInternalError(
                       "The indices requested from the current "
                       "MPI rank should be locally owned here!"));
            }
# endif

#endif // DEAL_II_WITH_MPI

          return requested_indices;
        }
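
        // Wire-format illustration (not part of this file): if dictionary
        // bucket i has to report that rank 7 requested the local intervals
        // [0,2) and [5,6) and rank 9 requested [5,6), then send_data[i]
        // contains the integers {7, 2, 0, 2, 5, 6, 9, 1, 5, 6}: requester
        // rank, number of intervals, and the interval bounds, repeated per
        // requester. The receiver walks exactly this layout in the while
        // loop above and shifts the local indices by its dictionary offset.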


        void
        ConsensusAlgorithmsPayload::append_index_origin(
          const types::global_dof_index index_within_dict,
          const unsigned int            rank_of_request,
          const unsigned int            rank_of_owner,
          unsigned int                 &owner_index_guess)
        {
          // remember who requested which index. We want to use a
          // std::vector with simple addressing, via a good guess from the
          // preceding index, rather than a std::map, because this is an
          // inner loop and avoiding the map lookup in every iteration pays
          // off
          owner_index_guess =
            dict.get_owning_rank_index(rank_of_owner, owner_index_guess);

          auto &request = requesters[owner_index_guess];
          if (request.empty() || request.back().first != rank_of_request)
            request.emplace_back(
              rank_of_request,
              std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>());

          auto &intervals = request.back().second;
          if (intervals.empty() || intervals.back().second != index_within_dict)
            intervals.emplace_back(index_within_dict, index_within_dict + 1);
          else
            ++intervals.back().second;
        }
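
        // Illustration (not part of this file): calling this function for
        // rank_of_request == 3 with index_within_dict == 10, 11, 12 grows a
        // single interval [10, 13) in requesters, whereas a subsequent call
        // with index 20 starts a new interval [20, 21).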
      } // namespace ComputeIndexOwner
    } // namespace internal
  } // namespace MPI
} // namespace Utilities

DEAL_II_NAMESPACE_CLOSE