Reference documentation for deal.II version GIT relicensing-487-ge9eb5ab491 2024-04-25 07:20:02+00:00
mpi_compute_index_owner_internal.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2022 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#include <deal.II/base/config.h>
16
17#include <deal.II/base/mpi.h>
18#include <deal.II/base/mpi_compute_index_owner_internal.h>
19
20#include <boost/serialization/utility.hpp>
21
22DEAL_II_NAMESPACE_OPEN
23
24namespace Utilities
25{
26 namespace MPI
27 {
28 namespace internal
29 {
30 namespace ComputeIndexOwner
31 {
32 const FlexibleIndexStorage::index_type
33 FlexibleIndexStorage::invalid_index_value;
34
35
36
37 FlexibleIndexStorage::FlexibleIndexStorage(const bool use_vector)
38 : use_vector(use_vector)
39 , size(0)
40 {}
41
42
43
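// FlexibleIndexStorage keeps one entry per index of the local
// dictionary window and backs it either by a dense std::vector
// (`data`, used when `use_vector` is true) or by a sparse std::map
// (`data_map`). Entries that have not been set yet carry
// `invalid_index_value`, which is what entry_has_been_set() checks.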
44 void
45 FlexibleIndexStorage::reinit(const bool use_vector,
46 const bool index_range_contiguous,
47 const std::size_t size)
48 {
49 this->use_vector = use_vector;
50 this->size = size;
51
52 data = {};
53 data_map.clear();
54
55 // in case we have contiguous indices, only fill the vector upon
56 // first request in `fill`
57 if (use_vector && !index_range_contiguous)
58 data.resize(size, invalid_index_value);
59 }
60
61
62
63 void
64 FlexibleIndexStorage::fill(
65 const std::size_t start,
66 const std::size_t end,
67 const index_type &value)
68 {
69 AssertIndexRange(start, size);
70 AssertIndexRange(end, size + 1);
71
72 if (use_vector)
73 {
74 if (data.empty() && end > start)
75 {
76 // in debug mode, we want to track whether we set all
77 // indices, so we first fill in an invalid index and only
78 // later the actual ones; in release mode, we instead assign
79 // the given rank to the complete vector the first time we
80 // pass through this function, to avoid touching data
81 // unnecessarily (and overwriting the smaller pieces), as the
82 // locally owned part comes first
83#ifdef DEBUG
84 data.resize(size, invalid_index_value);
85 std::fill(data.begin() + start, data.begin() + end, value);
86#else
87 data.resize(size, value);
88#endif
89 }
90 else
91 {
92 AssertDimension(data.size(), size);
93 std::fill(data.begin() + start, data.begin() + end, value);
94 }
95 }
96 else
97 {
98 for (auto i = start; i < end; ++i)
99 data_map[i] = value;
100 }
101 }
102
103
104
105 FlexibleIndexStorage::index_type &
106 FlexibleIndexStorage::operator[](const std::size_t index)
107 {
108 AssertIndexRange(index, size);
109
110 if (use_vector)
111 {
112 AssertDimension(data.size(), size);
113 return data[index];
114 }
115 else
116 {
117 if (data_map.find(index) == data_map.end())
118 data_map[index] = invalid_index_value;
119
120 return data_map[index];
121 }
122 }
123
124
125
126 FlexibleIndexStorage::index_type
127 FlexibleIndexStorage::operator[](const std::size_t index) const
128 {
129 AssertIndexRange(index, size);
130
131 if (use_vector)
132 {
133 AssertDimension(data.size(), size);
134 return data[index];
135 }
136 else
137 {
138 if (data_map.find(index) == data_map.end())
139 return invalid_index_value;
140
141 return data_map.at(index);
142 }
143 }
144
145
146
147 bool
148 FlexibleIndexStorage::entry_has_been_set(const std::size_t index) const
149 {
150 AssertIndexRange(index, size);
151
152 if (use_vector)
153 {
154 if (data.empty())
155 return false;
156
157 AssertDimension(data.size(), size);
158 return data[index] != invalid_index_value;
159 }
160 else
161 return data_map.find(index) != data_map.end();
162 }
163
164
165
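// The Dictionary splits the global index space [0, size) into
// contiguous windows of `dofs_per_process` indices each (see
// partition() below). Every MPI rank acts as the "dictionary" for
// its window: after reinit(), `actually_owning_ranks` records, for
// each index in the window, the rank that actually owns that index
// in `owned_indices`.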
166 void
167 Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm comm)
168 {
169 // 1) set up the partition
170 this->partition(owned_indices, comm);
171
172 unsigned int my_rank = this_mpi_process(comm);
173
174 types::global_dof_index dic_local_received = 0;
175 std::map<unsigned int,
176 std::vector<std::pair<types::global_dof_index,
177 types::global_dof_index>>>
178 buffers;
179
180 const auto owned_indices_size_actual =
181 Utilities::MPI::sum(owned_indices.n_elements(), comm);
182
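// Choose the storage for the dictionary entries: use the dense
// vector variant if the globally used indices are not too sparse
// (more than 1/sparsity_factor of the index space is in use), and
// mark the range as contiguous if every index in [0, size) is
// owned by some rank, so that the vector can be filled lazily on
// the first call to fill().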
183 actually_owning_ranks.reinit((owned_indices_size_actual *
184 sparsity_factor) > owned_indices.size(),
185 owned_indices_size_actual ==
186 owned_indices.size(),
187 locally_owned_size);
188
189 // 2) collect relevant processes and process local dict entries
190 for (auto interval = owned_indices.begin_intervals();
191 interval != owned_indices.end_intervals();
192 ++interval)
193 {
194 // Due to the granularity of the dictionary, the interval
195 // might be split into several ranges of processor owner
196 ranks. Here, we process the interval by breaking it into
197 // smaller pieces in terms of the dictionary number.
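// For example, assuming dofs_per_process == 100 and the default
// stride, an owned interval [150, 420) is forwarded in pieces
// [150, 200), [200, 300), [300, 400), and [400, 420) to the
// dictionary ranks 1, 2, 3, and 4, respectively.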
198 std::pair<types::global_dof_index, types::global_dof_index>
199 index_range(*interval->begin(), interval->last() + 1);
200
201 AssertThrow(index_range.second <= size, ExcInternalError());
202
203 while (index_range.first != index_range.second)
204 {
205 Assert(index_range.first < index_range.second,
206 ExcInternalError());
207
208 const unsigned int owner =
209 dof_to_dict_rank(index_range.first);
210
211 // this explicitly picks up the formula of
212 // dof_to_dict_rank, so the two places must be in sync
213 const types::global_dof_index next_index =
214 std::min(get_index_offset(owner + 1), index_range.second);
215
216 Assert(next_index > index_range.first, ExcInternalError());
217
218#ifdef DEBUG
219 // make sure that the owner is the same on the current
220 // interval
221 for (types::global_dof_index i = index_range.first + 1;
222 i < next_index;
223 ++i)
224 AssertDimension(owner, dof_to_dict_rank(i));
225#endif
226
227 // add the interval, either to the local range or into a
228 // buffer to be sent to another processor
229 if (owner == my_rank)
230 {
231 actually_owning_ranks.fill(index_range.first -
232 local_range.first,
233 next_index - local_range.first,
234 my_rank);
235 dic_local_received += next_index - index_range.first;
236 if (actually_owning_rank_list.empty())
237 actually_owning_rank_list.push_back(my_rank);
238 }
239 else
240 buffers[owner].emplace_back(index_range.first, next_index);
241
242 index_range.first = next_index;
243 }
244 }
245
246#ifdef DEAL_II_WITH_MPI
247 n_dict_procs_in_owned_indices = buffers.size();
248 std::vector<MPI_Request> request;
249
250 // Check if index set space is partitioned globally without gaps.
251 if (owned_indices_size_actual == owned_indices.size())
252 {
253 // no gaps: setup is simple! Processes send their locally owned
254 // indices to the dictionary. The dictionary stores the sending
255 // rank for each index. The dictionary knows that it is
256 // completely set up once all indices it is responsible for
257 // have been processed.
258
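// Each message is a flat list of half-open intervals
// [first, second), sent as raw pairs of types::global_dof_index
// (hence the count of size() * 2 entries in MPI_Isend below and
// number_amount / 2 pairs on the receiving side). The receiver
// attributes each interval to the sending rank and accumulates the
// received indices in dic_local_received until its whole window is
// covered.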
259 request.reserve(n_dict_procs_in_owned_indices);
260
261 // protect the following communication steps using a mutex:
262 static CollectiveMutex mutex;
263 CollectiveMutex::ScopedLock lock(mutex, comm);
264
265 const int mpi_tag =
266 Utilities::MPI::internal::Tags::dictionary_reinit;
267
268
269 // 3) send messages with local dofs to the right dict process
270 for (const auto &rank_pair : buffers)
271 {
272 request.push_back(MPI_Request());
273 const int ierr = MPI_Isend(rank_pair.second.data(),
274 rank_pair.second.size() * 2,
275 DEAL_II_DOF_INDEX_MPI_TYPE,
276 rank_pair.first,
277 mpi_tag,
278 comm,
279 &request.back());
280 AssertThrowMPI(ierr);
281 }
282
283 // 4) receive messages until all dofs in dict are processed
284 while (this->locally_owned_size != dic_local_received)
285 {
286 // wait for an incoming message
287 MPI_Status status;
288 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
289 AssertThrowMPI(ierr);
290
291 // retrieve size of incoming message
292 int number_amount;
293 ierr = MPI_Get_count(&status,
294 DEAL_II_DOF_INDEX_MPI_TYPE,
295 &number_amount);
296 AssertThrowMPI(ierr);
297
298 const auto other_rank = status.MPI_SOURCE;
299 actually_owning_rank_list.push_back(other_rank);
300
301 // receive message
302 Assert(number_amount % 2 == 0, ExcInternalError());
303 std::vector<
304 std::pair<types::global_dof_index, types::global_dof_index>>
305 buffer(number_amount / 2);
306 ierr = MPI_Recv(buffer.data(),
307 number_amount,
308 DEAL_II_DOF_INDEX_MPI_TYPE,
309 status.MPI_SOURCE,
310 status.MPI_TAG,
311 comm,
312 MPI_STATUS_IGNORE);
313 AssertThrowMPI(ierr);
314 // process message: loop over all intervals
315 for (auto interval : buffer)
316 {
317# ifdef DEBUG
318 for (types::global_dof_index i = interval.first;
319 i < interval.second;
320 i++)
321 Assert(actually_owning_ranks.entry_has_been_set(
322 i - local_range.first) == false,
323 ExcInternalError());
324 Assert(interval.first >= local_range.first &&
325 interval.first < local_range.second,
326 ExcInternalError());
327 Assert(interval.second > local_range.first &&
328 interval.second <= local_range.second,
329 ExcInternalError());
330# endif
331
332 actually_owning_ranks.fill(interval.first -
333 local_range.first,
334 interval.second -
335 local_range.first,
336 other_rank);
337 dic_local_received += interval.second - interval.first;
338 }
339 }
340 }
341 else
342 {
343 // with gap: use a ConsensusAlgorithm to determine when all
344 // dictionaries have been set up.
345
346 // 3/4) use a ConsensusAlgorithm to send messages with local
347 // dofs to the right dict process
348
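// If the index space has gaps, a dictionary rank cannot predict
// how many indices it will eventually receive, so the counting
// loop of the branch above does not work. The consensus algorithm
// below exchanges the same interval lists but detects termination
// dynamically: `targets` lists the dictionary ranks we send to,
// `create_request` packs the intervals for one target, and
// `process_request` records the intervals of one incoming message
// under the sending rank.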
349 using RequestType = std::vector<
350 std::pair<types::global_dof_index, types::global_dof_index>>;
351
352 ConsensusAlgorithms::selector<RequestType>(
353 /* targets = */
354 [&buffers]() {
355 std::vector<unsigned int> targets;
356 targets.reserve(buffers.size());
357 for (const auto &rank_pair : buffers)
358 targets.emplace_back(rank_pair.first);
359
360 return targets;
361 }(),
362
363 /* create_request = */
364 [&buffers](const unsigned int target_rank) -> RequestType {
365 return buffers.at(target_rank);
366 },
367
368 /* process_request = */
369 [&](const unsigned int source_rank,
370 const RequestType &request) -> void {
371 // process message: loop over all intervals
372 for (auto interval : request)
373 {
374# ifdef DEBUG
375 for (types::global_dof_index i = interval.first;
376 i < interval.second;
377 i++)
378 Assert(
379 actually_owning_ranks.entry_has_been_set(
380 i - local_range.first) == false,
381 ExcMessage(
382 "Multiple processes seem to own the same global index. "
383 "A possible reason is that the sets of locally owned "
384 "indices are not distinct."));
385 Assert(interval.first < interval.second,
386 ExcInternalError());
387 Assert(
388 local_range.first <= interval.first &&
389 interval.second <= local_range.second,
390 ExcMessage(
391 "The specified interval is not handled by the current process."));
392# endif
393 actually_owning_ranks.fill(interval.first -
394 local_range.first,
395 interval.second -
396 local_range.first,
397 source_rank);
398 }
399 actually_owning_rank_list.push_back(source_rank);
400 },
401
402 comm);
403 }
404
405 std::sort(actually_owning_rank_list.begin(),
406 actually_owning_rank_list.end());
407
408 for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
409 Assert(actually_owning_rank_list[i] >
410 actually_owning_rank_list[i - 1],
411 ExcInternalError());
412
413 // 5) make sure that all messages have been sent
414 if (request.size() > 0)
415 {
416 const int ierr = MPI_Waitall(request.size(),
417 request.data(),
418 MPI_STATUSES_IGNORE);
419 AssertThrowMPI(ierr);
420 }
421
422#else
423 Assert(buffers.empty(), ExcInternalError());
424 (void)comm;
425 (void)dic_local_received;
426#endif
427 }
428
429
430
431 void
432 Dictionary::partition(const IndexSet &owned_indices,
433 const MPI_Comm comm)
434 {
435 const unsigned int n_procs = n_mpi_processes(comm);
436 const unsigned int my_rank = this_mpi_process(comm);
437
438 size = owned_indices.size();
439
440 Assert(size > 0, ExcNotImplemented());
441
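// Each rank is responsible for a window of dofs_per_process
// consecutive indices; the last window is cut off at `size`. For
// example (ignoring the grain-size adjustment below), size ==
// 1000000 and n_procs == 3 give dofs_per_process == 333334 and
// windows [0, 333334), [333334, 666668), [666668, 1000000).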
442 dofs_per_process = (size + n_procs - 1) / n_procs;
443 if (dofs_per_process < range_minimum_grain_size)
444 {
445 dofs_per_process = range_minimum_grain_size;
446 stride_small_size = dofs_per_process * n_procs / size;
447 }
448 else
449 stride_small_size = 1;
450 local_range.first = get_index_offset(my_rank);
451 local_range.second = get_index_offset(my_rank + 1);
452
453 locally_owned_size = local_range.second - local_range.first;
454 }
455
456
457 ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
458 const IndexSet &owned_indices,
459 const IndexSet &indices_to_look_up,
460 const MPI_Comm comm,
461 std::vector<unsigned int> &owning_ranks,
462 const bool track_index_requests)
463 : owned_indices(owned_indices)
464 , indices_to_look_up(indices_to_look_up)
465 , comm(comm)
466 , my_rank(this_mpi_process(comm))
467 , n_procs(n_mpi_processes(comm))
468 , track_index_requests(track_index_requests)
469 , owning_ranks(owning_ranks)
470 {
471 dict.reinit(owned_indices, comm);
472 requesters.resize(dict.actually_owning_rank_list.size());
473 }
474
475
476
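// The following member functions implement the request/answer
// protocol used by the consensus algorithm that resolves index
// ownership: a request sent to a dictionary rank is a list of
// index intervals (built in create_request()), and the answer is
// one owning rank per requested index, in the same order
// (produced in answer_request() and stored back in read_answer()).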
477 void
478 ConsensusAlgorithmsPayload::answer_request(
479 const unsigned int other_rank,
480 const std::vector<std::pair<types::global_dof_index,
481 types::global_dof_index>> &buffer_recv,
482 std::vector<unsigned int> &request_buffer)
483 {
484 unsigned int owner_index_guess = 0;
485 for (const auto &interval : buffer_recv)
486 for (auto i = interval.first; i < interval.second; ++i)
487 {
488 const unsigned int actual_owner =
489 dict.actually_owning_ranks[i - dict.local_range.first];
490 request_buffer.push_back(actual_owner);
491
492 if (track_index_requests)
493 append_index_origin(i - dict.local_range.first,
494 other_rank,
495 actual_owner,
496 owner_index_guess);
497 }
498 }
499
500
501
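// compute_targets() returns the list of dictionary ranks this
// process has to query. Indices whose dictionary window lives on
// the current rank are resolved immediately from
// dict.actually_owning_ranks; all other indices are grouped per
// dictionary rank in indices_to_look_up_by_dict_rank for the
// later request.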
502 std::vector<unsigned int>
503 ConsensusAlgorithmsPayload::compute_targets()
504 {
505 std::vector<unsigned int> targets;
506
507 indices_to_look_up_by_dict_rank.clear();
508 unsigned int index = 0;
509 unsigned int owner_index_guess = 0;
510 for (auto i : indices_to_look_up)
511 {
512 unsigned int other_rank = dict.dof_to_dict_rank(i);
513 if (other_rank == my_rank)
514 {
515 owning_ranks[index] =
516 dict.actually_owning_ranks[i - dict.local_range.first];
517 if (track_index_requests)
518 append_index_origin(i - dict.local_range.first,
519 my_rank,
520 owning_ranks[index],
521 owner_index_guess);
522 }
523 else
524 {
525 if (targets.empty() || targets.back() != other_rank)
526 targets.push_back(other_rank);
527 auto &indices = indices_to_look_up_by_dict_rank[other_rank];
528 indices.first.push_back(i);
529 indices.second.push_back(index);
530 }
531 ++index;
532 }
533
534 Assert(targets.size() == indices_to_look_up_by_dict_rank.size(),
535 ExcMessage("Size does not match!"));
536
537 return targets;
538 }
539
540
541
542 void
543 ConsensusAlgorithmsPayload::create_request(
544 const unsigned int other_rank,
545 std::vector<std::pair<types::global_dof_index,
546 types::global_dof_index>> &send_buffer)
547 {
548 // create index set and compress data to be sent
549 auto &indices_i = indices_to_look_up_by_dict_rank[other_rank].first;
550 IndexSet is(dict.size);
551 is.add_indices(indices_i.begin(), indices_i.end());
552 is.compress();
553
554 for (auto interval = is.begin_intervals();
555 interval != is.end_intervals();
556 ++interval)
557 send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
558 }
559
560
561
562 void
563 ConsensusAlgorithmsPayload::read_answer(
564 const unsigned int other_rank,
565 const std::vector<unsigned int> &recv_buffer)
566 {
567 const auto &recv_indices =
568 indices_to_look_up_by_dict_rank[other_rank].second;
569 AssertDimension(recv_indices.size(), recv_buffer.size());
570 for (unsigned int j = 0; j < recv_indices.size(); ++j)
571 owning_ranks[recv_indices[j]] = recv_buffer[j];
572 }
573
574
575
576 std::map<unsigned int, IndexSet>
577 ConsensusAlgorithmsPayload::get_requesters()
578 {
579 Assert(track_index_requests,
580 ExcMessage("Must enable index range tracking in "
581 "constructor of ConsensusAlgorithmProcess"));
582
583 std::map<unsigned int, dealii::IndexSet> requested_indices;
584
585#ifdef DEAL_II_WITH_MPI
586
587 static CollectiveMutex mutex;
588 CollectiveMutex::ScopedLock lock(mutex, comm);
589
590 const int mpi_tag = Utilities::MPI::internal::Tags::
591 consensus_algorithm_payload_get_requesters;
592
593 // reserve enough slots for the requests ahead; depending on
594 // whether the owning rank is one of the requesters or not, we
595 // might have one fewer request to execute, so fill the requests
596 // on demand.
597 std::vector<MPI_Request> send_requests;
598 send_requests.reserve(requesters.size());
599
600 // We use an integer vector for the data exchange. Since we send
601 // data associated to intervals with different requesters, we will
602 // need to send (a) the MPI rank of the requester, (b) the number
603 // of intervals directed to this requester, and (c) a list of
604 // intervals, i.e., two integers per interval. The number of items
605 // sent in total can be deduced both from the MPI status message
606 // at the receiving side and by counting the buckets from
607 // different requesters.
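// For illustration, if requesters[i] records that rank 3 asked for
// the intervals [10, 12) and [20, 21) and rank 7 asked for [5, 6),
// then send_data[i] becomes {3, 2, 10, 12, 20, 21, 7, 1, 5, 6}.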
608 std::vector<std::vector<types::global_dof_index>> send_data(
609 requesters.size());
610 for (unsigned int i = 0; i < requesters.size(); ++i)
611 {
612 // special code for our own indices
613 if (dict.actually_owning_rank_list[i] == my_rank)
614 {
615 for (const auto &j : requesters[i])
616 {
617 const types::global_dof_index index_offset =
618 dict.get_index_offset(my_rank);
619 IndexSet &my_index_set = requested_indices[j.first];
620 my_index_set.set_size(owned_indices.size());
621 for (const auto &interval : j.second)
622 my_index_set.add_range(index_offset + interval.first,
623 index_offset + interval.second);
624 }
625 }
626 else
627 {
628 for (const auto &j : requesters[i])
629 {
630 send_data[i].push_back(j.first);
631 send_data[i].push_back(j.second.size());
632 for (const auto &interval : j.second)
633 {
634 send_data[i].push_back(interval.first);
635 send_data[i].push_back(interval.second);
636 }
637 }
638 send_requests.push_back(MPI_Request());
639 const int ierr =
640 MPI_Isend(send_data[i].data(),
641 send_data[i].size(),
642 Utilities::MPI::mpi_type_id_for_type<
643 types::global_dof_index>,
644 dict.actually_owning_rank_list[i],
645 mpi_tag,
646 comm,
647 &send_requests.back());
648 AssertThrowMPI(ierr);
649 }
650 }
651
652 // receive the data
653 for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices; ++c)
654 {
655 // wait for an incoming message
656 MPI_Status status;
657 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
658 AssertThrowMPI(ierr);
659
660 // retrieve size of incoming message
661 int number_amount;
662 ierr = MPI_Get_count(
663 &status,
664 Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
665 &number_amount);
666 AssertThrowMPI(ierr);
667
668 // receive message
669 Assert(number_amount % 2 == 0, ExcInternalError());
670 std::vector<
671 std::pair<types::global_dof_index, types::global_dof_index>>
672 buffer(number_amount / 2);
673 ierr = MPI_Recv(
674 buffer.data(),
675 number_amount,
676 Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
677 status.MPI_SOURCE,
678 status.MPI_TAG,
679 comm,
680 &status);
681 AssertThrowMPI(ierr);
682
683 // unpack the message and translate the dictionary-local
684 // indices coming via MPI to the global index range
685 const types::global_dof_index index_offset =
686 dict.get_index_offset(status.MPI_SOURCE);
687 unsigned int offset = 0;
688 while (offset < buffer.size())
689 {
690 AssertIndexRange(offset + buffer[offset].second,
691 buffer.size());
692
693 IndexSet my_index_set(owned_indices.size());
694 for (unsigned int i = offset + 1;
695 i < offset + buffer[offset].second + 1;
696 ++i)
697 my_index_set.add_range(index_offset + buffer[i].first,
698 index_offset + buffer[i].second);
699
700 // the underlying index set is able to merge ranges coming
701 // from different ranks due to the partitioning in the
702 // dictionary
703 IndexSet &index_set = requested_indices[buffer[offset].first];
704 if (index_set.size() == 0)
705 index_set.set_size(owned_indices.size());
706 index_set.add_indices(my_index_set);
707
708 offset += buffer[offset].second + 1;
709 }
710 AssertDimension(offset, buffer.size());
711 }
712
713 if (send_requests.size() > 0)
714 {
715 const auto ierr = MPI_Waitall(send_requests.size(),
716 send_requests.data(),
717 MPI_STATUSES_IGNORE);
718 AssertThrowMPI(ierr);
719 }
720
721
722# ifdef DEBUG
723 for (const auto &it : requested_indices)
724 {
725 IndexSet copy_set = it.second;
726 copy_set.subtract_set(owned_indices);
727 Assert(copy_set.n_elements() == 0,
729 "The indices requested from the current "
730 "MPI rank should be locally owned here!"));
731 }
732# endif
733
734#endif // DEAL_II_WITH_MPI
735
736 return requested_indices;
737 }
738
739
740
741 void
742 ConsensusAlgorithmsPayload::append_index_origin(
743 const types::global_dof_index index_within_dict,
744 const unsigned int rank_of_request,
745 const unsigned int rank_of_owner,
746 unsigned int &owner_index_guess)
747 {
748 // remember who requested which index. We want to use an
749 // std::vector with simple addressing, via a good guess from the
750 // preceding index, rather than std::map, because this is an inner
751 // loop and it avoids the map lookup in every iteration
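// Consecutive dictionary-local indices requested by the same rank
// are merged into a single half-open interval below: for example,
// indices 5, 6, 7 coming from the same requester end up as the
// single pair (5, 8).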
752 owner_index_guess =
753 dict.get_owning_rank_index(rank_of_owner, owner_index_guess);
754
755 auto &request = requesters[owner_index_guess];
756 if (request.empty() || request.back().first != rank_of_request)
757 request.emplace_back(
758 rank_of_request,
759 std::vector<
760 std::pair<types::global_dof_index, types::global_dof_index>>());
761
762 auto &intervals = request.back().second;
763 if (intervals.empty() || intervals.back().second != index_within_dict)
764 intervals.emplace_back(index_within_dict, index_within_dict + 1);
765 else
766 ++intervals.back().second;
767 }
768 } // namespace ComputeIndexOwner
769 } // namespace internal
770 } // namespace MPI
771} // namespace Utilities
772
773DEAL_II_NAMESPACE_CLOSE
IntervalIterator end_intervals() const
Definition index_set.h:1732
size_type size() const
Definition index_set.h:1765
size_type n_elements() const
Definition index_set.h:1923
void set_size(const size_type size)
Definition index_set.h:1753
IntervalIterator begin_intervals() const
Definition index_set.h:1720
void subtract_set(const IndexSet &other)
Definition index_set.cc:470
void add_range(const size_type begin, const size_type end)
Definition index_set.h:1792
void compress() const
Definition index_set.h:1773
void add_indices(const ForwardIterator &begin, const ForwardIterator &end)
Definition index_set.h:1820
virtual void answer_request(const unsigned int other_rank, const std::vector< std::pair< types::global_dof_index, types::global_dof_index > > &buffer_recv, std::vector< unsigned int > &request_buffer) override
virtual void read_answer(const unsigned int other_rank, const std::vector< unsigned int > &recv_buffer) override
virtual void create_request(const unsigned int other_rank, std::vector< std::pair< types::global_dof_index, types::global_dof_index > > &send_buffer) override
std::vector< std::vector< std::pair< unsigned int, std::vector< std::pair< types::global_dof_index, types::global_dof_index > > > > > requesters
std::map< unsigned int, std::pair< std::vector< types::global_dof_index >, std::vector< unsigned int > > > indices_to_look_up_by_dict_rank
ConsensusAlgorithmsPayload(const IndexSet &owned_indices, const IndexSet &indices_to_look_up, const MPI_Comm comm, std::vector< unsigned int > &owning_ranks, const bool track_index_requests=false)
void append_index_origin(const types::global_dof_index index_within_dictionary, const unsigned int rank_of_request, const unsigned int rank_of_owner, unsigned int &owner_index_guess)
void reinit(const bool use_vector, const bool index_range_contiguous, const std::size_t size)
void fill(const std::size_t start, const std::size_t end, const index_type &value)
#define DEAL_II_NAMESPACE_OPEN
Definition config.h:502
#define DEAL_II_NAMESPACE_CLOSE
Definition config.h:503
static ::ExceptionBase & ExcNotImplemented()
#define Assert(cond, exc)
#define AssertDimension(dim1, dim2)
#define AssertThrowMPI(error_code)
#define AssertIndexRange(index, range)
static ::ExceptionBase & ExcInternalError()
static ::ExceptionBase & ExcMessage(std::string arg1)
#define AssertThrow(cond, exc)
@ consensus_algorithm_payload_get_requesters
ConsensusAlgorithms::Payload::get_requesters()
Definition mpi_tags.h:80
@ dictionary_reinit
Dictionary::reinit()
Definition mpi_tags.h:77
T sum(const T &t, const MPI_Comm mpi_communicator)
unsigned int n_mpi_processes(const MPI_Comm mpi_communicator)
Definition mpi.cc:92
unsigned int this_mpi_process(const MPI_Comm mpi_communicator)
Definition mpi.cc:107
const MPI_Datatype mpi_type_id_for_type
Definition mpi.h:1674
::VectorizedArray< Number, width > min(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)
unsigned int global_dof_index
Definition types.h:81
types::global_dof_index get_index_offset(const unsigned int rank)
std::pair< types::global_dof_index, types::global_dof_index > local_range
void reinit(const IndexSet &owned_indices, const MPI_Comm comm)
void partition(const IndexSet &owned_indices, const MPI_Comm comm)
unsigned int get_owning_rank_index(const unsigned int rank_in_owned_indices, const unsigned int guess=0)
#define DEAL_II_DOF_INDEX_MPI_TYPE
Definition types.h:102