Reference documentation for deal.II version 9.4.1
mpi_compute_index_owner_internal.cc
Go to the documentation of this file.
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2019 - 2022 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16#include <deal.II/base/config.h>
17
18#include <deal.II/base/mpi.h>
19#include <deal.II/base/mpi_compute_index_owner_internal.h>
20
21#include <boost/serialization/utility.hpp>
22
23 DEAL_II_NAMESPACE_OPEN
24
25namespace Utilities
26{
27 namespace MPI
28 {
29 namespace internal
30 {
31 namespace ComputeIndexOwner
32 {
33 const FlexibleIndexStorage::index_type
34 FlexibleIndexStorage::invalid_index_value;
35
36
37
38 FlexibleIndexStorage::FlexibleIndexStorage(const bool use_vector)
39 : use_vector(use_vector)
40 , size(0)
41 {}
42
43
44
45 void
46 FlexibleIndexStorage::reinit(const bool use_vector,
47 const bool index_range_contiguous,
48 const std::size_t size)
49 {
50 this->use_vector = use_vector;
51 this->size = size;
52
53 data = {};
54 data_map.clear();
55
56 // in case we have contiguous indices, only fill the vector upon
57 // first request in `fill`
58 if (!index_range_contiguous)
59 data.resize(size, invalid_index_value);
60 }
61
62
63
64 void
65 FlexibleIndexStorage::fill(
66 const std::size_t start,
67 const std::size_t end,
68 const index_type &value)
69 {
70 AssertIndexRange(start, size);
71 AssertIndexRange(end, size + 1);
72
73 if (use_vector)
74 {
75 if (data.empty() && end > start)
76 {
77 // in debug mode, we want to track whether we set all
78 // indices, so we first fill an invalid index and only later
79 // the actual ones, whereas we simply assign the given rank
80 // to the complete vector the first time we pass through
81 // this function in release mode to avoid touching data
82 // unnecessarily (and overwrite the smaller pieces), as the
83 // locally owned part comes first
84#ifdef DEBUG
85 data.resize(size, invalid_index_value);
86 std::fill(data.begin() + start, data.begin() + end, value);
87#else
88 data.resize(size, value);
89#endif
90 }
91 else
92 {
93 AssertDimension(data.size(), size);
94 std::fill(data.begin() + start, data.begin() + end, value);
95 }
96 }
97 else
98 {
99 for (auto i = start; i < end; ++i)
100 data_map[i] = value;
101 }
102 }
103
104
105
106 FlexibleIndexStorage::index_type &
107 FlexibleIndexStorage::operator[](const std::size_t index)
108 {
109 AssertIndexRange(index, size);
110
111 if (use_vector)
112 {
113 AssertDimension(data.size(), size);
114 return data[index];
115 }
116 else
117 {
118 if (data_map.find(index) == data_map.end())
119 data_map[index] = invalid_index_value;
120
121 return data_map[index];
122 }
123 }
124
125
126
127 FlexibleIndexStorage::index_type
128 FlexibleIndexStorage::operator[](const std::size_t index) const
129 {
130 AssertIndexRange(index, size);
131
132 if (use_vector)
133 {
134 AssertDimension(data.size(), size);
135 return data[index];
136 }
137 else
138 {
139 if (data_map.find(index) == data_map.end())
140 return invalid_index_value;
141
142 return data_map.at(index);
143 }
144 }
145
146
147
148 bool
149 FlexibleIndexStorage::entry_has_been_set(const std::size_t index) const
150 {
151 AssertIndexRange(index, size);
152
153 if (use_vector)
154 {
155 if (data.empty())
156 return false;
157
158 AssertDimension(data.size(), size);
159 return data[index] != invalid_index_value;
160 }
161 else
162 return data_map.find(index) != data_map.end();
163 }
164
165
166
167 DictionaryPayLoad::DictionaryPayLoad(
168 const std::map<unsigned int,
169 std::vector<std::pair<types::global_dof_index,
170 types::global_dof_index>>>
171 & buffers,
172 FlexibleIndexStorage &actually_owning_ranks,
173 const std::pair<types::global_dof_index, types::global_dof_index>
174 & local_range,
175 std::vector<unsigned int> &actually_owning_rank_list)
176 : buffers(buffers)
177 , actually_owning_ranks(actually_owning_ranks)
178 , local_range(local_range)
179 , actually_owning_rank_list(actually_owning_rank_list)
180 {
181 Assert(local_range.first <= local_range.second, ExcInternalError());
182 }
183
184
185
186 std::vector<unsigned int>
187 DictionaryPayLoad::compute_targets()
188 {
189 std::vector<unsigned int> targets;
190 for (const auto &rank_pair : buffers)
191 targets.push_back(rank_pair.first);
192
193 return targets;
194 }
195
196
197
198 void
199 DictionaryPayLoad::create_request(
200 const unsigned int other_rank,
201 std::vector<std::pair<types::global_dof_index,
202 types::global_dof_index>> &send_buffer)
203 {
204 send_buffer = this->buffers.at(other_rank);
205 }
206
207
208
209 void
210 DictionaryPayLoad::answer_request(
211 const unsigned int other_rank,
212 const std::vector<std::pair<types::global_dof_index,
213 types::global_dof_index>> &buffer_recv,
214 std::vector<unsigned int> & request_buffer)
215 {
216 (void)request_buffer; // not needed
217
218
219 // process message: loop over all intervals
220 for (auto interval : buffer_recv)
221 {
222#ifdef DEBUG
223 for (types::global_dof_index i = interval.first;
224 i < interval.second;
225 i++)
226 Assert(
227 actually_owning_ranks.entry_has_been_set(
228 i - local_range.first) == false,
229 ExcMessage(
230 "Multiple processes seem to own the same global index. "
231 "A possible reason is that the sets of locally owned "
232 "indices are not distinct."));
233 Assert(interval.first < interval.second, ExcInternalError());
234 Assert(
235 local_range.first <= interval.first &&
236 interval.second <= local_range.second,
237 ExcMessage(
238 "The specified interval is not handled by the current process."));
239#endif
240 actually_owning_ranks.fill(interval.first - local_range.first,
241 interval.second - local_range.first,
242 other_rank);
243 }
244 actually_owning_rank_list.push_back(other_rank);
245 }
246
247
248
249 void
250 Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm &comm)
251 {
252 // 1) set up the partition
253 this->partition(owned_indices, comm);
254
255#ifdef DEAL_II_WITH_MPI
256 unsigned int my_rank = this_mpi_process(comm);
257
258 types::global_dof_index dic_local_received = 0;
259 std::map<unsigned int,
260 std::vector<std::pair<types::global_dof_index,
261 types::global_dof_index>>>
262 buffers;
263
264 const auto owned_indices_size_actual =
265 Utilities::MPI::sum(owned_indices.n_elements(), comm);
266
267 actually_owning_ranks.reinit((owned_indices_size_actual *
268 sparsity_factor) > owned_indices.size(),
269 owned_indices_size_actual ==
270 owned_indices.size(),
271 locally_owned_size);
272
273 // 2) collect relevant processes and process local dict entries
274 for (auto interval = owned_indices.begin_intervals();
275 interval != owned_indices.end_intervals();
276 ++interval)
277 {
278 // Due to the granularity of the dictionary, the interval
279 // might be split into several ranges of processor owner
280 // ranks. Here, we process the interval by breaking it into
281 // smaller pieces in terms of the dictionary number.
282 std::pair<types::global_dof_index, types::global_dof_index>
283 index_range(*interval->begin(), interval->last() + 1);
284
285 AssertThrow(index_range.second <= size, ExcInternalError());
286
287 while (index_range.first != index_range.second)
288 {
289 Assert(index_range.first < index_range.second,
290 ExcInternalError());
291
292 const unsigned int owner =
293 dof_to_dict_rank(index_range.first);
294
295 // this explicitly picks up the formula of
296 // dof_to_dict_rank, so the two places must be in sync
297 const types::global_dof_index next_index =
298 std::min(get_index_offset(owner + 1), index_range.second);
299
300 Assert(next_index > index_range.first, ExcInternalError());
301
302# ifdef DEBUG
303 // make sure that the owner is the same on the current
304 // interval
305 for (types::global_dof_index i = index_range.first + 1;
306 i < next_index;
307 ++i)
309# endif
310
311 // add the interval, either to the local range or into a
312 // buffer to be sent to another processor
313 if (owner == my_rank)
314 {
315 actually_owning_ranks.fill(index_range.first -
316 local_range.first,
317 next_index - local_range.first,
318 my_rank);
319 dic_local_received += next_index - index_range.first;
320 if (actually_owning_rank_list.empty())
321 actually_owning_rank_list.push_back(my_rank);
322 }
323 else
324 buffers[owner].emplace_back(index_range.first, next_index);
325
326 index_range.first = next_index;
327 }
328 }
329
330 n_dict_procs_in_owned_indices = buffers.size();
331 std::vector<MPI_Request> request;
332
333 // Check if index set space is partitioned globally without gaps.
334 if (owned_indices_size_actual == owned_indices.size())
335 {
336 // no gaps: setup is simple! Processes send their locally owned
337 // indices to the dictionary. The dictionary stores the sending
338 // rank for each index. The dictionary knows that it is
339 // completely set up once all indices it is responsible for
340 // have been processed.
341
342 request.reserve(n_dict_procs_in_owned_indices);
343
344 // protect the following communication steps using a mutex:
345 static CollectiveMutex mutex;
346 CollectiveMutex::ScopedLock lock(mutex, comm);
347
348 const int mpi_tag =
349 Utilities::MPI::internal::Tags::dictionary_reinit;
350
351
352 // 3) send messages with local dofs to the right dict process
353 for (const auto &rank_pair : buffers)
354 {
355 request.push_back(MPI_Request());
356 const int ierr = MPI_Isend(rank_pair.second.data(),
357 rank_pair.second.size() * 2,
358 DEAL_II_DOF_INDEX_MPI_TYPE,
359 rank_pair.first,
360 mpi_tag,
361 comm,
362 &request.back());
363 AssertThrowMPI(ierr);
364 }
365
366 // 4) receive messages until all dofs in dict are processed
367 while (this->locally_owned_size != dic_local_received)
368 {
369 // wait for an incoming message
370 MPI_Status status;
371 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
372 AssertThrowMPI(ierr);
373
374 // retrieve size of incoming message
375 int number_amount;
376 ierr = MPI_Get_count(&status,
377 DEAL_II_DOF_INDEX_MPI_TYPE,
378 &number_amount);
379 AssertThrowMPI(ierr);
380
381 const auto other_rank = status.MPI_SOURCE;
382 actually_owning_rank_list.push_back(other_rank);
383
384 // receive message
385 Assert(number_amount % 2 == 0, ExcInternalError());
386 std::vector<
387 std::pair<types::global_dof_index, types::global_dof_index>>
388 buffer(number_amount / 2);
389 ierr = MPI_Recv(buffer.data(),
390 number_amount,
391 DEAL_II_DOF_INDEX_MPI_TYPE,
392 status.MPI_SOURCE,
393 status.MPI_TAG,
394 comm,
395 MPI_STATUS_IGNORE);
396 AssertThrowMPI(ierr);
397 // process message: loop over all intervals
398 for (auto interval : buffer)
399 {
400# ifdef DEBUG
401 for (types::global_dof_index i = interval.first;
402 i < interval.second;
403 i++)
404 Assert(actually_owning_ranks.entry_has_been_set(
405 i - local_range.first) == false,
406 ExcInternalError());
407 Assert(interval.first >= local_range.first &&
408 interval.first < local_range.second,
409 ExcInternalError());
410 Assert(interval.second > local_range.first &&
411 interval.second <= local_range.second,
412 ExcInternalError());
413# endif
414
415 actually_owning_ranks.fill(interval.first -
416 local_range.first,
417 interval.second -
418 local_range.first,
419 other_rank);
420 dic_local_received += interval.second - interval.first;
421 }
422 }
423 }
424 else
425 {
426 // with gaps: use a ConsensusAlgorithm to determine when all
427 // dictionaries have been set up.
428
429 // 3/4) use a ConsensusAlgorithm to send messages with local
430 // dofs to the right dict process
431 DictionaryPayLoad temp(buffers,
432 actually_owning_ranks,
433 local_range,
434 actually_owning_rank_list);
435
436 ConsensusAlgorithms::Selector<
437 std::vector<
438 std::pair<types::global_dof_index, types::global_dof_index>>,
439 std::vector<unsigned int>>
440 consensus_algo(temp, comm);
441 consensus_algo.run();
442 }
443
444 std::sort(actually_owning_rank_list.begin(),
445 actually_owning_rank_list.end());
446
447 for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
448 Assert(actually_owning_rank_list[i] >
449 actually_owning_rank_list[i - 1],
450 ExcInternalError());
451
452 // 5) make sure that all messages have been sent
453 if (request.size() > 0)
454 {
455 const int ierr = MPI_Waitall(request.size(),
456 request.data(),
457 MPI_STATUSES_IGNORE);
458 AssertThrowMPI(ierr);
459 }
460
461#else
462 (void)owned_indices;
463 (void)comm;
464#endif
465 }
466
467
468
469 void
470 Dictionary::partition(const IndexSet &owned_indices,
471 const MPI_Comm &comm)
472 {
473#ifdef DEAL_II_WITH_MPI
474 const unsigned int n_procs = n_mpi_processes(comm);
475 const unsigned int my_rank = this_mpi_process(comm);
476
477 size = owned_indices.size();
478
479 Assert(size > 0, ExcNotImplemented());
480
481 dofs_per_process = (size + n_procs - 1) / n_procs;
482 if (dofs_per_process < range_minimum_grain_size)
483 {
484 dofs_per_process = range_minimum_grain_size;
485 stride_small_size = dofs_per_process * n_procs / size;
486 }
487 else
488 stride_small_size = 1;
489 local_range.first = get_index_offset(my_rank);
490 local_range.second = get_index_offset(my_rank + 1);
491
492 locally_owned_size = local_range.second - local_range.first;
493#else
494 (void)owned_indices;
495 (void)comm;
496#endif
497 }
498
499
500 ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
501 const IndexSet & owned_indices,
502 const IndexSet & indices_to_look_up,
503 const MPI_Comm & comm,
504 std::vector<unsigned int> &owning_ranks,
505 const bool track_index_requests)
506 : owned_indices(owned_indices)
507 , indices_to_look_up(indices_to_look_up)
508 , comm(comm)
509 , my_rank(this_mpi_process(comm))
510 , n_procs(n_mpi_processes(comm))
511 , track_index_requests(track_index_requests)
512 , owning_ranks(owning_ranks)
513 {
514 dict.reinit(owned_indices, comm);
515 requesters.resize(dict.actually_owning_rank_list.size());
516 }
517
518
519
520 void
521 ConsensusAlgorithmsPayload::answer_request(
522 const unsigned int other_rank,
523 const std::vector<std::pair<types::global_dof_index,
524 types::global_dof_index>> &buffer_recv,
525 std::vector<unsigned int> & request_buffer)
526 {
527 unsigned int owner_index = 0;
528 for (const auto &interval : buffer_recv)
529 for (auto i = interval.first; i < interval.second; ++i)
530 {
531 const unsigned int actual_owner =
532 dict.actually_owning_ranks[i - dict.local_range.first];
533 request_buffer.push_back(actual_owner);
534
535 if (track_index_requests)
536 append_index_origin(i, owner_index, other_rank);
537 }
538 }
539
540
541
542 std::vector<unsigned int>
543 ConsensusAlgorithmsPayload::compute_targets()
544 {
545 std::vector<unsigned int> targets;
546
547 // 1) collect relevant processes and process local dict entries
548 {
549 unsigned int index = 0;
550 unsigned int owner_index = 0;
551 for (auto i : indices_to_look_up)
552 {
553 unsigned int other_rank = dict.dof_to_dict_rank(i);
554 if (other_rank == my_rank)
555 {
556 owning_ranks[index] =
557 dict.actually_owning_ranks[i - dict.local_range.first];
558 if (track_index_requests)
559 append_index_origin(i, owner_index, my_rank);
560 }
561 else if (targets.empty() || targets.back() != other_rank)
562 targets.push_back(other_rank);
563 index++;
564 }
565 }
566
567
568 for (auto i : targets)
569 {
570 recv_indices[i] = {};
571 indices_to_look_up_by_dict_rank[i] = {};
572 }
573
574 // 3) collect indices for each process
575 {
576 unsigned int index = 0;
577 for (auto i : indices_to_look_up)
578 {
579 unsigned int other_rank = dict.dof_to_dict_rank(i);
580 if (other_rank != my_rank)
581 {
582 recv_indices[other_rank].push_back(index);
583 indices_to_look_up_by_dict_rank[other_rank].push_back(i);
584 }
585 index++;
586 }
587 }
588
589 Assert(targets.size() == recv_indices.size() &&
590 targets.size() == indices_to_look_up_by_dict_rank.size(),
591 ExcMessage("Size does not match!"));
592
593 return targets;
594 }
595
596
597
598 void
599 ConsensusAlgorithmsPayload::create_request(
600 const unsigned int other_rank,
601 std::vector<std::pair<types::global_dof_index,
602 types::global_dof_index>> &send_buffer)
603 {
604 // create index set and compress data to be sent
605 auto & indices_i = indices_to_look_up_by_dict_rank[other_rank];
606 IndexSet is(dict.size);
607 is.add_indices(indices_i.begin(), indices_i.end());
608 is.compress();
609
610 for (auto interval = is.begin_intervals();
611 interval != is.end_intervals();
612 ++interval)
613 send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
614 }
615
616
617
618 void
619 ConsensusAlgorithmsPayload::read_answer(
620 const unsigned int other_rank,
621 const std::vector<unsigned int> &recv_buffer)
622 {
623 Assert(recv_buffer.size() == recv_indices[other_rank].size(),
624 ExcInternalError());
625 Assert(recv_indices[other_rank].size() == recv_buffer.size(),
626 ExcMessage("Sizes do not match!"));
627
628 for (unsigned int j = 0; j < recv_indices[other_rank].size(); ++j)
629 owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
630 }
631
632
633
634 std::map<unsigned int, IndexSet>
635 ConsensusAlgorithmsPayload::get_requesters()
636 {
637 Assert(track_index_requests,
638 ExcMessage("Must enable index range tracking in "
639 "constructor of ConsensusAlgorithmProcess"));
640
641 std::map<unsigned int, IndexSet> requested_indices;
642
643#ifdef DEAL_II_WITH_MPI
644
645 static CollectiveMutex mutex;
646 CollectiveMutex::ScopedLock lock(mutex, comm);
647
648 const int mpi_tag = Utilities::MPI::internal::Tags::
649 consensus_algorithm_payload_get_requesters;
650
651 // reserve enough slots for the requests ahead; depending on
652 // whether the owning rank is one of the requesters or not, we
653 // might have one fewer request to execute, so fill the requests
654 // on demand.
655 std::vector<MPI_Request> send_requests;
656 send_requests.reserve(requesters.size());
657
658 // We use an integer vector for the data exchange. Since we send
659 // data associated to intervals with different requesters, we will
660 // need to send (a) the MPI rank of the requester, (b) the number
661 // of intervals directed to this requester, and (c) a list of
662 // intervals, i.e., two integers per interval. The number of items
663 // sent in total can be deduced both via the MPI status message at
664 // the receiver side as well as by counting the buckets from
665 // different requesters.
666 std::vector<std::vector<unsigned int>> send_data(requesters.size());
667 for (unsigned int i = 0; i < requesters.size(); ++i)
668 {
669 // special code for our own indices
670 if (dict.actually_owning_rank_list[i] == my_rank)
671 {
672 for (const auto &j : requesters[i])
673 {
674 const types::global_dof_index index_offset =
675 dict.get_index_offset(my_rank);
676 IndexSet &my_index_set = requested_indices[j.first];
677 my_index_set.set_size(owned_indices.size());
678 for (const auto &interval : j.second)
679 my_index_set.add_range(index_offset + interval.first,
680 index_offset + interval.second);
681 }
682 }
683 else
684 {
685 for (const auto &j : requesters[i])
686 {
687 send_data[i].push_back(j.first);
688 send_data[i].push_back(j.second.size());
689 for (const auto &interval : j.second)
690 {
691 send_data[i].push_back(interval.first);
692 send_data[i].push_back(interval.second);
693 }
694 }
695 send_requests.push_back(MPI_Request());
696 const int ierr = MPI_Isend(send_data[i].data(),
697 send_data[i].size(),
698 MPI_UNSIGNED,
699 dict.actually_owning_rank_list[i],
700 mpi_tag,
701 comm,
702 &send_requests.back());
703 AssertThrowMPI(ierr);
704 }
705 }
706
707 // receive the data
708 for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices; ++c)
709 {
710 // wait for an incoming message
711 MPI_Status status;
712 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
713 AssertThrowMPI(ierr);
714
715 // retrieve size of incoming message
716 int number_amount;
717 ierr = MPI_Get_count(&status, MPI_UNSIGNED, &number_amount);
718 AssertThrowMPI(ierr);
719
720 // receive message
721 Assert(number_amount % 2 == 0, ExcInternalError());
722 std::vector<std::pair<unsigned int, unsigned int>> buffer(
723 number_amount / 2);
724 ierr = MPI_Recv(buffer.data(),
725 number_amount,
726 MPI_UNSIGNED,
727 status.MPI_SOURCE,
728 status.MPI_TAG,
729 comm,
730 &status);
731 AssertThrowMPI(ierr);
732
733 // unpack the message and translate the dictionary-local
734 // indices coming via MPI to the global index range
735 const types::global_dof_index index_offset =
736 dict.get_index_offset(status.MPI_SOURCE);
737 unsigned int offset = 0;
738 while (offset < buffer.size())
739 {
740 AssertIndexRange(offset + buffer[offset].second,
741 buffer.size());
742
743 IndexSet my_index_set(owned_indices.size());
744 for (unsigned int i = offset + 1;
745 i < offset + buffer[offset].second + 1;
746 ++i)
747 my_index_set.add_range(index_offset + buffer[i].first,
748 index_offset + buffer[i].second);
749
750 // the underlying index set is able to merge ranges coming
751 // from different ranks due to the partitioning in the
752 // dictionary
753 IndexSet &index_set = requested_indices[buffer[offset].first];
754 if (index_set.size() == 0)
755 index_set.set_size(owned_indices.size());
756 index_set.add_indices(my_index_set);
757
758 offset += buffer[offset].second + 1;
759 }
760 AssertDimension(offset, buffer.size());
761 }
762
763 if (send_requests.size() > 0)
764 {
765 const auto ierr = MPI_Waitall(send_requests.size(),
766 send_requests.data(),
767 MPI_STATUSES_IGNORE);
768 AssertThrowMPI(ierr);
769 }
770
771
772# ifdef DEBUG
773 for (const auto &it : requested_indices)
774 {
775 IndexSet copy_set = it.second;
776 copy_set.subtract_set(owned_indices);
777 Assert(copy_set.n_elements() == 0,
778 ExcMessage(
779 "The indices requested from the current "
780 "MPI rank should be locally owned here!"));
781 }
782# endif
783
784#endif // DEAL_II_WITH_MPI
785
786 return requested_indices;
787 }
788
789
790
791 void
792 ConsensusAlgorithmsPayload::append_index_origin(
793 const types::global_dof_index index,
794 unsigned int & owner_index,
795 const unsigned int rank_of_request)
796 {
797 // remember who requested which index. We want to use an
798 // std::vector with simple addressing, via a good guess from the
799 // preceding index, rather than std::map, because this is an inner
800 // loop and it avoids the map lookup in every iteration
801 const unsigned int rank_of_owner =
802 dict.actually_owning_ranks[index - dict.local_range.first];
803 owner_index = dict.get_owning_rank_index(rank_of_owner, owner_index);
804 if (requesters[owner_index].empty() ||
805 requesters[owner_index].back().first != rank_of_request)
806 requesters[owner_index].emplace_back(
807 rank_of_request,
808 std::vector<std::pair<unsigned int, unsigned int>>());
809 if (requesters[owner_index].back().second.empty() ||
810 requesters[owner_index].back().second.back().second !=
811 index - dict.local_range.first)
812 requesters[owner_index].back().second.emplace_back(
813 index - dict.local_range.first,
814 index - dict.local_range.first + 1);
815 else
816 ++requesters[owner_index].back().second.back().second;
817 }
818 } // namespace ComputeIndexOwner
819 } // namespace internal
820 } // namespace MPI
821} // namespace Utilities
822
823 DEAL_II_NAMESPACE_CLOSE
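The classes in this file are internal helpers; application code normally reaches them through the public wrapper Utilities::MPI::compute_index_owner(), declared in deal.II/base/mpi.h, which builds the Dictionary and drives the ConsensusAlgorithmsPayload shown above. The following is a minimal usage sketch of that entry point; the block-of-ten partitioning and the program layout are illustrative assumptions, not taken from this file.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <iostream>
#include <vector>

int main(int argc, char **argv)
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const MPI_Comm     comm    = MPI_COMM_WORLD;
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);

  // Illustrative partition: each rank owns a contiguous block of 10 indices.
  const types::global_dof_index n_global = 10 * n_procs;
  IndexSet owned(n_global);
  owned.add_range(10 * my_rank, 10 * (my_rank + 1));

  // Each rank asks who owns the first index of the next rank's block.
  IndexSet ghosts(n_global);
  ghosts.add_index((10 * (my_rank + 1)) % n_global);

  // One owning rank is returned per requested index; internally this sets up
  // the dictionary and runs the consensus algorithm implemented above.
  const std::vector<unsigned int> owners =
    Utilities::MPI::compute_index_owner(owned, ghosts, comm);

  std::cout << "rank " << my_rank << ": index " << *ghosts.begin()
            << " is owned by rank " << owners[0] << std::endl;
}

Each rank receives one entry in owners per index it asked about; the answer is produced by the two-level lookup (dictionary rank first, actual owner second) implemented in this file.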