Reference documentation for deal.II version 9.3.3
mpi_compute_index_owner_internal.h
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2019 - 2021 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16#ifndef dealii_base_mpi_compute_index_owner_internal_h
17#define dealii_base_mpi_compute_index_owner_internal_h
18
19#include <deal.II/base/config.h>
20
21#include <deal.II/base/mpi.h>
22#include <deal.II/base/mpi_consensus_algorithms.h>
23
24DEAL_II_NAMESPACE_OPEN
25
26namespace Utilities
27{
28 namespace MPI
29 {
30 namespace internal
31 {
36 namespace ComputeIndexOwner
37 {
45 class DictionaryPayLoad
46 : public ConsensusAlgorithms::Process<
47 std::pair<types::global_dof_index, types::global_dof_index>,
48 unsigned int>
49 {
50 public:
54 DictionaryPayLoad(
55 const std::map<unsigned int,
56 std::vector<std::pair<types::global_dof_index,
57 types::global_dof_index>>>
58 & buffers,
59 std::vector<unsigned int> &actually_owning_ranks,
60 const std::pair<types::global_dof_index, types::global_dof_index>
61 & local_range,
62 std::vector<unsigned int> &actually_owning_rank_list)
63 : buffers(buffers)
64 , actually_owning_ranks(actually_owning_ranks)
65 , local_range(local_range)
66 , actually_owning_rank_list(actually_owning_rank_list)
67 {}
68
73 virtual std::vector<unsigned int>
74 compute_targets() override
75 {
76 std::vector<unsigned int> targets;
77 for (const auto &rank_pair : buffers)
78 targets.push_back(rank_pair.first);
79
80 return targets;
81 }
82
87 virtual void
88 create_request(const unsigned int other_rank,
89 std::vector<std::pair<types::global_dof_index,
90 types::global_dof_index>>
91 &send_buffer) override
92 {
93 send_buffer = this->buffers.at(other_rank);
94 }
95
100 virtual void
101 answer_request(
102 const unsigned int other_rank,
103 const std::vector<std::pair<types::global_dof_index,
104 types::global_dof_index>> &buffer_recv,
105 std::vector<unsigned int> &request_buffer) override
106 {
107 (void)request_buffer; // not needed
108
109
110 // process message: loop over all intervals
111 for (auto interval : buffer_recv)
112 {
113#ifdef DEBUG
114 for (types::global_dof_index i = interval.first;
115 i < interval.second;
116 i++)
117 Assert(actually_owning_ranks[i - local_range.first] ==
118 numbers::invalid_subdomain_id,
119 ExcInternalError());
120 Assert(interval.first >= local_range.first &&
121 interval.first < local_range.second,
122 ExcInternalError());
123 Assert(interval.second > local_range.first &&
124 interval.second <= local_range.second,
125 ExcInternalError());
126#endif
127 std::fill(actually_owning_ranks.data() + interval.first -
128 local_range.first,
129 actually_owning_ranks.data() + interval.second -
130 local_range.first,
131 other_rank);
132 }
133 actually_owning_rank_list.push_back(other_rank);
134 }
135
136 private:
137 const std::map<unsigned int,
138 std::vector<std::pair<types::global_dof_index,
139 types::global_dof_index>>>
140 &buffers;
141
142 std::vector<unsigned int> &actually_owning_ranks;
143
144 const std::pair<types::global_dof_index, types::global_dof_index>
145 &local_range;
146
147 std::vector<unsigned int> &actually_owning_rank_list;
148 };
149
150
151
157 struct Dictionary
158 {
180 static const unsigned int range_minimum_grain_size = 64;
181
188 std::vector<unsigned int> actually_owning_ranks;
189
194 std::vector<unsigned int> actually_owning_rank_list;
195
202 types::global_dof_index dofs_per_process;
203
209 std::pair<types::global_dof_index, types::global_dof_index>
210 local_range;
211
217 types::global_dof_index locally_owned_size;
218
222 types::global_dof_index size;
223
228 unsigned int n_dict_procs_in_owned_indices;
229
235 unsigned int stride_small_size;
236
242 void
243 reinit(const IndexSet &owned_indices, const MPI_Comm &comm)
244 {
245 // 1) set up the partition
246 this->partition(owned_indices, comm);
247
248#ifdef DEAL_II_WITH_MPI
249 unsigned int my_rank = this_mpi_process(comm);
250
251 types::global_dof_index dic_local_received = 0;
252 std::map<unsigned int,
253 std::vector<std::pair<types::global_dof_index,
254 types::global_dof_index>>>
255 buffers;
256
257 std::fill(actually_owning_ranks.begin(),
258 actually_owning_ranks.end(),
259 numbers::invalid_subdomain_id);
260
261 // 2) collect relevant processes and process local dict entries
262 for (auto interval = owned_indices.begin_intervals();
263 interval != owned_indices.end_intervals();
264 ++interval)
265 {
266 // Due to the granularity of the dictionary, the interval
267 // might be split into several ranges of processor owner
268 // ranks. Here, we process the interval by breaking into
269 // smaller pieces in terms of the dictionary number.
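// (Illustration, hypothetical numbers: with dofs_per_process = 64 and
// stride_small_size = 1, an owned interval [100, 200) is cut at the
// dictionary boundaries 128 and 192 into [100,128), [128,192) and
// [192,200), each kept locally or sent to the rank that
// dof_to_dict_rank() returns for its first index.)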
270 std::pair<types::global_dof_index, types::global_dof_index>
271 index_range(*interval->begin(), interval->last() + 1);
272 const unsigned int owner_last =
273 dof_to_dict_rank(interval->last());
274 unsigned int owner_first = numbers::invalid_unsigned_int;
275 while (owner_first != owner_last)
276 {
277 Assert(index_range.first < index_range.second,
278 ExcInternalError());
279
280 owner_first = dof_to_dict_rank(index_range.first);
281
282 // this explicitly picks up the formula of
283 // dof_to_dict_rank, so the two places must be in sync
284 types::global_dof_index next_index =
285 std::min(get_index_offset(owner_first + 1),
286 index_range.second);
287
288 Assert(next_index > index_range.first, ExcInternalError());
289
290# ifdef DEBUG
291 // make sure that the owner is the same on the current
292 // interval
293 for (types::global_dof_index i = index_range.first + 1;
294 i < next_index;
295 ++i)
296 AssertDimension(owner_first, dof_to_dict_rank(i));
297# endif
298
299 // add the interval, either to the local range or into a
300 // buffer to be sent to another processor
301 if (owner_first == my_rank)
302 {
303 std::fill(actually_owning_ranks.data() +
304 index_range.first - local_range.first,
305 actually_owning_ranks.data() + next_index -
306 local_range.first,
307 my_rank);
308 dic_local_received += next_index - index_range.first;
309 if (actually_owning_rank_list.empty())
310 actually_owning_rank_list.push_back(my_rank);
311 }
312 else
313 buffers[owner_first].emplace_back(index_range.first,
314 next_index);
315
316 index_range.first = next_index;
317 }
318 }
319
320 n_dict_procs_in_owned_indices = buffers.size();
321 std::vector<MPI_Request> request;
322
323 // Check if index set space is partitioned globally without gaps.
324 if (Utilities::MPI::sum(owned_indices.n_elements(), comm) ==
325 owned_indices.size())
326 {
327 // no gaps: setup is simple! Processes send their locally owned
328 // indices to the dictionary. The dictionary stores the sending
329 // rank for each index. The dictionary knows exactly
330 // when it is set up when all indices it is responsible for
331 // have been processed.
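// (In other words, the termination test in step 4) below is purely
// local: receiving stops once dic_local_received equals
// locally_owned_size, which is guaranteed to happen exactly when the
// index space has no gaps.)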
332
333 request.reserve(n_dict_procs_in_owned_indices);
334
335 // protect the following communication steps using a mutex:
336 static CollectiveMutex mutex;
337 CollectiveMutex::ScopedLock lock(mutex, comm);
338
339 const int mpi_tag =
340 Utilities::MPI::internal::Tags::dictionary_reinit;
341
342
343 // 3) send messages with local dofs to the right dict process
344 for (const auto &rank_pair : buffers)
345 {
346 request.push_back(MPI_Request());
347 const int ierr = MPI_Isend(rank_pair.second.data(),
348 rank_pair.second.size() * 2,
349 DEAL_II_DOF_INDEX_MPI_TYPE,
350 rank_pair.first,
351 mpi_tag,
352 comm,
353 &request.back());
354 AssertThrowMPI(ierr);
355 }
356
357 // 4) receive messages until all dofs in dict are processed
358 while (this->locally_owned_size != dic_local_received)
359 {
360 // wait for an incoming message
361 MPI_Status status;
362 auto ierr =
363 MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
364 AssertThrowMPI(ierr);
365
366 // retrieve size of incoming message
367 int number_amount;
368 ierr = MPI_Get_count(&status,
369 DEAL_II_DOF_INDEX_MPI_TYPE,
370 &number_amount);
371 AssertThrowMPI(ierr);
372
373 const auto other_rank = status.MPI_SOURCE;
374 actually_owning_rank_list.push_back(other_rank);
375
376 // receive message
377 Assert(number_amount % 2 == 0, ExcInternalError());
378 std::vector<std::pair<types::global_dof_index,
379 types::global_dof_index>>
380 buffer(number_amount / 2);
381 ierr = MPI_Recv(buffer.data(),
382 number_amount,
383 DEAL_II_DOF_INDEX_MPI_TYPE,
384 status.MPI_SOURCE,
385 status.MPI_TAG,
386 comm,
387 MPI_STATUS_IGNORE);
388 AssertThrowMPI(ierr);
389 // process message: loop over all intervals
390 for (auto interval : buffer)
391 {
392# ifdef DEBUG
393 for (types::global_dof_index i = interval.first;
394 i < interval.second;
395 i++)
396 Assert(actually_owning_ranks[i - local_range.first] ==
397 numbers::invalid_subdomain_id,
398 ExcInternalError());
399 Assert(interval.first >= local_range.first &&
400 interval.first < local_range.second,
401 ExcInternalError());
402 Assert(interval.second > local_range.first &&
403 interval.second <= local_range.second,
404 ExcInternalError());
405# endif
406
407 std::fill(actually_owning_ranks.data() +
408 interval.first - local_range.first,
409 actually_owning_ranks.data() +
410 interval.second - local_range.first,
411 other_rank);
412 dic_local_received += interval.second - interval.first;
413 }
414 }
415 }
416 else
417 {
418 // with gap: use a ConsensusAlgorithm to determine when all
419 // dictionaries have been set up.
420
421 // 3/4) use a ConsensusAlgorithm to send messages with local
422 // dofs to the right dict process
423 DictionaryPayLoad temp(buffers,
424 actually_owning_ranks,
425 local_range,
426 actually_owning_rank_list);
427
428 ConsensusAlgorithms::Selector<
429 std::pair<types::global_dof_index, types::global_dof_index>,
430 unsigned int>
431 consensus_algo(temp, comm);
432 consensus_algo.run();
433 }
434
435 std::sort(actually_owning_rank_list.begin(),
436 actually_owning_rank_list.end());
437
438 for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
439 Assert(actually_owning_rank_list[i] >
440 actually_owning_rank_list[i - 1],
441 ExcInternalError());
442
443 // 5) make sure that all messages have been sent
444 if (request.size() > 0)
445 {
446 const int ierr = MPI_Waitall(request.size(),
447 request.data(),
448 MPI_STATUSES_IGNORE);
449 AssertThrowMPI(ierr);
450 }
451
452#else
453 (void)owned_indices;
454 (void)comm;
455#endif
456 }
457
463 unsigned int
464 dof_to_dict_rank(const types::global_dof_index i)
465 {
466 // note: this formula is also explicitly used in
467 // get_index_offset(), so keep the two in sync
468 return (i / dofs_per_process) * stride_small_size;
469 }
470
475 types::global_dof_index
476 get_index_offset(const unsigned int rank)
477 {
478 return std::min(dofs_per_process *
479 static_cast<types::global_dof_index>(
480 (rank + stride_small_size - 1) /
481 stride_small_size),
482 size);
483 }
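// (Worked example, hypothetical values: for size = 96, dofs_per_process
// = 64 and stride_small_size = 5, dof_to_dict_rank() maps indices in
// [0,64) to rank 0 and indices in [64,96) to rank 5, while
// get_index_offset(5) = 64 and get_index_offset(6) = min(128, 96) = 96,
// so rank 5 holds the dictionary range [64,96) and the ranks in between
// hold empty ranges.)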
484
490 unsigned int
491 get_owning_rank_index(const unsigned int rank_in_owned_indices,
492 const unsigned int guess = 0)
493 {
494 AssertIndexRange(guess, actually_owning_rank_list.size());
495 if (actually_owning_rank_list[guess] == rank_in_owned_indices)
496 return guess;
497 else
498 {
499 auto it = std::lower_bound(actually_owning_rank_list.begin(),
500 actually_owning_rank_list.end(),
501 rank_in_owned_indices);
502 Assert(it != actually_owning_rank_list.end(),
503 ExcInternalError());
504 Assert(*it == rank_in_owned_indices, ExcInternalError());
505 return it - actually_owning_rank_list.begin();
506 }
507 }
508
509 private:
514 void
515 partition(const IndexSet &owned_indices, const MPI_Comm &comm)
516 {
517#ifdef DEAL_II_WITH_MPI
518 const unsigned int n_procs = n_mpi_processes(comm);
519 const unsigned int my_rank = this_mpi_process(comm);
520
521 size = owned_indices.size();
522
523 Assert(size > 0, ExcNotImplemented());
524
525 dofs_per_process = (size + n_procs - 1) / n_procs;
526 if (dofs_per_process < range_minimum_grain_size)
527 {
528 dofs_per_process = range_minimum_grain_size;
529 stride_small_size = dofs_per_process * n_procs / size;
530 }
531 else
532 stride_small_size = 1;
533 local_range.first = get_index_offset(my_rank);
534 local_range.second = get_index_offset(my_rank + 1);
535
536 locally_owned_size = local_range.second - local_range.first;
537
541#else
542 (void)owned_indices;
543 (void)comm;
544#endif
545 }
546 };
547
548
549
556 class ConsensusAlgorithmsPayload
557 : public ConsensusAlgorithms::Process<
558 std::pair<types::global_dof_index, types::global_dof_index>,
559 unsigned int>
560 {
561 public:
565 ConsensusAlgorithmsPayload(const IndexSet &owned_indices,
566 const IndexSet &indices_to_look_up,
567 const MPI_Comm &comm,
568 std::vector<unsigned int> &owning_ranks,
569 const bool track_index_requests = false)
570 : owned_indices(owned_indices)
571 , indices_to_look_up(indices_to_look_up)
572 , comm(comm)
573 , my_rank(this_mpi_process(comm))
574 , n_procs(n_mpi_processes(comm))
575 , track_index_requests(track_index_requests)
576 , owning_ranks(owning_ranks)
577 {
578 dict.reinit(owned_indices, comm);
579 requesters.resize(dict.actually_owning_rank_list.size());
580 }
581
585 const IndexSet &owned_indices;
586
591 const IndexSet &indices_to_look_up;
592
596 const MPI_Comm comm;
597
601 const unsigned int my_rank;
602
607 const unsigned int n_procs;
608
614 const bool track_index_requests;
615
621 std::vector<unsigned int> &owning_ranks;
622
632 std::vector<std::vector<
633 std::pair<unsigned int,
634 std::vector<std::pair<unsigned int, unsigned int>>>>>
635 requesters;
636
640 Dictionary dict;
641
646 std::map<unsigned int, std::vector<types::global_dof_index>>
647 indices_to_look_up_by_dict_rank;
648
653 std::map<unsigned int, std::vector<unsigned int>> recv_indices;
654
662 virtual void
663 answer_request(
664 const unsigned int other_rank,
665 const std::vector<std::pair<types::global_dof_index,
666 types::global_dof_index>> &buffer_recv,
667 std::vector<unsigned int> &request_buffer) override
668 {
669 unsigned int owner_index = 0;
670 for (const auto &interval : buffer_recv)
671 for (auto i = interval.first; i < interval.second; ++i)
672 {
673 const unsigned int actual_owner =
674 dict.actually_owning_ranks[i - dict.local_range.first];
675 request_buffer.push_back(actual_owner);
676
677 if (track_index_requests)
678 append_index_origin(i, owner_index, other_rank);
679 }
680 }
681
686 virtual std::vector<unsigned int>
687 compute_targets() override
688 {
689 std::vector<unsigned int> targets;
690
691 // 1) collect relevant processes and process local dict entries
692 {
693 unsigned int index = 0;
694 unsigned int owner_index = 0;
695 for (auto i : indices_to_look_up)
696 {
697 unsigned int other_rank = dict.dof_to_dict_rank(i);
698 if (other_rank == my_rank)
699 {
700 owning_ranks[index] =
701 dict.actually_owning_ranks[i - dict.local_range.first];
702 if (track_index_requests)
703 append_index_origin(i, owner_index, my_rank);
704 }
705 else if (targets.empty() || targets.back() != other_rank)
706 targets.push_back(other_rank);
707 index++;
708 }
709 }
710
711
712 for (auto i : targets)
713 {
714 recv_indices[i] = {};
715 indices_to_look_up_by_dict_rank[i] = {};
716 }
717
718 // 3) collect indices for each process
719 {
720 unsigned int index = 0;
721 for (auto i : indices_to_look_up)
722 {
723 unsigned int other_rank = dict.dof_to_dict_rank(i);
724 if (other_rank != my_rank)
725 {
726 recv_indices[other_rank].push_back(index);
727 indices_to_look_up_by_dict_rank[other_rank].push_back(i);
728 }
729 index++;
730 }
731 }
732
733 Assert(targets.size() == recv_indices.size() &&
734 targets.size() == indices_to_look_up_by_dict_rank.size(),
735 ExcMessage("Size does not match!"));
736
737 return targets;
738 }
739
744 virtual void
745 create_request(const unsigned int other_rank,
746 std::vector<std::pair<types::global_dof_index,
747 types::global_dof_index>>
748 &send_buffer) override
749 {
750 // create index set and compress data to be sent
751 auto & indices_i = indices_to_look_up_by_dict_rank[other_rank];
752 IndexSet is(dict.size);
753 is.add_indices(indices_i.begin(), indices_i.end());
754 is.compress();
755
756 for (auto interval = is.begin_intervals();
757 interval != is.end_intervals();
758 ++interval)
759 send_buffer.emplace_back(*interval->begin(),
760 interval->last() + 1);
761 }
762
767 virtual void
768 prepare_buffer_for_answer(
769 const unsigned int other_rank,
770 std::vector<unsigned int> &recv_buffer) override
771 {
772 recv_buffer.resize(recv_indices[other_rank].size());
773 }
774
779 virtual void
780 read_answer(const unsigned int other_rank,
781 const std::vector<unsigned int> &recv_buffer) override
782 {
783 Assert(recv_indices[other_rank].size() == recv_buffer.size(),
784 ExcMessage("Sizes do not match!"));
785
786 for (unsigned int j = 0; j < recv_indices[other_rank].size(); j++)
787 owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
788 }
789
799 std::map<unsigned int, IndexSet>
800 get_requesters()
801 {
802 Assert(track_index_requests,
803 ExcMessage("Must enable index range tracking in "
804 "constructor of ConsensusAlgorithmProcess"));
805
806 std::map<unsigned int, ::IndexSet> requested_indices;
807
808#ifdef DEAL_II_WITH_MPI
809
810 static CollectiveMutex mutex;
811 CollectiveMutex::ScopedLock lock(mutex, comm);
812
813 const int mpi_tag = Utilities::MPI::internal::Tags::
814 consensus_algorithm_payload_get_requesters;
815
816 // reserve enough slots for the requests ahead; depending on
817 // whether the owning rank is one of the requesters or not, we
818 // might have one less requests to execute, so fill the requests
819 // on demand.
820 std::vector<MPI_Request> send_requests;
821 send_requests.reserve(requesters.size());
822
823 // We use an integer vector for the data exchange. Since we send
824 // data associated to intervals with different requesters, we will
825 // need to send (a) the MPI rank of the requester, (b) the number
826 // of intervals directed to this requester, and (c) a list of
827 // intervals, i.e., two integers per interval. The number of items
828 // sent in total can be deduced both via the MPI status message at
829 // the receiver site as well as be counting the buckets from
830 // different requesters.
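// (For illustration, made-up numbers: a message to one dictionary
// owner could be the flat sequence 3, 2, 10, 14, 40, 42, 7, 1, 0, 5,
// i.e. requester rank 3 with the two dictionary-local intervals
// [10,14) and [40,42), followed by requester rank 7 with the single
// interval [0,5).)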
831 std::vector<std::vector<unsigned int>> send_data(requesters.size());
832 for (unsigned int i = 0; i < requesters.size(); ++i)
833 {
834 // special code for our own indices
835 if (dict.actually_owning_rank_list[i] == my_rank)
836 {
837 for (const auto &j : requesters[i])
838 {
839 const types::global_dof_index index_offset =
840 dict.get_index_offset(my_rank);
841 IndexSet &my_index_set = requested_indices[j.first];
842 my_index_set.set_size(owned_indices.size());
843 for (const auto &interval : j.second)
844 my_index_set.add_range(index_offset + interval.first,
845 index_offset +
846 interval.second);
847 }
848 }
849 else
850 {
851 for (const auto &j : requesters[i])
852 {
853 send_data[i].push_back(j.first);
854 send_data[i].push_back(j.second.size());
855 for (const auto &interval : j.second)
856 {
857 send_data[i].push_back(interval.first);
858 send_data[i].push_back(interval.second);
859 }
860 }
861 send_requests.push_back(MPI_Request());
862 const int ierr =
863 MPI_Isend(send_data[i].data(),
864 send_data[i].size(),
865 MPI_UNSIGNED,
866 dict.actually_owning_rank_list[i],
867 mpi_tag,
868 comm,
869 &send_requests.back());
870 AssertThrowMPI(ierr);
871 }
872 }
873
874 // receive the data
875 for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices;
876 ++c)
877 {
878 // wait for an incoming message
879 MPI_Status status;
880 unsigned int ierr =
881 MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
882 AssertThrowMPI(ierr);
883
884 // retrieve size of incoming message
885 int number_amount;
886 ierr = MPI_Get_count(&status, MPI_UNSIGNED, &number_amount);
887 AssertThrowMPI(ierr);
888
889 // receive message
890 Assert(number_amount % 2 == 0, ExcInternalError());
891 std::vector<std::pair<unsigned int, unsigned int>> buffer(
892 number_amount / 2);
893 ierr = MPI_Recv(buffer.data(),
894 number_amount,
895 MPI_UNSIGNED,
896 status.MPI_SOURCE,
897 status.MPI_TAG,
898 comm,
899 &status);
900 AssertThrowMPI(ierr);
901
902 // unpack the message and translate the dictionary-local
903 // indices coming via MPI to the global index range
904 const types::global_dof_index index_offset =
905 dict.get_index_offset(status.MPI_SOURCE);
906 unsigned int offset = 0;
907 while (offset < buffer.size())
908 {
909 AssertIndexRange(offset + buffer[offset].second,
910 buffer.size());
911
912 IndexSet my_index_set(owned_indices.size());
913 for (unsigned int i = offset + 1;
914 i < offset + buffer[offset].second + 1;
915 ++i)
916 my_index_set.add_range(index_offset + buffer[i].first,
917 index_offset + buffer[i].second);
918
919 // the underlying index set is able to merge ranges coming
920 // from different ranks due to the partitioning in the
921 // dictionary
922 IndexSet &index_set =
923 requested_indices[buffer[offset].first];
924 if (index_set.size() == 0)
925 index_set.set_size(owned_indices.size());
926 index_set.add_indices(my_index_set);
927
928 offset += buffer[offset].second + 1;
929 }
930 AssertDimension(offset, buffer.size());
931 }
932
933 if (send_requests.size() > 0)
934 {
935 const auto ierr = MPI_Waitall(send_requests.size(),
936 send_requests.data(),
937 MPI_STATUSES_IGNORE);
938 AssertThrowMPI(ierr);
939 }
940
941
942# ifdef DEBUG
943 for (const auto &it : requested_indices)
944 {
945 IndexSet copy_set = it.second;
946 copy_set.subtract_set(owned_indices);
947 Assert(copy_set.n_elements() == 0,
948 ExcMessage(
949 "The indices requested from the current "
950 "MPI rank should be locally owned here!"));
951 }
952# endif
953
954#endif // DEAL_II_WITH_MPI
955
956 return requested_indices;
957 }
958
959 private:
973 void
974 append_index_origin(const types::global_dof_index index,
975 unsigned int & owner_index,
976 const unsigned int rank_of_request)
977 {
978 // remember who requested which index. We want to use an
979 // std::vector with simple addressing, via a good guess from the
980 // preceding index, rather than std::map, because this is an inner
981 // loop and it avoids the map lookup in every iteration
982 const unsigned int rank_of_owner =
983 dict.actually_owning_ranks[index - dict.local_range.first];
984 owner_index =
985 dict.get_owning_rank_index(rank_of_owner, owner_index);
986 if (requesters[owner_index].empty() ||
987 requesters[owner_index].back().first != rank_of_request)
988 requesters[owner_index].emplace_back(
989 rank_of_request,
990 std::vector<std::pair<unsigned int, unsigned int>>());
991 if (requesters[owner_index].back().second.empty() ||
992 requesters[owner_index].back().second.back().second !=
993 index - dict.local_range.first)
994 requesters[owner_index].back().second.emplace_back(
995 index - dict.local_range.first,
996 index - dict.local_range.first + 1);
997 else
998 ++requesters[owner_index].back().second.back().second;
999 }
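// (For example, hypothetical numbers: if the indices that rank 12
// successively requests map to the dictionary-local positions 5, 6, 7
// and later 20, the entry of requesters for the corresponding owner
// ends up as {12, {{5,8}, {20,21}}}, i.e. consecutive indices are
// compressed into half-open ranges.)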
1000 };
1001
1002 } // namespace ComputeIndexOwner
1003 } // namespace internal
1004 } // namespace MPI
1005} // namespace Utilities
1006
1007DEAL_II_NAMESPACE_CLOSE
1008
1009#endif
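The classes above are internal and not meant to be used directly; they are driven by Utilities::MPI::compute_index_owner() in mpi.cc. The following sketch (a hypothetical free function lookup_owners(), not part of the library) shows roughly how the payload and the consensus algorithm are wired together, mirroring the calls visible in the listing above.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/mpi_consensus_algorithms.h>

#include <utility>
#include <vector>

using namespace dealii;

// For every element of `indices_to_look_up`, determine the MPI rank that
// owns it within the distributed IndexSet `owned_indices`.
std::vector<unsigned int>
lookup_owners(const IndexSet &owned_indices,
              const IndexSet &indices_to_look_up,
              const MPI_Comm &comm)
{
  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

  // The payload builds the dictionary (Dictionary::reinit) in its
  // constructor and implements the request/answer hooks used below.
  Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
    process(owned_indices, indices_to_look_up, comm, owning_ranks);

  // The consensus algorithm ships index intervals to the dictionary
  // owners and writes the answers back into `owning_ranks`.
  Utilities::MPI::ConsensusAlgorithms::Selector<
    std::pair<types::global_dof_index, types::global_dof_index>,
    unsigned int>
    consensus_algorithm(process, comm);
  consensus_algorithm.run();

  return owning_ranks;
}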