#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/mpi_consensus_algorithms.h>
#include <deal.II/base/partitioner.h>
#include <deal.II/base/partitioner.templates.h>

#include <boost/serialization/utility.hpp>

#include <limits>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    Partitioner::Partitioner()
      : global_size(0)
      , local_range_data(
          std::pair<types::global_dof_index, types::global_dof_index>(0, 0))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(MPI_COMM_SELF)
      , have_ghost_indices(false)
    {}
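
    // The next constructor sets up a partitioner in which the single calling
    // process owns all 'size' indices; there are no ghost indices and no MPI
    // communication is involved (the communicator is MPI_COMM_SELF).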

    Partitioner::Partitioner(const unsigned int size)
      : global_size(size)
      , locally_owned_range_data(size)
      , local_range_data(
          std::pair<types::global_dof_index, types::global_dof_index>(0, size))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(MPI_COMM_SELF)
      , have_ghost_indices(false)
    {
      locally_owned_range_data.add_range(0, size);
      locally_owned_range_data.compress();
      ghost_indices_data.set_size(size);
    }
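
    // A minimal usage sketch (illustrative only, not part of this file): a
    // partitioner for 100 indices owned entirely by the current process can
    // be set up as
    //
    //   Utilities::MPI::Partitioner serial_partitioner(100);
    //
    // after which locally_owned_size() == 100 and n_ghost_indices() == 0.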

    Partitioner::Partitioner(const types::global_dof_index local_size,
                             const types::global_dof_index ghost_size,
                             const MPI_Comm                communicator)
      : global_size(
          Utilities::MPI::sum<types::global_dof_index>(local_size, communicator))
      , locally_owned_range_data(global_size)
      , local_range_data{0, local_size}
      , n_ghost_indices_data(ghost_size)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(Utilities::MPI::this_mpi_process(communicator))
      , n_procs(Utilities::MPI::n_mpi_processes(communicator))
      , communicator(communicator)
      , have_ghost_indices(true)
    {
      // The first locally owned index is the exclusive prefix sum of the
      // locally owned sizes of all lower ranks.
      types::global_dof_index prefix_sum = 0;
#ifdef DEAL_II_WITH_MPI
      const int ierr = MPI_Exscan(
        &local_size, &prefix_sum, 1, DEAL_II_DOF_INDEX_MPI_TYPE, MPI_SUM,
        communicator);
      AssertThrowMPI(ierr);
#endif
      local_range_data = {prefix_sum, prefix_sum + local_size};
      locally_owned_range_data.add_range(prefix_sum, prefix_sum + local_size);
      locally_owned_range_data.compress();
    }
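
    // Note: the constructor above only records the *number* of ghost entries
    // (ghost_size); ghost_indices_data itself stays empty, so no exchange
    // pattern is built. This is sufficient when a vector merely needs
    // correctly sized ghost storage.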

    Partitioner::Partitioner(const IndexSet &locally_owned_indices,
                             const IndexSet &ghost_indices_in,
                             const MPI_Comm  communicator_in)
      : global_size(
          static_cast<types::global_dof_index>(locally_owned_indices.size()))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(communicator_in)
      , have_ghost_indices(false)
    {
      set_owned_indices(locally_owned_indices);
      set_ghost_indices(ghost_indices_in);
    }
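
    // Hypothetical usage sketch (identifiers below are placeholders, not part
    // of this file): given the owned range and the ghost indices of this
    // rank, one would typically write
    //
    //   IndexSet owned(n_global), ghosts(n_global);
    //   owned.add_range(my_first, my_last);
    //   ghosts.add_index(an_index_owned_by_another_rank);
    //   const auto partitioner =
    //     std::make_shared<Utilities::MPI::Partitioner>(owned, ghosts, comm);
    //
    // which forwards to set_owned_indices() and set_ghost_indices() below.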

    Partitioner::Partitioner(const IndexSet &locally_owned_indices,
                             const MPI_Comm  communicator_in)
      : global_size(
          static_cast<types::global_dof_index>(locally_owned_indices.size()))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(communicator_in)
      , have_ghost_indices(false)
    {
      set_owned_indices(locally_owned_indices);
    }

    void
    Partitioner::reinit(const IndexSet &locally_owned_indices,
                        const IndexSet &ghost_indices,
                        const MPI_Comm  communicator_in)
    {
      have_ghost_indices = false;
      communicator       = communicator_in;
      set_owned_indices(locally_owned_indices);
      set_ghost_indices(ghost_indices);
    }
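
    // The two setters below do the actual work for the IndexSet-based
    // constructors and for reinit(). set_owned_indices() records the
    // contiguous locally owned range of this rank.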

    void
    Partitioner::set_owned_indices(const IndexSet &locally_owned_indices)
    {
      // Query this process' rank and the size of the communicator.
      my_pid  = Utilities::MPI::this_mpi_process(communicator);
      n_procs = Utilities::MPI::n_mpi_processes(communicator);

      // The locally owned range must form one contiguous interval.
      Assert(locally_owned_indices.is_contiguous() == true,
             ExcMessage("The index set specified in locally_owned_indices "
                        "is not contiguous."));
      if (locally_owned_indices.n_elements() > 0)
        local_range_data =
          std::pair<types::global_dof_index, types::global_dof_index>(
            locally_owned_indices.nth_index_in_set(0),
            locally_owned_indices.nth_index_in_set(0) +
              locally_owned_indices.n_elements());
      AssertThrow(
        local_range_data.second - local_range_data.first <
          static_cast<types::global_dof_index>(
            std::numeric_limits<unsigned int>::max()),
        ExcMessage(
          "Index overflow: This class supports at most 2^32-1 locally owned vector entries"));

      locally_owned_range_data.set_size(locally_owned_indices.size());
      locally_owned_range_data.add_range(local_range_data.first,
                                         local_range_data.second);
      locally_owned_range_data.compress();

      ghost_indices_data.set_size(locally_owned_indices.size());
    }
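
    // set_ghost_indices() stores the tight set of ghost indices and then
    // determines, for every ghost index, the rank that owns it, as well as
    // which locally owned indices other ranks will request from us. Together
    // these form the import/export pattern used by
    // export_to_ghosted_array_start() and the related data exchange routines.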

    void
    Partitioner::set_ghost_indices(const IndexSet &ghost_indices_in,
                                   const IndexSet &larger_ghost_index_set)
    {
      // Store the ghost indices; to make sure that no locally owned entries
      // remain in the set, subtract the locally owned range in any case.
      Assert(ghost_indices_in.n_elements() == 0 ||
               ghost_indices_in.size() == locally_owned_range_data.size(),
             ExcDimensionMismatch(ghost_indices_in.size(),
                                  locally_owned_range_data.size()));

      ghost_indices_data = ghost_indices_in;
      if (ghost_indices_data.size() != locally_owned_range_data.size())
        ghost_indices_data.set_size(locally_owned_range_data.size());
      ghost_indices_data.subtract_set(locally_owned_range_data);
      ghost_indices_data.compress();
      AssertThrow(
        ghost_indices_data.n_elements() <
          static_cast<types::global_dof_index>(
            std::numeric_limits<unsigned int>::max()),
        ExcMessage(
          "Index overflow: This class supports at most 2^32-1 ghost elements"));
      n_ghost_indices_data = ghost_indices_data.n_elements();

      // The ghost exchange is needed if any process in the communicator holds
      // ghost entries.
      have_ghost_indices =
        Utilities::MPI::max(n_ghost_indices_data, communicator) > 0;
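
      // Everything below determines the point-to-point communication pattern:
      // which ranks own our ghost indices (the ghost targets) and which of
      // our locally owned indices other ranks need from us (the import
      // targets).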
#ifdef DEAL_II_WITH_MPI
      // The prefix sum over the locally owned sizes tells each rank where its
      // range starts; rank 0 temporarily accounts for a possible non-zero
      // offset at the beginning of the global index space.
      types::global_dof_index my_size = locally_owned_size();
      if (my_pid == 0)
        my_size += local_range_data.first;

      types::global_dof_index my_shift = 0;
      {
        const int ierr = MPI_Exscan(
          &my_size, &my_shift, 1, DEAL_II_DOF_INDEX_MPI_TYPE, MPI_SUM,
          communicator);
        AssertThrowMPI(ierr);
      }
      if (my_pid == 0)
        my_shift = local_range_data.first;

      // Fix the start of the local range in case the incoming index set could
      // not provide that information.
      if (local_range_data.first == 0 && my_shift != 0)
        {
          const types::global_dof_index old_locally_owned_size =
            locally_owned_size();
          local_range_data.first  = my_shift;
          local_range_data.second = my_shift + old_locally_owned_size;
        }

      // Ask a consensus algorithm for the owning rank of each ghost index.
      std::vector<unsigned int> owning_ranks_of_ghosts(
        ghost_indices_data.n_elements());
      internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
        locally_owned_range_data,
        ghost_indices_data,
        communicator,
        owning_ranks_of_ghosts,
        true /* keep track of the requesters of each owned index */);

      ConsensusAlgorithms::Selector<
        std::vector<
          std::pair<types::global_dof_index, types::global_dof_index>>,
        std::vector<unsigned int>>
        consensus_algorithm;
      consensus_algorithm.run(process, communicator);
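
      // Since every rank owns a contiguous index range and the ghost indices
      // are sorted, owning_ranks_of_ghosts is non-decreasing and can be
      // compressed into (rank, number of ghost indices) pairs in one sweep.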
      ghost_targets_data = {};
      if (owning_ranks_of_ghosts.size() > 0)
        {
          ghost_targets_data.emplace_back(owning_ranks_of_ghosts[0], 0);
          for (const auto i : owning_ranks_of_ghosts)
            {
              Assert(i >= ghost_targets_data.back().first,
                     ExcInternalError(
                       "Expect result of ConsensusAlgorithms::Process to be "
                       "sorted."));
              if (i == ghost_targets_data.back().first)
                ghost_targets_data.back().second++;
              else
                ghost_targets_data.emplace_back(i, 1);
            }
        }
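
      // Now for the opposite direction: find out which of our locally owned
      // indices the other ranks have requested, i.e. what this rank will have
      // to send during a ghost update.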
      std::map<unsigned int, IndexSet> import_data =
        process.get_requesters();

      // Count the import indices and set up a compressed, per-rank view of
      // the chunks.
      n_import_indices_data = 0;
      import_targets_data   = {};
      import_targets_data.reserve(import_data.size());
      import_indices_chunks_by_rank_data = {};
      import_indices_chunks_by_rank_data.reserve(import_data.size());
      import_indices_chunks_by_rank_data.resize(1);
      for (const auto &i : import_data)
        if (i.second.n_elements() > 0)
          {
            import_targets_data.emplace_back(i.first, i.second.n_elements());
            n_import_indices_data += i.second.n_elements();
            import_indices_chunks_by_rank_data.push_back(
              import_indices_chunks_by_rank_data.back() +
              i.second.n_intervals());
          }

      // Transform the import indices to the local index space.
      import_indices_data = {};
      import_indices_data.reserve(import_indices_chunks_by_rank_data.back());
      for (const auto &i : import_data)
        {
          Assert((i.second & locally_owned_range_data) == i.second,
                 ExcInternalError("Requested indices must be in local range"));
          for (auto interval = i.second.begin_intervals();
               interval != i.second.end_intervals();
               ++interval)
            import_indices_data.emplace_back(*interval->begin() -
                                               local_range_data.first,
                                             interval->last() + 1 -
                                               local_range_data.first);
        }
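
      // In debug mode, run an expensive consistency check: perform one ghost
      // exchange on the global index numbers themselves and verify that what
      // the owning ranks send matches the ghost indices stored locally.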
#  ifdef DEBUG
      const std::vector<types::global_dof_index> ghost_indices_ref =
        ghost_indices_data.get_index_vector();
      std::vector<types::global_dof_index> indices_to_send(n_import_indices());
      std::vector<types::global_dof_index> ghost_indices(n_ghost_indices());

      const std::vector<types::global_dof_index> my_indices =
        locally_owned_range_data.get_index_vector();
      std::vector<MPI_Request> requests;
      n_ghost_indices_in_larger_set = n_ghost_indices_data;
      export_to_ghosted_array_start(/*communication_channel=*/127,
                                    ArrayView<const types::global_dof_index>(
                                      my_indices.data(), my_indices.size()),
                                    make_array_view(indices_to_send),
                                    make_array_view(ghost_indices),
                                    requests);
      export_to_ghosted_array_finish(make_array_view(ghost_indices), requests);

      int       flag = 1;
      const int ierr = MPI_Testall(requests.size(),
                                   requests.data(),
                                   &flag,
                                   MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);
      Assert(flag == 1,
             ExcMessage(
               "MPI found unfinished requests. Check communication setup"));

      for (unsigned int i = 0; i < ghost_indices.size(); ++i)
        AssertDimension(ghost_indices[i], ghost_indices_ref[i]);
#  endif

#endif // #ifdef DEAL_II_WITH_MPI
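
      // Finally deal with an optional, larger ghost index set: if the vectors
      // using this partitioner store more ghost entries than the tight set
      // built above, record where the tight ghost indices sit inside that
      // larger set.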
      if (larger_ghost_index_set.size() == 0)
        {
          ghost_indices_subset_chunks_by_rank_data.clear();
          ghost_indices_subset_data.emplace_back(0, n_ghost_indices());
          n_ghost_indices_in_larger_set = n_ghost_indices_data;
        }
      else
        {
          AssertDimension(larger_ghost_index_set.size(),
                          ghost_indices_data.size());
          Assert(
            (larger_ghost_index_set & locally_owned_range_data).n_elements() ==
              0,
            ExcMessage("Ghost index set should not overlap with owned set."));
          Assert((larger_ghost_index_set & ghost_indices_data) ==
                   ghost_indices_data,
                 ExcMessage("Larger ghost index set must contain the tight "
                            "ghost index set."));

          n_ghost_indices_in_larger_set = larger_ghost_index_set.n_elements();
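
          // First translate each tight ghost index into its position within
          // the larger ghost index set.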
          std::vector<unsigned int> expanded_numbering;
          for (const dealii::IndexSet::size_type index : ghost_indices_data)
            {
              Assert(larger_ghost_index_set.is_element(index),
                     ExcMessage("The given larger ghost index set must contain "
                                "all indices in the actual index set."));
              Assert(
                larger_ghost_index_set.index_within_set(index) <
                  static_cast<types::global_dof_index>(
                    std::numeric_limits<unsigned int>::max()),
                ExcMessage(
                  "Index overflow: This class supports at most 2^32-1 ghost elements"));
              expanded_numbering.push_back(
                larger_ghost_index_set.index_within_set(index));
            }

          // Compress the expanded numbering into [first, one-past-last)
          // ranges, recorded separately for each ghost target rank.
          std::vector<std::pair<unsigned int, unsigned int>>
            ghost_indices_subset;
          ghost_indices_subset_chunks_by_rank_data.resize(
            ghost_targets_data.size() + 1);
          ghost_indices_subset_chunks_by_rank_data[0] = 0;
          unsigned int shift = 0;
          for (unsigned int p = 0; p < ghost_targets_data.size(); ++p)
            {
              unsigned int last_index = numbers::invalid_unsigned_int - 1;
              for (unsigned int ii = 0; ii < ghost_targets_data[p].second; ++ii)
                {
                  const unsigned int i = shift + ii;
                  if (expanded_numbering[i] == last_index + 1)
                    // contiguous with the previous index: grow the last range
                    ghost_indices_subset.back().second++;
                  else
                    // otherwise open a new range of length one
                    ghost_indices_subset.emplace_back(expanded_numbering[i],
                                                      expanded_numbering[i] + 1);
                  last_index = expanded_numbering[i];
                }
              shift += ghost_targets_data[p].second;
              ghost_indices_subset_chunks_by_rank_data[p + 1] =
                ghost_indices_subset.size();
            }
          ghost_indices_subset_data = ghost_indices_subset;
        }
    }
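
    // is_compatible() is a purely local check: it compares the global size,
    // the local range, and the ghost set, but does not communicate.
    // is_globally_compatible() additionally reduces the local results over
    // the communicator so that all ranks arrive at the same answer.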

    bool
    Partitioner::is_compatible(const Partitioner &part) const
    {
      // Quick return if 'part' is the very same object.
      if (&part == this)
        return true;

#ifdef DEAL_II_WITH_MPI
      if (Utilities::MPI::job_supports_mpi())
        {
          int       communicators_same = 0;
          const int ierr               = MPI_Comm_compare(part.communicator,
                                                          communicator,
                                                          &communicators_same);
          AssertThrowMPI(ierr);
          if (!(communicators_same == MPI_IDENT ||
                communicators_same == MPI_CONGRUENT))
            return false;
        }
#endif
      return (global_size == part.global_size &&
              local_range_data == part.local_range_data &&
              ghost_indices_data == part.ghost_indices_data);
    }

    bool
    Partitioner::is_globally_compatible(const Partitioner &part) const
    {
      return Utilities::MPI::min(static_cast<int>(is_compatible(part)),
                                 communicator) == 1;
    }

    std::size_t
    Partitioner::memory_consumption() const
    {
      std::size_t memory = MemoryConsumption::memory_consumption(global_size);
      memory += locally_owned_range_data.memory_consumption();
      memory += MemoryConsumption::memory_consumption(local_range_data);
      memory += ghost_indices_data.memory_consumption();
      memory += sizeof(n_ghost_indices_data);
      memory += MemoryConsumption::memory_consumption(ghost_targets_data);
      memory += MemoryConsumption::memory_consumption(import_indices_data);
      memory += sizeof(import_indices_plain_dev) +
                sizeof(*import_indices_plain_dev.begin()) *
                  import_indices_plain_dev.capacity();
      memory += MemoryConsumption::memory_consumption(import_targets_data);
      memory += MemoryConsumption::memory_consumption(
        import_indices_chunks_by_rank_data);
      memory += MemoryConsumption::memory_consumption(
        ghost_indices_subset_chunks_by_rank_data);
      memory +=
        MemoryConsumption::memory_consumption(ghost_indices_subset_data);
      return memory;
    }
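
    // initialize_import_indices_plain_dev() expands the compressed
    // (begin, end) import ranges into plain index lists stored as Kokkos
    // views, so that the templated pack/unpack routines declared in
    // partitioner.templates.h can read them directly from device memory.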

    void
    Partitioner::initialize_import_indices_plain_dev() const
    {
      const unsigned int n_import_targets = import_targets_data.size();
      import_indices_plain_dev.reserve(n_import_targets);
      for (unsigned int i = 0; i < n_import_targets; ++i)
        {
          // Expand the (begin, end) pairs of this rank's chunk into a plain
          // list of indices on the host.
          std::vector<std::pair<unsigned int, unsigned int>>::const_iterator
            my_imports = import_indices_data.begin() +
                         import_indices_chunks_by_rank_data[i],
            end_my_imports = import_indices_data.begin() +
                             import_indices_chunks_by_rank_data[i + 1];
          std::vector<unsigned int> import_indices_plain_host;
          for (; my_imports != end_my_imports; ++my_imports)
            {
              const unsigned int chunk_size =
                my_imports->second - my_imports->first;
              for (unsigned int j = 0; j < chunk_size; ++j)
                import_indices_plain_host.push_back(my_imports->first + j);
            }

          // Copy the expanded list into a device view.
          const auto chunk_size = import_indices_plain_host.size();
          import_indices_plain_dev.emplace_back("import_indices_plain_dev" +
                                                  std::to_string(i),
                                                chunk_size);
          Kokkos::deep_copy(import_indices_plain_dev.back(),
                            Kokkos::View<unsigned int *, Kokkos::HostSpace>(
                              import_indices_plain_host.data(), chunk_size));
        }
    }

  } // namespace MPI
} // namespace Utilities



// explicit instantiations of the templated member functions
#include "partitioner.inst"

DEAL_II_NAMESPACE_CLOSE