Reference documentation for deal.II version 9.1.1
partitioner.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 1999 - 2019 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/partitioner.h>
#include <deal.II/base/partitioner.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    Partitioner::Partitioner()
      : global_size(0)
      , local_range_data(
          std::pair<types::global_dof_index, types::global_dof_index>(0, 0))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(MPI_COMM_SELF)
      , have_ghost_indices(false)
    {}



    Partitioner::Partitioner(const unsigned int size)
      : global_size(size)
      , locally_owned_range_data(size)
      , local_range_data(
          std::pair<types::global_dof_index, types::global_dof_index>(0, size))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(MPI_COMM_SELF)
      , have_ghost_indices(false)
    {
      locally_owned_range_data.add_range(0, size);
      locally_owned_range_data.compress();
      ghost_indices_data.set_size(size);
    }


    Partitioner::Partitioner(const IndexSet &locally_owned_indices,
                             const IndexSet &ghost_indices_in,
                             const MPI_Comm  communicator_in)
      : global_size(
          static_cast<types::global_dof_index>(locally_owned_indices.size()))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(communicator_in)
      , have_ghost_indices(false)
    {
      set_owned_indices(locally_owned_indices);
      set_ghost_indices(ghost_indices_in);
    }



    Partitioner::Partitioner(const IndexSet &locally_owned_indices,
                             const MPI_Comm  communicator_in)
      : global_size(
          static_cast<types::global_dof_index>(locally_owned_indices.size()))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(communicator_in)
      , have_ghost_indices(false)
    {
      set_owned_indices(locally_owned_indices);
    }



    void
    Partitioner::reinit(const IndexSet &vector_space_vector_index_set,
                        const IndexSet &read_write_vector_index_set,
                        const MPI_Comm &communicator_in)
    {
      have_ghost_indices = false;
      communicator       = communicator_in;
      set_owned_indices(vector_space_vector_index_set);
      set_ghost_indices(read_write_vector_index_set);
    }


    void
    Partitioner::set_owned_indices(const IndexSet &locally_owned_indices)
    {
      if (Utilities::MPI::job_supports_mpi() == true)
        {
          my_pid  = Utilities::MPI::this_mpi_process(communicator);
          n_procs = Utilities::MPI::n_mpi_processes(communicator);
        }
      else
        {
          my_pid  = 0;
          n_procs = 1;
        }

      // set the local range
      Assert(locally_owned_indices.is_contiguous() == true,
             ExcMessage("The index set specified in locally_owned_indices "
                        "is not contiguous."));
      locally_owned_indices.compress();
      if (locally_owned_indices.n_elements() > 0)
        local_range_data =
          std::pair<types::global_dof_index, types::global_dof_index>(
            locally_owned_indices.nth_index_in_set(0),
            locally_owned_indices.nth_index_in_set(0) +
              locally_owned_indices.n_elements());
      AssertThrow(
        local_range_data.second - local_range_data.first <
          static_cast<types::global_dof_index>(
            std::numeric_limits<unsigned int>::max()),
        ExcMessage(
          "Index overflow: This class supports at most 2^32-1 locally owned vector entries"));
      locally_owned_range_data.set_size(locally_owned_indices.size());
      locally_owned_range_data.add_range(local_range_data.first,
                                         local_range_data.second);
      locally_owned_range_data.compress();

      ghost_indices_data.set_size(locally_owned_indices.size());
    }


    void
    Partitioner::set_ghost_indices(const IndexSet &ghost_indices_in,
                                   const IndexSet &larger_ghost_index_set)
    {
      // Set ghost indices from input. To be sure that no entries from the
      // locally owned range are present, subtract the locally owned indices
      // in any case.
      Assert(ghost_indices_in.n_elements() == 0 ||
               ghost_indices_in.size() == locally_owned_range_data.size(),
             ExcDimensionMismatch(ghost_indices_in.size(),
                                  locally_owned_range_data.size()));

      ghost_indices_data = ghost_indices_in;
      if (ghost_indices_data.size() != locally_owned_range_data.size())
        ghost_indices_data.set_size(locally_owned_range_data.size());
      ghost_indices_data.subtract_set(locally_owned_range_data);
      ghost_indices_data.compress();
      AssertThrow(
        ghost_indices_data.n_elements() <
          static_cast<types::global_dof_index>(
            std::numeric_limits<unsigned int>::max()),
        ExcMessage(
          "Index overflow: This class supports at most 2^32-1 ghost elements"));
      n_ghost_indices_data = ghost_indices_data.n_elements();

      have_ghost_indices =
        Utilities::MPI::sum(n_ghost_indices_data, communicator) > 0;

      // In the rest of this function, we determine the point-to-point
      // communication pattern of the partitioner. We make up a list with both
      // the processors the ghost indices actually belong to, and the indices
      // that are locally held but ghost indices of other processors. This
      // then allows us to import and export data very easily.

      // find out the end index for each processor and communicate it (this
      // implies the start index for the next processor)
#ifdef DEAL_II_WITH_MPI
      if (n_procs < 2)
        {
          Assert(ghost_indices_data.n_elements() == 0, ExcInternalError());
          Assert(n_import_indices_data == 0, ExcInternalError());
          Assert(n_ghost_indices_data == 0, ExcInternalError());
          return;
        }

      std::vector<types::global_dof_index> first_index(n_procs + 1);
      // Allow a non-zero start index for the vector. Send this data to all
      // processors.
      first_index[0] = local_range_data.first;
      int ierr = MPI_Bcast(
        first_index.data(), 1, DEAL_II_DOF_INDEX_MPI_TYPE, 0, communicator);
      AssertThrowMPI(ierr);

      // Get the end-of-local_range for all processors
      ierr = MPI_Allgather(&local_range_data.second,
                           1,
                           DEAL_II_DOF_INDEX_MPI_TYPE,
                           &first_index[1],
                           1,
                           DEAL_II_DOF_INDEX_MPI_TYPE,
                           communicator);
      AssertThrowMPI(ierr);
      first_index[n_procs] = global_size;

      // fix case when there are some processors without any locally owned
      // indices: then there might be a zero in some entries. The reason
      // is that local_range_data will contain [0,0) and the second index is
      // incorrect inside the Allgather'ed first_index. Below we fix this
      // by ensuring that the start point is always the end index of the
      // processor immediately before.
      if (global_size > 0)
        {
          for (unsigned int i = 1; i < n_procs; ++i)
            if (first_index[i] == 0)
              first_index[i] = first_index[i - 1];

          // correct if our processor has a wrong local range
          if (first_index[my_pid] != local_range_data.first)
            {
              Assert(local_range_data.first == local_range_data.second,
                     ExcInternalError());
              local_range_data.first = local_range_data.second =
                first_index[my_pid];
            }
        }

      // Allocate memory for data that will be exported
      std::vector<types::global_dof_index> expanded_ghost_indices(
        n_ghost_indices_data);
      unsigned int n_ghost_targets = 0;
      if (n_ghost_indices_data > 0)
        {
          // First create a vector of ghost_targets from the list of ghost
          // indices and then push back new values. When we are done, copy the
          // data to that field of the partitioner. This way, the variable
          // ghost_targets will have exactly the size we need, whereas the
          // vector filled with emplace_back might actually be too long.
          unsigned int current_proc = 0;
          ghost_indices_data.fill_index_vector(expanded_ghost_indices);
          types::global_dof_index current_index = expanded_ghost_indices[0];
          while (current_index >= first_index[current_proc + 1])
            current_proc++;
          AssertIndexRange(current_proc, n_procs);

          // since DoFs are contiguous, populate a vector which stores
          // a process rank and the number of ghosts
          std::vector<std::pair<unsigned int, unsigned int>> ghost_targets_temp(
            1, std::pair<unsigned int, unsigned int>(current_proc, 0));
          n_ghost_targets++;

          // find which process is the owner of other indices:
          for (unsigned int iterator = 1; iterator < n_ghost_indices_data;
               ++iterator)
            {
              current_index = expanded_ghost_indices[iterator];
              while (current_index >= first_index[current_proc + 1])
                current_proc++;
              AssertIndexRange(current_proc, n_procs);
              // if we found a new target (i.e. higher rank) then adjust the
              // pair.second in the last element so that it stores the total
              // number of ghosts owned by pair.first
              if (ghost_targets_temp[n_ghost_targets - 1].first < current_proc)
                {
                  ghost_targets_temp[n_ghost_targets - 1].second =
                    iterator - ghost_targets_temp[n_ghost_targets - 1].second;
                  ghost_targets_temp.emplace_back(current_proc, iterator);
                  n_ghost_targets++;
                }
            }
          // adjust the last element so that pair.second stores the number of
          // elements
          ghost_targets_temp[n_ghost_targets - 1].second =
            n_ghost_indices_data -
            ghost_targets_temp[n_ghost_targets - 1].second;
          ghost_targets_data = ghost_targets_temp;
        }
      // find the processes that want to import to me
      {
        std::vector<int> send_buffer(n_procs, 0);
        std::vector<int> receive_buffer(n_procs, 0);
        for (unsigned int i = 0; i < n_ghost_targets; i++)
          send_buffer[ghost_targets_data[i].first] =
            ghost_targets_data[i].second;

        const int ierr = MPI_Alltoall(send_buffer.data(),
                                      1,
                                      MPI_INT,
                                      receive_buffer.data(),
                                      1,
                                      MPI_INT,
                                      communicator);
        AssertThrowMPI(ierr);

        // allocate memory for import data
        std::vector<std::pair<unsigned int, unsigned int>> import_targets_temp;
        n_import_indices_data = 0;
        for (unsigned int i = 0; i < n_procs; i++)
          if (receive_buffer[i] > 0)
            {
              n_import_indices_data += receive_buffer[i];
              import_targets_temp.emplace_back(i, receive_buffer[i]);
            }
        // copy, don't move, to get deterministic memory usage.
        import_targets_data = import_targets_temp;
      }

      // now that we know how many indices each process will receive from
      // ghosts, send and receive indices for import data. non-blocking
      // receives and sends
      std::vector<types::global_dof_index> expanded_import_indices(
        n_import_indices_data);
      {
        unsigned int             current_index_start = 0;
        std::vector<MPI_Request> import_requests(import_targets_data.size() +
                                                 n_ghost_targets);
        for (unsigned int i = 0; i < import_targets_data.size(); i++)
          {
            const int ierr =
              MPI_Irecv(&expanded_import_indices[current_index_start],
                        import_targets_data[i].second,
                        DEAL_II_DOF_INDEX_MPI_TYPE,
                        import_targets_data[i].first,
                        import_targets_data[i].first,
                        communicator,
                        &import_requests[i]);
            AssertThrowMPI(ierr);
            current_index_start += import_targets_data[i].second;
          }
        AssertDimension(current_index_start, n_import_indices_data);

        // use non-blocking send for ghost indices stored in
        // expanded_ghost_indices
        current_index_start = 0;
        for (unsigned int i = 0; i < n_ghost_targets; i++)
          {
            const int ierr =
              MPI_Isend(&expanded_ghost_indices[current_index_start],
                        ghost_targets_data[i].second,
                        DEAL_II_DOF_INDEX_MPI_TYPE,
                        ghost_targets_data[i].first,
                        my_pid,
                        communicator,
                        &import_requests[import_targets_data.size() + i]);
            AssertThrowMPI(ierr);
            current_index_start += ghost_targets_data[i].second;
          }
        AssertDimension(current_index_start, n_ghost_indices_data);

        // wait for all imports from other processes to be done
        if (import_requests.size() > 0)
          {
            const int ierr = MPI_Waitall(import_requests.size(),
                                         import_requests.data(),
                                         MPI_STATUSES_IGNORE);
            AssertThrowMPI(ierr);
          }

        // transform import indices to local index space and compress
        // contiguous indices in form of ranges
        {
          import_indices_chunks_by_rank_data.resize(
            import_targets_data.size() + 1);
          import_indices_chunks_by_rank_data[0] = 0;
          // a vector which stores import indices as ranges [a_i,b_i)
          std::vector<std::pair<unsigned int, unsigned int>>
            compressed_import_indices;
          unsigned int shift = 0;
          for (unsigned int p = 0; p < import_targets_data.size(); ++p)
            {
              types::global_dof_index last_index =
                numbers::invalid_dof_index - 1;
              for (unsigned int ii = 0; ii < import_targets_data[p].second;
                   ++ii)
                {
                  // index in expanded_import_indices for a pair (p,ii):
                  const unsigned int i = shift + ii;
                  Assert(expanded_import_indices[i] >= local_range_data.first &&
                           expanded_import_indices[i] < local_range_data.second,
                         ExcIndexRange(expanded_import_indices[i],
                                       local_range_data.first,
                                       local_range_data.second));
                  // local index starting from the beginning of locally owned
                  // DoFs:
                  types::global_dof_index new_index =
                    (expanded_import_indices[i] - local_range_data.first);
                  Assert(new_index < numbers::invalid_unsigned_int,
                         ExcNotImplemented());
                  if (new_index == last_index + 1)
                    // if contiguous, increment the end of last range:
                    compressed_import_indices.back().second++;
                  else
                    // otherwise start a new range:
                    compressed_import_indices.emplace_back(new_index,
                                                           new_index + 1);
                  last_index = new_index;
                }
              shift += import_targets_data[p].second;
              import_indices_chunks_by_rank_data[p + 1] =
                compressed_import_indices.size();
            }
          import_indices_data = compressed_import_indices;

          // sanity check
#  ifdef DEBUG
          const types::global_dof_index n_local_dofs =
            local_range_data.second - local_range_data.first;
          for (const auto &range : import_indices_data)
            {
              AssertIndexRange(range.first, n_local_dofs);
              AssertIndexRange(range.second - 1, n_local_dofs);
            }
#  endif
        }
      }
#endif // #ifdef DEAL_II_WITH_MPI

      if (larger_ghost_index_set.size() == 0)
        {
          ghost_indices_subset_chunks_by_rank_data.clear();
          ghost_indices_subset_data.emplace_back(local_size(),
                                                 local_size() +
                                                   n_ghost_indices());
          n_ghost_indices_in_larger_set = n_ghost_indices_data;
        }
      else
        {
          AssertDimension(larger_ghost_index_set.size(),
                          ghost_indices_data.size());
          Assert(
            (larger_ghost_index_set & locally_owned_range_data).n_elements() ==
              0,
            ExcMessage("Ghost index set should not overlap with owned set."));
          Assert((larger_ghost_index_set & ghost_indices_data) ==
                   ghost_indices_data,
                 ExcMessage("Larger ghost index set must contain the tight "
                            "ghost index set."));

          n_ghost_indices_in_larger_set = larger_ghost_index_set.n_elements();

          // first translate tight ghost indices into indices within the large
          // set:
          std::vector<unsigned int> expanded_numbering;
          for (dealii::IndexSet::size_type index : ghost_indices_data)
            {
              Assert(larger_ghost_index_set.is_element(index),
                     ExcMessage("The given larger ghost index set must contain "
                                "all indices in the actual index set."));
              Assert(
                larger_ghost_index_set.index_within_set(index) <
                  static_cast<types::global_dof_index>(
                    std::numeric_limits<unsigned int>::max()),
                ExcMessage(
                  "Index overflow: This class supports at most 2^32-1 ghost elements"));
              expanded_numbering.push_back(
                larger_ghost_index_set.index_within_set(index));
            }

          // now rework expanded_numbering into ranges and store in:
          std::vector<std::pair<unsigned int, unsigned int>>
            ghost_indices_subset;
          ghost_indices_subset_chunks_by_rank_data.resize(
            ghost_targets_data.size() + 1);
          // also populate ghost_indices_subset_chunks_by_rank_data
          ghost_indices_subset_chunks_by_rank_data[0] = 0;
          unsigned int shift = 0;
          for (unsigned int p = 0; p < ghost_targets_data.size(); ++p)
            {
              unsigned int last_index = numbers::invalid_unsigned_int - 1;
              for (unsigned int ii = 0; ii < ghost_targets_data[p].second; ii++)
                {
                  const unsigned int i = shift + ii;
                  if (expanded_numbering[i] == last_index + 1)
                    // if contiguous, increment the end of last range:
                    ghost_indices_subset.back().second++;
                  else
                    // otherwise start a new range
                    ghost_indices_subset.emplace_back(expanded_numbering[i],
                                                      expanded_numbering[i] +
                                                        1);
                  last_index = expanded_numbering[i];
                }
              shift += ghost_targets_data[p].second;
              ghost_indices_subset_chunks_by_rank_data[p + 1] =
                ghost_indices_subset.size();
            }
          ghost_indices_subset_data = ghost_indices_subset;
        }
    }


    bool
    Partitioner::is_compatible(const Partitioner &part) const
    {
      // if the partitioner points to the same memory location as the calling
      // object
      if (&part == this)
        return true;
#ifdef DEAL_II_WITH_MPI
      if (Utilities::MPI::job_supports_mpi())
        {
          int       communicators_same = 0;
          const int ierr               = MPI_Comm_compare(part.communicator,
                                            communicator,
                                            &communicators_same);
          AssertThrowMPI(ierr);
          if (!(communicators_same == MPI_IDENT ||
                communicators_same == MPI_CONGRUENT))
            return false;
        }
#endif
      return (global_size == part.global_size &&
              local_range_data == part.local_range_data &&
              ghost_indices_data == part.ghost_indices_data);
    }



    bool
    Partitioner::is_globally_compatible(const Partitioner &part) const
    {
      return Utilities::MPI::min(static_cast<int>(is_compatible(part)),
                                 communicator) == 1;
    }


    std::size_t
    Partitioner::memory_consumption() const
    {
      std::size_t memory = (3 * sizeof(types::global_dof_index) +
                            4 * sizeof(unsigned int) + sizeof(MPI_Comm));
      memory += MemoryConsumption::memory_consumption(locally_owned_range_data);
      memory += MemoryConsumption::memory_consumption(ghost_targets_data);
      memory += MemoryConsumption::memory_consumption(import_targets_data);
      memory += MemoryConsumption::memory_consumption(import_indices_data);
      memory += MemoryConsumption::memory_consumption(
        import_indices_chunks_by_rank_data);
      memory += MemoryConsumption::memory_consumption(
        ghost_indices_subset_chunks_by_rank_data);
      memory +=
        MemoryConsumption::memory_consumption(ghost_indices_subset_data);
      memory += MemoryConsumption::memory_consumption(ghost_indices_data);
      return memory;
    }

  } // end of namespace MPI

} // end of namespace Utilities



// explicit instantiations from .templates.h file
#include "partitioner.inst"

DEAL_II_NAMESPACE_CLOSE
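
As a quick orientation to the implementation above, the following stand-alone sketch shows how a Partitioner is typically constructed from a locally owned IndexSet and a ghost IndexSet. It is not part of partitioner.cc; the index layout (two contiguous entries owned per rank, one ghost entry taken from the neighboring rank) and the program itself are made up purely for illustration, and a deal.II installation configured with MPI is assumed.

// Usage sketch (hypothetical example, not part of the library sources).
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/partitioner.h>

#include <iostream>
#include <memory>

int main(int argc, char **argv)
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  const MPI_Comm     comm    = MPI_COMM_WORLD;
  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);

  const dealii::types::global_dof_index global_size = 2 * n_ranks;

  // Locally owned range [2*rank, 2*rank+2): it must be contiguous, as
  // asserted in set_owned_indices() above.
  dealii::IndexSet owned(global_size);
  owned.add_range(2 * my_rank, 2 * my_rank + 2);
  owned.compress();

  // Ghost index: the first entry of the neighboring rank (wrapping around).
  // Any locally owned index listed here would be subtracted again in
  // set_ghost_indices().
  dealii::IndexSet ghosts(global_size);
  ghosts.add_index((2 * my_rank + 2) % global_size);
  ghosts.compress();

  // This constructor calls set_owned_indices() and set_ghost_indices(),
  // which set up the ghost/import targets shown in the file above.
  const auto partitioner =
    std::make_shared<dealii::Utilities::MPI::Partitioner>(owned, ghosts, comm);

  std::cout << "rank " << my_rank << ": " << partitioner->local_size()
            << " locally owned entries, " << partitioner->n_ghost_indices()
            << " ghost entries" << std::endl;
}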