Reference documentation for deal.II version 9.0.0
partitioner.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 1999 - 2017 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------

#include <deal.II/base/partitioner.h>
#include <deal.II/base/partitioner.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    Partitioner::Partitioner ()
      :
      global_size (0),
      local_range_data (std::pair<types::global_dof_index, types::global_dof_index> (0, 0)),
      n_ghost_indices_data (0),
      n_import_indices_data (0),
      n_ghost_indices_in_larger_set(0),
      my_pid (0),
      n_procs (1),
      communicator (MPI_COMM_SELF),
      have_ghost_indices (false)
    {}



    Partitioner::Partitioner (const unsigned int size)
      :
      global_size (size),
      locally_owned_range_data (size),
      local_range_data (std::pair<types::global_dof_index, types::global_dof_index> (0, size)),
      n_ghost_indices_data (0),
      n_import_indices_data (0),
      n_ghost_indices_in_larger_set(0),
      my_pid (0),
      n_procs (1),
      communicator (MPI_COMM_SELF),
      have_ghost_indices (false)
    {
      locally_owned_range_data.add_range (0, size);
      locally_owned_range_data.compress();
      ghost_indices_data.set_size (size);
    }



    Partitioner::Partitioner (const IndexSet &locally_owned_indices,
                              const IndexSet &ghost_indices_in,
                              const MPI_Comm  communicator_in)
      :
      global_size (static_cast<types::global_dof_index>(locally_owned_indices.size())),
      n_ghost_indices_data (0),
      n_import_indices_data (0),
      n_ghost_indices_in_larger_set(0),
      my_pid (0),
      n_procs (1),
      communicator (communicator_in),
      have_ghost_indices (false)
    {
      set_owned_indices (locally_owned_indices);
      set_ghost_indices (ghost_indices_in);
    }



    Partitioner::Partitioner (const IndexSet &locally_owned_indices,
                              const MPI_Comm  communicator_in)
      :
      global_size (static_cast<types::global_dof_index>(locally_owned_indices.size())),
      n_ghost_indices_data (0),
      n_import_indices_data (0),
      n_ghost_indices_in_larger_set(0),
      my_pid (0),
      n_procs (1),
      communicator (communicator_in),
      have_ghost_indices (false)
    {
      set_owned_indices (locally_owned_indices);
    }



    void
    Partitioner::reinit(const IndexSet &vector_space_vector_index_set,
                        const IndexSet &read_write_vector_index_set,
                        const MPI_Comm &communicator_in)
    {
      have_ghost_indices = false;
      communicator = communicator_in;
      set_owned_indices (vector_space_vector_index_set);
      set_ghost_indices (read_write_vector_index_set);
    }



    void
    Partitioner::set_owned_indices (const IndexSet &locally_owned_indices)
    {
      if (Utilities::MPI::job_supports_mpi() == true)
        {
          my_pid = Utilities::MPI::this_mpi_process(communicator);
          n_procs = Utilities::MPI::n_mpi_processes(communicator);
        }
      else
        {
          my_pid = 0;
          n_procs = 1;
        }

      // set the local range
      Assert (locally_owned_indices.is_contiguous() == true,
              ExcMessage ("The index set specified in locally_owned_indices "
                          "is not contiguous."));
      locally_owned_indices.compress();
      if (locally_owned_indices.n_elements()>0)
        local_range_data = std::pair<types::global_dof_index, types::global_dof_index>
                           (locally_owned_indices.nth_index_in_set(0),
                            locally_owned_indices.nth_index_in_set(0) +
                            locally_owned_indices.n_elements());
      AssertThrow (local_range_data.second-local_range_data.first <
                   static_cast<types::global_dof_index>(std::numeric_limits<unsigned int>::max()),
                   ExcMessage("Index overflow: This class supports at most 2^32-1 locally owned vector entries"));
      locally_owned_range_data.set_size (locally_owned_indices.size());
      locally_owned_range_data.add_range (local_range_data.first, local_range_data.second);
      locally_owned_range_data.compress();

      ghost_indices_data.set_size (locally_owned_indices.size());
    }



    void
    Partitioner::set_ghost_indices (const IndexSet &ghost_indices_in,
                                    const IndexSet &larger_ghost_index_set)
    {
      // Set ghost indices from input. To be sure that no entries from the
      // locally owned range are present, subtract the locally owned indices
      // in any case.
      Assert (ghost_indices_in.n_elements() == 0 ||
              ghost_indices_in.size() == locally_owned_range_data.size(),
              ExcDimensionMismatch (ghost_indices_in.size(),
                                    locally_owned_range_data.size()));

      ghost_indices_data = ghost_indices_in;
      if (ghost_indices_data.size() > 0)
        ghost_indices_data.subtract_set (locally_owned_range_data);
      ghost_indices_data.compress();
      AssertThrow (ghost_indices_data.n_elements() <
                   static_cast<types::global_dof_index>(std::numeric_limits<unsigned int>::max()),
                   ExcMessage("Index overflow: This class supports at most 2^32-1 ghost elements"));
      n_ghost_indices_data = ghost_indices_data.n_elements();

      have_ghost_indices =
        Utilities::MPI::sum(n_ghost_indices_data, communicator) > 0;

      // In the rest of this function, we determine the point-to-point
      // communication pattern of the partitioner. We make up a list with both
      // the processors the ghost indices actually belong to, and the indices
      // that are locally held but ghost indices of other processors. This
      // allows then to import and export data very easily.

      // find out the end index for each processor and communicate it (this
      // implies the start index for the next processor)
#ifdef DEAL_II_WITH_MPI
      if (n_procs < 2)
        {
          Assert (ghost_indices_data.n_elements() == 0, ExcInternalError());
          Assert (n_import_indices_data == 0, ExcInternalError());
          Assert (n_ghost_indices_data == 0, ExcInternalError());
          return;
        }

      std::vector<types::global_dof_index> first_index (n_procs+1);
      // Allow non-zero start index for the vector. send this data to all
      // processors
      first_index[0] = local_range_data.first;
      int ierr = MPI_Bcast(first_index.data(), 1, DEAL_II_DOF_INDEX_MPI_TYPE,
                           0, communicator);
      AssertThrowMPI(ierr);

      // Get the end-of-local_range for all processors
      ierr = MPI_Allgather(&local_range_data.second, 1,
                           DEAL_II_DOF_INDEX_MPI_TYPE, &first_index[1], 1,
                           DEAL_II_DOF_INDEX_MPI_TYPE, communicator);
      AssertThrowMPI(ierr);
      first_index[n_procs] = global_size;

      // fix case when there are some processors without any locally owned
      // indices: then there might be a zero in some entries
      if (global_size > 0)
        {
          unsigned int first_proc_with_nonzero_dofs = 0;
          for (unsigned int i=0; i<n_procs; ++i)
            if (first_index[i+1]>0)
              {
                first_proc_with_nonzero_dofs = i;
                break;
              }
          for (unsigned int i=first_proc_with_nonzero_dofs+1; i<n_procs; ++i)
            if (first_index[i] == 0)
              first_index[i] = first_index[i-1];

          // correct if our processor has a wrong local range
          if (first_index[my_pid] != local_range_data.first)
            {
              Assert(local_range_data.first == local_range_data.second,
                     ExcInternalError());
              local_range_data.first = local_range_data.second = first_index[my_pid];
            }
        }

      // Allocate memory for data that will be exported
      std::vector<types::global_dof_index> expanded_ghost_indices (n_ghost_indices_data);
      unsigned int n_ghost_targets = 0;
      if (n_ghost_indices_data > 0)
        {
          // Create first a vector of ghost_targets from the list of ghost
          // indices and then push back new values. When we are done, copy the
          // data to that field of the partitioner. This way, the variable
          // ghost_targets will have exactly the size we need, whereas the
          // vector filled with emplace_back might actually be too long.
          unsigned int current_proc = 0;
          ghost_indices_data.fill_index_vector (expanded_ghost_indices);
          types::global_dof_index current_index = expanded_ghost_indices[0];
          while (current_index >= first_index[current_proc+1])
            current_proc++;
          std::vector<std::pair<unsigned int, unsigned int> > ghost_targets_temp
          (1, std::pair<unsigned int, unsigned int>(current_proc, 0));
          n_ghost_targets++;

          for (unsigned int iterator=1; iterator<n_ghost_indices_data; ++iterator)
            {
              current_index = expanded_ghost_indices[iterator];
              while (current_index >= first_index[current_proc+1])
                current_proc++;
              AssertIndexRange (current_proc, n_procs);
              if ( ghost_targets_temp[n_ghost_targets-1].first < current_proc)
                {
                  ghost_targets_temp[n_ghost_targets-1].second =
                    iterator - ghost_targets_temp[n_ghost_targets-1].second;
                  ghost_targets_temp.emplace_back (current_proc, iterator);
                  n_ghost_targets++;
                }
            }
          ghost_targets_temp[n_ghost_targets-1].second =
            n_ghost_indices_data - ghost_targets_temp[n_ghost_targets-1].second;
          ghost_targets_data = ghost_targets_temp;
        }
      // find the processes that want to import to me
      {
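        // Each process posts, for every rank it has ghosts from, the number
        // of indices it will request; after the all-to-all exchange below,
        // receive_buffer[i] holds how many of our locally owned indices
        // process i is going to import from us.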
        std::vector<int> send_buffer (n_procs, 0);
        std::vector<int> receive_buffer (n_procs, 0);
        for (unsigned int i=0; i<n_ghost_targets; i++)
          send_buffer[ghost_targets_data[i].first] = ghost_targets_data[i].second;

        const int ierr = MPI_Alltoall (send_buffer.data(), 1, MPI_INT, receive_buffer.data(), 1,
                                       MPI_INT, communicator);
        AssertThrowMPI(ierr);

        // allocate memory for import data
        std::vector<std::pair<unsigned int,unsigned int> > import_targets_temp;
        n_import_indices_data = 0;
        for (unsigned int i=0; i<n_procs; i++)
          if (receive_buffer[i] > 0)
            {
              n_import_indices_data += receive_buffer[i];
              import_targets_temp.emplace_back(i, receive_buffer[i]);
            }
        // copy, don't move, to get deterministic memory usage.
        import_targets_data = import_targets_temp;
      }

      // send and receive indices for import data. non-blocking receives and
      // blocking sends
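      // Posting the non-blocking receives before the matching blocking sends
      // avoids a deadlock: MPI_Send may block until a matching receive is
      // available for large messages, and every process here both sends and
      // receives index lists.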
      std::vector<types::global_dof_index> expanded_import_indices (n_import_indices_data);
      {
        unsigned int current_index_start = 0;
        std::vector<MPI_Request> import_requests (import_targets_data.size());
        for (unsigned int i=0; i<import_targets_data.size(); i++)
          {
            const int ierr = MPI_Irecv (&expanded_import_indices[current_index_start],
                                        import_targets_data[i].second,
                                        DEAL_II_DOF_INDEX_MPI_TYPE,
                                        import_targets_data[i].first,
                                        import_targets_data[i].first,
                                        communicator, &import_requests[i]);
            AssertThrowMPI(ierr);
            current_index_start += import_targets_data[i].second;
          }
        AssertDimension (current_index_start, n_import_indices_data);

        // use blocking send
        current_index_start = 0;
        for (unsigned int i=0; i<n_ghost_targets; i++)
          {
            const int ierr = MPI_Send (&expanded_ghost_indices[current_index_start],
                                       ghost_targets_data[i].second, DEAL_II_DOF_INDEX_MPI_TYPE,
                                       ghost_targets_data[i].first, my_pid,
                                       communicator);
            AssertThrowMPI(ierr);
            current_index_start += ghost_targets_data[i].second;
          }
        AssertDimension (current_index_start, n_ghost_indices_data);

        if (import_requests.size()>0)
          {
            const int ierr = MPI_Waitall (import_requests.size(),
                                          import_requests.data(),
                                          MPI_STATUSES_IGNORE);
            AssertThrowMPI(ierr);
          }

        // transform import indices to local index space and compress
        // contiguous indices in form of ranges
        {
          import_indices_chunks_by_rank_data.resize(import_targets_data.size()+1);
          import_indices_chunks_by_rank_data[0] = 0;
          std::vector<std::pair<unsigned int,unsigned int> > compressed_import_indices;
          unsigned int shift = 0;
          for (unsigned int p=0; p<import_targets_data.size(); ++p)
            {
              types::global_dof_index last_index = numbers::invalid_dof_index-1;
              for (unsigned int ii=0; ii<import_targets_data[p].second; ++ii)
                {
                  const unsigned int i = shift + ii;
                  Assert (expanded_import_indices[i] >= local_range_data.first &&
                          expanded_import_indices[i] < local_range_data.second,
                          ExcIndexRange(expanded_import_indices[i], local_range_data.first,
                                        local_range_data.second));
                  types::global_dof_index new_index = (expanded_import_indices[i] -
                                                       local_range_data.first);
                  Assert(new_index < numbers::invalid_unsigned_int,
                         ExcNotImplemented());
                  if (new_index == last_index+1)
                    compressed_import_indices.back().second++;
                  else
                    compressed_import_indices.emplace_back (new_index,new_index+1);
                  last_index = new_index;
                }
              shift += import_targets_data[p].second;
              import_indices_chunks_by_rank_data[p+1] = compressed_import_indices.size();
            }
          import_indices_data = compressed_import_indices;

          // sanity check
#ifdef DEBUG
          const types::global_dof_index n_local_dofs = local_range_data.second-local_range_data.first;
          for (unsigned int i=0; i<import_indices_data.size(); ++i)
            {
              AssertIndexRange (import_indices_data[i].first, n_local_dofs);
              AssertIndexRange (import_indices_data[i].second-1, n_local_dofs);
            }
#endif
        }
      }
#endif // #ifdef DEAL_II_WITH_MPI

      if (larger_ghost_index_set.size() == 0)
        {
          ghost_indices_subset_chunks_by_rank_data.clear();
          ghost_indices_subset_data.emplace_back(local_size(),
                                                 local_size()+n_ghost_indices());
          n_ghost_indices_in_larger_set = n_ghost_indices_data;
        }
      else
        {
          AssertDimension(larger_ghost_index_set.size(), ghost_indices_data.size());
          Assert((larger_ghost_index_set & locally_owned_range_data).n_elements()==0,
                 ExcMessage("Ghost index set should not overlap with owned set."));
          Assert((larger_ghost_index_set & ghost_indices_data) == ghost_indices_data,
                 ExcMessage("Larger ghost index set must contain the tight "
                            "ghost index set."));

          n_ghost_indices_in_larger_set = larger_ghost_index_set.n_elements();

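          // Translate each index of the tight ghost set into its position
          // within the larger ghost index set; these positions are what the
          // subset ranges built below refer to.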
          std::vector<unsigned int> expanded_numbering;
          for (IndexSet::ElementIterator it = ghost_indices_data.begin();
               it != ghost_indices_data.end(); ++it)
            {
              Assert(larger_ghost_index_set.is_element(*it),
                     ExcMessage("The given larger ghost index set must contain "
                                "all indices in the actual index set."));
              expanded_numbering.push_back(larger_ghost_index_set.index_within_set(*it));
            }

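          // Combine the translated positions into contiguous ranges, stored
          // chunk-wise for each rank we receive ghost data from, so that
          // later copies between the tight and the larger ghost layout can
          // operate on ranges rather than individual entries.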
          std::vector<std::pair<unsigned int,unsigned int> > ghost_indices_subset;
          ghost_indices_subset_chunks_by_rank_data.resize(ghost_targets_data.size()+1);
          ghost_indices_subset_chunks_by_rank_data[0] = 0;
          unsigned int shift = 0;
          for (unsigned int p=0; p<ghost_targets_data.size(); ++p)
            {
              unsigned int last_index = numbers::invalid_unsigned_int-1;
              for (unsigned int ii=0; ii<ghost_targets_data[p].second; ii++)
                {
                  const unsigned int i = shift + ii;
                  if (expanded_numbering[i] == last_index+1)
                    ghost_indices_subset.back().second++;
                  else
                    ghost_indices_subset.emplace_back(expanded_numbering[i],
                                                      expanded_numbering[i]+1);
                  last_index = expanded_numbering[i];
                }
              shift += ghost_targets_data[p].second;
              ghost_indices_subset_chunks_by_rank_data[p+1] = ghost_indices_subset.size();
            }
          ghost_indices_subset_data = ghost_indices_subset;
        }
    }



    bool
    Partitioner::is_compatible (const Partitioner &part) const
    {
      // if the partitioner points to the same memory location as the calling
      // object
      if (&part == this)
        return true;
#ifdef DEAL_II_WITH_MPI
      if (Utilities::MPI::job_supports_mpi())
        {
          int communicators_same = 0;
          const int ierr = MPI_Comm_compare (part.communicator, communicator,
                                             &communicators_same);
          AssertThrowMPI(ierr);
          if (!(communicators_same == MPI_IDENT ||
                communicators_same == MPI_CONGRUENT))
            return false;
        }
#endif
      return (global_size == part.global_size &&
              local_range_data == part.local_range_data &&
              ghost_indices_data == part.ghost_indices_data);
    }



    bool
    Partitioner::is_globally_compatible (const Partitioner &part) const
    {
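      // is_compatible() is a purely local check; reducing its result with a
      // global minimum means this function returns true only if every
      // process considers its local pair of partitioners compatible.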
      return Utilities::MPI::min(static_cast<int>(is_compatible(part)),
                                 communicator) == 1;
    }



    std::size_t
    Partitioner::memory_consumption () const
    {
      std::size_t memory = (3*sizeof(types::global_dof_index)+4*sizeof(unsigned int)+
                            sizeof(MPI_Comm));
      memory += MemoryConsumption::memory_consumption(locally_owned_range_data);
      memory += MemoryConsumption::memory_consumption(ghost_targets_data);
      memory += MemoryConsumption::memory_consumption(import_targets_data);
      memory += MemoryConsumption::memory_consumption(import_indices_data);
      memory += MemoryConsumption::memory_consumption(import_indices_chunks_by_rank_data);
      memory += MemoryConsumption::memory_consumption(ghost_indices_subset_chunks_by_rank_data);
      memory += MemoryConsumption::memory_consumption(ghost_indices_subset_data);
      memory += MemoryConsumption::memory_consumption(ghost_indices_data);
      return memory;
    }

  } // end of namespace MPI

} // end of namespace Utilities



// explicit instantiations from .templates.h file
#include "partitioner.inst"

DEAL_II_NAMESPACE_CLOSE
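
The listing above only defines the class; as a quick orientation, the following is a minimal, hypothetical usage sketch (not part of partitioner.cc) showing how a Partitioner is typically set up from two IndexSet objects. The even split of a 100-element index space and the choice of ghost indices are purely illustrative.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/partitioner.h>

#include <iostream>

int main (int argc, char **argv)
{
  // set up MPI (falls back to a serial run if MPI is not available)
  dealii::Utilities::MPI::MPI_InitFinalize mpi_init (argc, argv);

  const MPI_Comm     comm    = MPI_COMM_WORLD;
  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process (comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes (comm);

  // illustrative global index space of 100 entries, split into contiguous
  // chunks; the last rank takes the remainder
  const unsigned int size  = 100;
  const unsigned int chunk = size / n_ranks;

  dealii::IndexSet owned (size);
  owned.add_range (my_rank * chunk,
                   my_rank == n_ranks - 1 ? size : (my_rank + 1) * chunk);

  // ghost the first locally owned index of the right neighbor (illustrative)
  dealii::IndexSet ghosts (size);
  if (my_rank + 1 < n_ranks)
    ghosts.add_index ((my_rank + 1) * chunk);

  dealii::Utilities::MPI::Partitioner partitioner (owned, ghosts, comm);

  std::cout << "rank " << my_rank
            << ": local_size=" << partitioner.local_size ()
            << ", n_ghost_indices=" << partitioner.n_ghost_indices ()
            << std::endl;
}

Run with, e.g., mpirun -np 3: each rank then reports a local size of 33 or 34 and at most one ghost index.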