Reference documentation for deal.II version 8.5.1
partitioner.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 1999 - 2016 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/base/partitioner.h>
17 
18 DEAL_II_NAMESPACE_OPEN
19 
20 namespace Utilities
21 {
22  namespace MPI
23  {
24  Partitioner::Partitioner ()
25  :
26  global_size (0),
27  local_range_data (std::pair<types::global_dof_index, types::global_dof_index> (0, 0)),
28  n_ghost_indices_data (0),
29  n_import_indices_data (0),
30  my_pid (0),
31  n_procs (1),
32  communicator (MPI_COMM_SELF),
33  have_ghost_indices (false)
34  {}
35 
36 
37 
38 
39  Partitioner::Partitioner (const unsigned int size)
40  :
41  global_size (size),
42  locally_owned_range_data (size),
43  local_range_data (std::pair<types::global_dof_index, types::global_dof_index> (0, size)),
44  n_ghost_indices_data (0),
45  n_import_indices_data (0),
46  my_pid (0),
47  n_procs (1),
48  communicator (MPI_COMM_SELF),
49  have_ghost_indices (false)
50  {
51  locally_owned_range_data.add_range (0, size);
52  locally_owned_range_data.compress();
53  ghost_indices_data.set_size (size);
54  }
55 
56 
57 
58  Partitioner::Partitioner (const IndexSet &locally_owned_indices,
59  const IndexSet &ghost_indices_in,
60  const MPI_Comm communicator_in)
61  :
62  global_size (static_cast<types::global_dof_index>(locally_owned_indices.size())),
63  n_ghost_indices_data (0),
64  n_import_indices_data (0),
65  my_pid (0),
66  n_procs (1),
67  communicator (communicator_in),
68  have_ghost_indices (false)
69  {
70  set_owned_indices (locally_owned_indices);
71  set_ghost_indices (ghost_indices_in);
72  }
73 
74 
75 
76  Partitioner::Partitioner (const IndexSet &locally_owned_indices,
77  const MPI_Comm communicator_in)
78  :
79  global_size (static_cast<types::global_dof_index>(locally_owned_indices.size())),
80  n_ghost_indices_data (0),
81  n_import_indices_data (0),
82  my_pid (0),
83  n_procs (1),
84  communicator (communicator_in),
85  have_ghost_indices (false)
86  {
87  set_owned_indices (locally_owned_indices);
88  }
89 
90 
91 
92  void
93  Partitioner::reinit(const IndexSet &vector_space_vector_index_set,
94  const IndexSet &read_write_vector_index_set,
95  const MPI_Comm &communicator_in)
96  {
97  have_ghost_indices = false;
98  communicator = communicator_in;
99  set_owned_indices (vector_space_vector_index_set);
100  set_ghost_indices (read_write_vector_index_set);
101  }
102 
103 
104 
105  void
106  Partitioner::set_owned_indices (const IndexSet &locally_owned_indices)
107  {
108  if (Utilities::MPI::job_supports_mpi() == true)
109  {
110  my_pid = Utilities::MPI::this_mpi_process(communicator);
111  n_procs = Utilities::MPI::n_mpi_processes(communicator);
112  }
113  else
114  {
115  my_pid = 0;
116  n_procs = 1;
117  }
118 
119  // set the local range
120  Assert (locally_owned_indices.is_contiguous() == true,
121  ExcMessage ("The index set specified in locally_owned_indices "
122  "is not contiguous."));
123  locally_owned_indices.compress();
124  if (locally_owned_indices.n_elements()>0)
125  local_range_data = std::pair<types::global_dof_index, types::global_dof_index>
126  (locally_owned_indices.nth_index_in_set(0),
127  locally_owned_indices.nth_index_in_set(0) +
128  locally_owned_indices.n_elements());
129  AssertThrow (local_range_data.second-local_range_data.first <
130  static_cast<types::global_dof_index>(std::numeric_limits<unsigned int>::max()),
131  ExcMessage("Index overflow: This class supports at most 2^32-1 locally owned vector entries"));
132  locally_owned_range_data.set_size (locally_owned_indices.size());
133  locally_owned_range_data.add_range (local_range_data.first, local_range_data.second);
134  locally_owned_range_data.compress();
135
136  ghost_indices_data.set_size (locally_owned_indices.size());
137  }
138 
139 
140 
141  void
142  Partitioner::set_ghost_indices (const IndexSet &ghost_indices_in)
143  {
144  // Set ghost indices from input. To be sure that no entries from the
145  // locally owned range are present, subtract the locally owned indices
146  // in any case.
147  Assert (ghost_indices_in.n_elements() == 0 ||
148  ghost_indices_in.size() == locally_owned_range_data.size(),
149  ExcDimensionMismatch (ghost_indices_in.size(),
150  locally_owned_range_data.size()));
151
152  ghost_indices_data = ghost_indices_in;
153  if (ghost_indices_data.size() != locally_owned_range_data.size())
154  ghost_indices_data.set_size(locally_owned_range_data.size());
155  ghost_indices_data.subtract_set (locally_owned_range_data);
156  ghost_indices_data.compress();
157  AssertThrow (ghost_indices_data.n_elements() <
158  static_cast<types::global_dof_index>(std::numeric_limits<unsigned int>::max()),
159  ExcMessage("Index overflow: This class supports at most 2^32-1 ghost elements"));
160  n_ghost_indices_data = ghost_indices_data.n_elements();
161
162  have_ghost_indices =
163  Utilities::MPI::sum (n_ghost_indices_data, communicator) > 0;
164
165  // In the rest of this function, we determine the point-to-point
166  // communication pattern of the partitioner. We make up a list with both
167  // the processors the ghost indices actually belong to, and the indices
168  // that are locally held but are ghost indices on other processors. This
169  // then allows us to import and export data very easily.
170 
171  // find out the end index for each processor and communicate it (this
172  // implies the start index for the next processor)
173 #ifdef DEAL_II_WITH_MPI
174  if (n_procs < 2)
175  {
176  Assert (ghost_indices_data.n_elements() == 0, ExcInternalError());
177  Assert (n_import_indices_data == 0, ExcInternalError());
178  Assert (n_ghost_indices_data == 0, ExcInternalError());
179  return;
180  }
181 
182  std::vector<types::global_dof_index> first_index (n_procs+1);
183  // Allow non-zero start index for the vector. send this data to all
184  // processors
185  first_index[0] = local_range_data.first;
186  int ierr = MPI_Bcast(&first_index[0], 1, DEAL_II_DOF_INDEX_MPI_TYPE,
187  0, communicator);
188  AssertThrowMPI(ierr);
189 
190  // Get the end-of-local_range for all processors
191  ierr = MPI_Allgather(&local_range_data.second, 1,
192  DEAL_II_DOF_INDEX_MPI_TYPE, &first_index[1], 1,
193  DEAL_II_DOF_INDEX_MPI_TYPE, communicator);
194  AssertThrowMPI(ierr);
195  first_index[n_procs] = global_size;
196 
197  // fix case when there are some processors without any locally owned
198  // indices: then there might be a zero in some entries
199  if (global_size > 0)
200  {
201  unsigned int first_proc_with_nonzero_dofs = 0;
202  for (unsigned int i=0; i<n_procs; ++i)
203  if (first_index[i+1]>0)
204  {
205  first_proc_with_nonzero_dofs = i;
206  break;
207  }
208  for (unsigned int i=first_proc_with_nonzero_dofs+1; i<n_procs; ++i)
209  if (first_index[i] == 0)
210  first_index[i] = first_index[i-1];
211 
212  // correct if our processor has a wrong local range
213  if (first_index[my_pid] != local_range_data.first)
214  {
215  Assert(local_range_data.first == local_range_data.second,
216  ExcInternalError());
217  local_range_data.first = local_range_data.second = first_index[my_pid];
218  }
219  }
220 
221  // Allocate memory for data that will be exported
222  std::vector<types::global_dof_index> expanded_ghost_indices (n_ghost_indices_data);
223  unsigned int n_ghost_targets = 0;
224  if (n_ghost_indices_data > 0)
225  {
226  // Create first a vector of ghost_targets from the list of ghost
227  // indices and then push back new values. When we are done, copy the
228  // data to that field of the partitioner. This way, the variable
229  // ghost_targets will have exactly the size we need, whereas the
230  // vector filled with push_back might actually be too long.
231  unsigned int current_proc = 0;
232  ghost_indices_data.fill_index_vector (expanded_ghost_indices);
233  types::global_dof_index current_index = expanded_ghost_indices[0];
234  while (current_index >= first_index[current_proc+1])
235  current_proc++;
236  std::vector<std::pair<unsigned int, unsigned int> > ghost_targets_temp
237  (1, std::pair<unsigned int, unsigned int>(current_proc, 0));
238  n_ghost_targets++;
239 
240  for (unsigned int iterator=1; iterator<n_ghost_indices_data; ++iterator)
241  {
242  current_index = expanded_ghost_indices[iterator];
243  while (current_index >= first_index[current_proc+1])
244  current_proc++;
245  AssertIndexRange (current_proc, n_procs);
246  if ( ghost_targets_temp[n_ghost_targets-1].first < current_proc)
247  {
248  ghost_targets_temp[n_ghost_targets-1].second =
249  iterator - ghost_targets_temp[n_ghost_targets-1].second;
250  ghost_targets_temp.push_back(std::pair<unsigned int,
251  unsigned int>(current_proc,iterator));
252  n_ghost_targets++;
253  }
254  }
255  ghost_targets_temp[n_ghost_targets-1].second =
256  n_ghost_indices_data - ghost_targets_temp[n_ghost_targets-1].second;
257  ghost_targets_data = ghost_targets_temp;
258  }
259  // find the processes that want to import to me
260  {
261  std::vector<int> send_buffer (n_procs, 0);
262  std::vector<int> receive_buffer (n_procs, 0);
263  for (unsigned int i=0; i<n_ghost_targets; i++)
264  send_buffer[ghost_targets_data[i].first] = ghost_targets_data[i].second;
265 
266  const int ierr = MPI_Alltoall (&send_buffer[0], 1, MPI_INT, &receive_buffer[0], 1,
267  MPI_INT, communicator);
268  AssertThrowMPI(ierr);
269 
270  // allocate memory for import data
271  std::vector<std::pair<unsigned int,unsigned int> > import_targets_temp;
272  n_import_indices_data = 0;
273  for (unsigned int i=0; i<n_procs; i++)
274  if (receive_buffer[i] > 0)
275  {
276  n_import_indices_data += receive_buffer[i];
277  import_targets_temp.push_back(std::pair<unsigned int,
278  unsigned int> (i, receive_buffer[i]));
279  }
280  import_targets_data = import_targets_temp;
281  }
282 
283  // send and receive indices for import data. non-blocking receives and
284  // blocking sends
285  std::vector<types::global_dof_index> expanded_import_indices (n_import_indices_data);
286  {
287  unsigned int current_index_start = 0;
288  std::vector<MPI_Request> import_requests (import_targets_data.size());
289  for (unsigned int i=0; i<import_targets_data.size(); i++)
290  {
291  const int ierr = MPI_Irecv (&expanded_import_indices[current_index_start],
292  import_targets_data[i].second,
293  DEAL_II_DOF_INDEX_MPI_TYPE,
294  import_targets_data[i].first,
295  import_targets_data[i].first,
296  communicator, &import_requests[i]);
297  AssertThrowMPI(ierr);
298  current_index_start += import_targets_data[i].second;
299  }
300  AssertDimension (current_index_start, n_import_indices_data);
301 
302  // use blocking send
303  current_index_start = 0;
304  for (unsigned int i=0; i<n_ghost_targets; i++)
305  {
306  const int ierr = MPI_Send (&expanded_ghost_indices[current_index_start],
307  ghost_targets_data[i].second, DEAL_II_DOF_INDEX_MPI_TYPE,
308  ghost_targets_data[i].first, my_pid,
309  communicator);
310  AssertThrowMPI(ierr);
311  current_index_start += ghost_targets_data[i].second;
312  }
313  AssertDimension (current_index_start, n_ghost_indices_data);
314 
315  if (import_requests.size()>0)
316  {
317  const int ierr = MPI_Waitall (import_requests.size(),
318  &import_requests[0],
319  MPI_STATUSES_IGNORE);
320  AssertThrowMPI(ierr);
321  }
322 
323  // transform import indices to local index space and compress
324  // contiguous indices in form of ranges
325  {
326  types::global_dof_index last_index = numbers::invalid_dof_index-1;
327  std::vector<std::pair<unsigned int,unsigned int> > compressed_import_indices;
328  for (unsigned int i=0; i<n_import_indices_data; i++)
329  {
330  Assert (expanded_import_indices[i] >= local_range_data.first &&
331  expanded_import_indices[i] < local_range_data.second,
332  ExcIndexRange(expanded_import_indices[i], local_range_data.first,
333  local_range_data.second));
334  types::global_dof_index new_index = (expanded_import_indices[i] -
335  local_range_data.first);
336  Assert(new_index < numbers::invalid_unsigned_int,
337  ExcNotImplemented());
338  if (new_index == last_index+1)
339  compressed_import_indices.back().second++;
340  else
341  {
342  compressed_import_indices.push_back
343  (std::pair<unsigned int,unsigned int>(new_index,new_index+1));
344  }
345  last_index = new_index;
346  }
347  import_indices_data = compressed_import_indices;
348 
349  // sanity check
350 #ifdef DEBUG
351  const types::global_dof_index n_local_dofs = local_range_data.second-local_range_data.first;
352  for (unsigned int i=0; i<import_indices_data.size(); ++i)
353  {
354  AssertIndexRange (import_indices_data[i].first, n_local_dofs);
355  AssertIndexRange (import_indices_data[i].second-1, n_local_dofs);
356  }
357 #endif
358  }
359  }
360 #endif
361  }
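Note (not part of partitioner.cc): the grouping loop above (lines 231-257) walks the sorted ghost indices once and, using the first_index partition bounds, collapses them into (owner rank, count) pairs that end up in ghost_targets_data. The following standalone sketch reproduces only that grouping idea with hypothetical names (group_by_owner, sorted_indices, first_index); it is an illustration compiled as C++11, not the library's code, which additionally stores offsets and converts them to counts in place.

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

std::vector<std::pair<unsigned int, unsigned int> >
group_by_owner (const std::vector<std::size_t> &sorted_indices,
                const std::vector<std::size_t> &first_index)
{
  // first_index[p]..first_index[p+1] is the contiguous range owned by rank p
  std::vector<std::pair<unsigned int, unsigned int> > targets;
  unsigned int proc = 0;
  for (std::size_t i = 0; i < sorted_indices.size(); ++i)
    {
      // advance to the rank that owns the current (ascending) index
      while (sorted_indices[i] >= first_index[proc + 1])
        ++proc;
      // start a new (rank, count) pair whenever the owner changes
      if (targets.empty() || targets.back().first != proc)
        targets.push_back (std::make_pair (proc, 0u));
      ++targets.back().second;
    }
  return targets;
}

int main ()
{
  // three ranks owning [0,4), [4,8), [8,12); ghost indices 2,3,5,9,10
  // yield the pairs (0,2), (1,1), (2,2)
  const std::vector<std::size_t> first_index = {0, 4, 8, 12};
  const std::vector<std::size_t> ghosts      = {2, 3, 5, 9, 10};
  const std::vector<std::pair<unsigned int, unsigned int> > targets =
    group_by_owner (ghosts, first_index);
  for (std::size_t i = 0; i < targets.size(); ++i)
    std::cout << "rank " << targets[i].first
              << " owns " << targets[i].second << " ghost indices\n";
  return 0;
}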
362 
363 
364 
365  bool
366  Partitioner::is_compatible (const Partitioner &part) const
367  {
368  // if the partitioner points to the same memory location as the calling
369  // processor
370  if (&part == this)
371  return true;
372 #ifdef DEAL_II_WITH_MPI
373  if (Utilities::MPI::job_supports_mpi())
374  {
375  int communicators_same = 0;
376  const int ierr = MPI_Comm_compare (part.communicator, communicator,
377  &communicators_same);
378  AssertThrowMPI(ierr);
379  if (!(communicators_same == MPI_IDENT ||
380  communicators_same == MPI_CONGRUENT))
381  return false;
382  }
383 #endif
384  return (global_size == part.global_size &&
385  local_range_data == part.local_range_data &&
386  ghost_indices_data == part.ghost_indices_data);
387  }
388 
389 
390 
391  bool
392  Partitioner::is_globally_compatible (const Partitioner &part) const
393  {
394  return Utilities::MPI::min(static_cast<int>(is_compatible(part)),
395  communicator) == 1;
396  }
397 
398 
399 
400  std::size_t
401  Partitioner::memory_consumption() const
402  {
403  std::size_t memory = (3*sizeof(types::global_dof_index)+4*sizeof(unsigned int)+
404  sizeof(MPI_Comm));
405  memory += MemoryConsumption::memory_consumption(locally_owned_range_data);
406  memory += MemoryConsumption::memory_consumption(ghost_targets_data);
407  memory += MemoryConsumption::memory_consumption(import_targets_data);
408  memory += MemoryConsumption::memory_consumption(import_indices_data);
409  memory += MemoryConsumption::memory_consumption(ghost_indices_data);
410  return memory;
411  }
412 
413  } // end of namespace MPI
414 
415 } // end of namespace Utilities
416 
417 
418 DEAL_II_NAMESPACE_CLOSE
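For reference, the constructors and the exchange set up above are typically used along the following lines. This is a minimal sketch and not part of the library sources: it assumes a deal.II 8.5 installation built with MPI support, and the global size, the contiguous per-rank chunk splitting, and the single ghost index per rank are made-up illustration values.

#include <deal.II/base/index_set.h>
#include <deal.II/base/partitioner.h>

#include <mpi.h>

#include <iostream>

int main (int argc, char **argv)
{
  MPI_Init (&argc, &argv);

  int rank = 0, n_ranks = 1;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &n_ranks);

  // hypothetical global size, split into contiguous per-rank chunks
  // (the Partitioner requires the locally owned set to be contiguous)
  const dealii::types::global_dof_index n_global = 100;
  const dealii::types::global_dof_index chunk    = n_global / n_ranks;
  const dealii::types::global_dof_index begin    = rank * chunk;
  const dealii::types::global_dof_index end      =
    (rank == n_ranks - 1) ? n_global : begin + chunk;

  dealii::IndexSet owned (n_global);
  owned.add_range (begin, end);

  // ghost the first entry of the right neighbor, if there is one
  dealii::IndexSet ghosts (n_global);
  if (rank < n_ranks - 1)
    ghosts.add_index (end);

  dealii::Utilities::MPI::Partitioner partitioner (owned, ghosts, MPI_COMM_WORLD);

  std::cout << "rank " << rank
            << ": owns " << partitioner.local_size ()
            << " entries, " << partitioner.n_ghost_indices ()
            << " ghost entries" << std::endl;

  MPI_Finalize ();
  return 0;
}

After construction, every rank knows which entries it owns, which ghost entries it needs from other ranks, and, via the MPI exchange performed in set_ghost_indices above, which of its own entries other ranks will request during import and export.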