Reference documentation for deal.II version 8.5.1
sparsity_tools.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2008 - 2016 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/lac/exceptions.h>
19 #include <deal.II/lac/sparsity_pattern.h>
20 #include <deal.II/lac/sparsity_tools.h>
21 
22 #include <algorithm>
23 #include <functional>
24 #include <set>
25 
26 #ifdef DEAL_II_WITH_MPI
27 #include <deal.II/base/utilities.h>
28 #include <deal.II/base/mpi.h>
29 #include <deal.II/lac/dynamic_sparsity_pattern.h>
30 #include <deal.II/lac/block_sparsity_pattern.h>
31 #endif
32 
33 #ifdef DEAL_II_WITH_METIS
34 DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
35 extern "C"
36 {
37 #include <metis.h>
38 }
39 DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
40 #endif
41 
42 
43 DEAL_II_NAMESPACE_OPEN
44 
45 namespace SparsityTools
46 {
47 
48  void partition (const SparsityPattern &sparsity_pattern,
49  const unsigned int n_partitions,
50  std::vector<unsigned int> &partition_indices)
51  {
52  Assert (sparsity_pattern.n_rows()==sparsity_pattern.n_cols(),
53  ExcNotQuadratic());
54  Assert (sparsity_pattern.is_compressed(),
 55  SparsityPattern::ExcNotCompressed());
56 
57  Assert (n_partitions > 0, ExcInvalidNumberOfPartitions(n_partitions));
58  Assert (partition_indices.size() == sparsity_pattern.n_rows(),
59  ExcInvalidArraySize (partition_indices.size(),
60  sparsity_pattern.n_rows()));
61 
62  // check for an easy return
63  if (n_partitions == 1 || (sparsity_pattern.n_rows()==1))
64  {
65  std::fill_n (partition_indices.begin(), partition_indices.size(), 0U);
66  return;
67  }
68 
69  // Make sure that METIS is actually
70  // installed and detected
71 #ifndef DEAL_II_WITH_METIS
72  (void)sparsity_pattern;
 73  AssertThrow (false, ExcMETISNotInstalled());
74 #else
75 
76  // generate the data structures for
77  // METIS. Note that this is particularly
78  // simple, since METIS wants exactly our
79  // compressed row storage format. we only
80  // have to set up a few auxiliary arrays
81  idx_t
82  n = static_cast<signed int>(sparsity_pattern.n_rows()),
83  ncon = 1, // number of balancing constraints (should be >0)
84  nparts = static_cast<int>(n_partitions), // number of subdomains to create
85  dummy; // the numbers of edges cut by the
86  // resulting partition
87 
 88  // We cannot partition n items into more than n parts. METIS will
 89  // generate nonsensical output (everything is owned by a single process)
90  // and complain with a message (but won't return an error code!):
91  // ***Cannot bisect a graph with 0 vertices!
92  // ***You are trying to partition a graph into too many parts!
93  nparts = std::min(n, nparts);
94 
95  // use default options for METIS
96  idx_t options[METIS_NOPTIONS];
97  METIS_SetDefaultOptions (options);
98 
99  // one more nuisance: we have to copy our own data to arrays that store
100  // signed integers :-(
101  std::vector<idx_t> int_rowstart(1);
102  int_rowstart.reserve(sparsity_pattern.n_rows()+1);
103  std::vector<idx_t> int_colnums;
104  int_colnums.reserve(sparsity_pattern.n_nonzero_elements());
105  for (SparsityPattern::size_type row=0; row<sparsity_pattern.n_rows(); ++row)
106  {
107  for (SparsityPattern::iterator col=sparsity_pattern.begin(row);
108  col < sparsity_pattern.end(row); ++col)
109  int_colnums.push_back(col->column());
110  int_rowstart.push_back(int_colnums.size());
111  }
112 
113  std::vector<idx_t> int_partition_indices (sparsity_pattern.n_rows());
114 
115  // Make use of METIS' error code.
116  int ierr;
117 
118  // Select which type of partitioning to create
119 
120  // Use recursive if the number of partitions is less than or equal to 8
121  if (nparts <= 8)
122  ierr = METIS_PartGraphRecursive(&n, &ncon, &int_rowstart[0], &int_colnums[0],
123  NULL, NULL, NULL,
124  &nparts,NULL,NULL,&options[0],
125  &dummy,&int_partition_indices[0]);
126 
127  // Otherwise use kway
128  else
129  ierr = METIS_PartGraphKway(&n, &ncon, &int_rowstart[0], &int_colnums[0],
130  NULL, NULL, NULL,
131  &nparts,NULL,NULL,&options[0],
132  &dummy,&int_partition_indices[0]);
133 
 134  // If METIS returns normally, an error code METIS_OK=1 is returned from
 135  // the above functions (see metis.h)
136  AssertThrow (ierr == 1, ExcMETISError (ierr));
137 
138  // now copy back generated indices into the output array
139  std::copy (int_partition_indices.begin(),
140  int_partition_indices.end(),
141  partition_indices.begin());
142 #endif
143  }
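
A minimal usage sketch (not taken from this file), assuming a compressed, square SparsityPattern named pattern and a deal.II build configured with METIS:

   // Partition the rows of 'pattern' into 4 subdomains; the name 'pattern'
   // and the number of partitions are illustrative.
   std::vector<unsigned int> partition_indices (pattern.n_rows());
   SparsityTools::partition (pattern, 4, partition_indices);
   // partition_indices[row] now holds the subdomain number (0..3) of each row.
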
144 
145 
146  namespace internal
147  {
 153  DynamicSparsityPattern::size_type
154  find_unnumbered_starting_index (const DynamicSparsityPattern &sparsity,
155  const std::vector<DynamicSparsityPattern::size_type> &new_indices)
156  {
 157  DynamicSparsityPattern::size_type starting_point = numbers::invalid_size_type;
158  DynamicSparsityPattern::size_type min_coordination = sparsity.n_rows();
159  for (DynamicSparsityPattern::size_type row=0; row<sparsity.n_rows(); ++row)
160  // look over all as-yet unnumbered indices
161  if (new_indices[row] == numbers::invalid_size_type)
162  {
163  if (sparsity.row_length(row) < min_coordination)
164  {
165  min_coordination = sparsity.row_length(row);
166  starting_point = row;
167  }
168  }
169 
170  // now we still have to care for the case that no unnumbered dof has a
171  // coordination number less than sparsity.n_rows(). this rather exotic
172  // case only happens if we only have one cell, as far as I can see,
173  // but there may be others as well.
174  //
 175  // if that should be the case, we can choose an arbitrary dof as
176  // starting point, e.g. the first unnumbered one
177  if (starting_point == numbers::invalid_size_type)
178  {
179  for (DynamicSparsityPattern::size_type i=0; i<new_indices.size(); ++i)
180  if (new_indices[i] == numbers::invalid_size_type)
181  {
182  starting_point = i;
183  break;
184  }
185 
186  Assert (starting_point != numbers::invalid_size_type,
187  ExcInternalError());
188  }
189 
190  return starting_point;
191  }
192  }
193 
194 
195 
196  void
 197  reorder_Cuthill_McKee (const DynamicSparsityPattern &sparsity,
198  std::vector<DynamicSparsityPattern::size_type> &new_indices,
199  const std::vector<DynamicSparsityPattern::size_type> &starting_indices)
200  {
201  Assert (sparsity.n_rows() == sparsity.n_cols(),
202  ExcDimensionMismatch (sparsity.n_rows(), sparsity.n_cols()));
203  Assert (sparsity.n_rows() == new_indices.size(),
204  ExcDimensionMismatch (sparsity.n_rows(), new_indices.size()));
205  Assert (starting_indices.size() <= sparsity.n_rows(),
206  ExcMessage ("You can't specify more starting indices than there are rows"));
207  Assert (sparsity.row_index_set().size() == 0 ||
208  sparsity.row_index_set().size() == sparsity.n_rows(),
209  ExcMessage("Only valid for sparsity patterns which store all rows."));
210  for (SparsityPattern::size_type i=0; i<starting_indices.size(); ++i)
211  Assert (starting_indices[i] < sparsity.n_rows(),
212  ExcMessage ("Invalid starting index: All starting indices need "
213  "to be between zero and the number of rows in the "
214  "sparsity pattern."));
215 
216  // store the indices of the dofs renumbered in the last round. Default to
217  // starting points
218  std::vector<DynamicSparsityPattern::size_type> last_round_dofs (starting_indices);
219 
220  // initialize the new_indices array with invalid values
221  std::fill (new_indices.begin(), new_indices.end(),
 222  numbers::invalid_size_type);
223 
224  // if no starting indices were given: find dof with lowest coordination number
225  if (last_round_dofs.empty())
226  last_round_dofs
227  .push_back (internal::find_unnumbered_starting_index (sparsity,
228  new_indices));
229 
230  // store next free dof index
231  DynamicSparsityPattern::size_type next_free_number = 0;
232 
233  // enumerate the first round dofs
234  for (DynamicSparsityPattern::size_type i=0; i!=last_round_dofs.size(); ++i)
235  new_indices[last_round_dofs[i]] = next_free_number++;
236 
237  // now do as many steps as needed to renumber all dofs
238  while (true)
239  {
240  // store the indices of the dofs to be renumbered in the next round
241  std::vector<DynamicSparsityPattern::size_type> next_round_dofs;
242 
243  // find all neighbors of the dofs numbered in the last round
244  for (DynamicSparsityPattern::size_type i=0; i<last_round_dofs.size(); ++i)
245  for (DynamicSparsityPattern::iterator j=sparsity.begin(last_round_dofs[i]);
246  j<sparsity.end(last_round_dofs[i]); ++j)
247  next_round_dofs.push_back (j->column());
248 
249  // sort dof numbers
250  std::sort (next_round_dofs.begin(), next_round_dofs.end());
251 
252  // delete multiple entries
253  std::vector<DynamicSparsityPattern::size_type>::iterator end_sorted;
254  end_sorted = std::unique (next_round_dofs.begin(), next_round_dofs.end());
255  next_round_dofs.erase (end_sorted, next_round_dofs.end());
256 
257  // eliminate dofs which are already numbered
258  for (int s=next_round_dofs.size()-1; s>=0; --s)
259  if (new_indices[next_round_dofs[s]] != numbers::invalid_size_type)
260  next_round_dofs.erase (next_round_dofs.begin() + s);
261 
262  // check whether there are any new dofs in the list. if there are
263  // none, then we have completely numbered the current component of the
264  // graph. check if there are as yet unnumbered components of the graph
265  // that we would then have to do next
266  if (next_round_dofs.empty())
267  {
268  if (std::find (new_indices.begin(), new_indices.end(),
 269  numbers::invalid_size_type)
270  ==
271  new_indices.end())
272  // no unnumbered indices, so we can leave now
273  break;
274 
275  // otherwise find a valid starting point for the next component of
276  // the graph and continue with numbering that one. we only do so
277  // if no starting indices were provided by the user (see the
278  // documentation of this function) so produce an error if we got
279  // here and starting indices were given
280  Assert (starting_indices.empty(),
281  ExcMessage ("The input graph appears to have more than one "
282  "component, but as stated in the documentation "
283  "we only want to reorder such graphs if no "
284  "starting indices are given. The function was "
285  "called with starting indices, however."))
286 
287  next_round_dofs
288  .push_back (internal::find_unnumbered_starting_index (sparsity,
289  new_indices));
290  }
291 
292 
293 
294  // store for each coordination number the dofs with these coordination
295  // number
296  std::multimap<DynamicSparsityPattern::size_type, int> dofs_by_coordination;
297 
298  // find coordination number for each of these dofs
299  for (std::vector<DynamicSparsityPattern::size_type>::iterator s=next_round_dofs.begin();
300  s!=next_round_dofs.end(); ++s)
301  {
302  const DynamicSparsityPattern::size_type coordination = sparsity.row_length(*s);
303 
304  // insert this dof at its coordination number
305  const std::pair<const DynamicSparsityPattern::size_type, int> new_entry (coordination, *s);
306  dofs_by_coordination.insert (new_entry);
307  }
308 
309  // assign new DoF numbers to the elements of the present front:
310  std::multimap<DynamicSparsityPattern::size_type, int>::iterator i;
311  for (i = dofs_by_coordination.begin(); i!=dofs_by_coordination.end(); ++i)
312  new_indices[i->second] = next_free_number++;
313 
314  // after that: copy this round's dofs for the next round
315  last_round_dofs = next_round_dofs;
316  }
317 
318  // test for all indices numbered. this mostly tests whether the
319  // front-marching-algorithm (which Cuthill-McKee actually is) has reached
320  // all points.
321  Assert ((std::find (new_indices.begin(), new_indices.end(), numbers::invalid_size_type)
322  ==
323  new_indices.end())
324  &&
325  (next_free_number == sparsity.n_rows()),
326  ExcInternalError());
327  }
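
A minimal usage sketch (not part of this file), assuming a square DynamicSparsityPattern named dsp that stores all rows:

   // Compute a Cuthill-McKee permutation; new_indices[old_row] receives the
   // new number of each row. The optional starting indices are omitted here.
   std::vector<DynamicSparsityPattern::size_type> new_indices (dsp.n_rows());
   SparsityTools::reorder_Cuthill_McKee (dsp, new_indices);
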
328 
329 
330 
331  void
 332  reorder_Cuthill_McKee (const SparsityPattern &sparsity,
333  std::vector<SparsityPattern::size_type> &new_indices,
334  const std::vector<SparsityPattern::size_type> &starting_indices)
335  {
336  DynamicSparsityPattern dsp(sparsity.n_rows(), sparsity.n_cols());
337  for (unsigned int row=0; row<sparsity.n_rows(); ++row)
338  {
339  for (SparsityPattern::iterator it=sparsity.begin(row); it!=sparsity.end(row)
340  && it->is_valid_entry() ; ++it)
341  dsp.add(row, it->column());
342  }
343  reorder_Cuthill_McKee(dsp, new_indices, starting_indices);
344  }
345 
346 
347 
348  namespace internal
349  {
350  void
351  reorder_hierarchical (const DynamicSparsityPattern &connectivity,
352  std::vector<DynamicSparsityPattern::size_type> &renumbering)
353  {
354  AssertDimension (connectivity.n_rows(), connectivity.n_cols());
355  AssertDimension (connectivity.n_rows(), renumbering.size());
356  Assert (connectivity.row_index_set().size() == 0 ||
357  connectivity.row_index_set().size() == connectivity.n_rows(),
358  ExcMessage("Only valid for sparsity patterns which store all rows."));
359 
360  std::vector<types::global_dof_index> touched_nodes(connectivity.n_rows(),
 361  numbers::invalid_dof_index);
362  std::vector<unsigned int> row_lengths(connectivity.n_rows());
363  std::set<types::global_dof_index> current_neighbors;
364  std::vector<std::vector<types::global_dof_index> > groups;
365 
366  // First collect the number of neighbors for each node. We use this
367  // field to find next nodes with the minimum number of non-touched
368  // neighbors in the field n_remaining_neighbors, so we will count down
369  // on this field. We also cache the row lengths because we need this
370  // data frequently and getting it from the sparsity pattern is more
371  // expensive.
372  for (types::global_dof_index row=0; row<connectivity.n_rows(); ++row)
373  {
374  row_lengths[row] = connectivity.row_length(row);
375  Assert(row_lengths[row] > 0, ExcInternalError());
376  }
377  std::vector<unsigned int> n_remaining_neighbors(row_lengths);
378 
379  // This outer loop is typically traversed only once, unless the global
380  // graph is not connected
381  while (true)
382  {
383  // Find cell with the minimal number of neighbors (typically a
384  // corner node when based on FEM meshes). If no cell is left, we are
385  // done. Together with the outer while loop, this loop can possibly
386  // be of quadratic complexity in the number of disconnected
387  // partitions, i.e. up to connectivity.n_rows() in the worst case,
388  // but that is not the usual use case of this loop and thus not
389  // optimized for.
390  std::pair<types::global_dof_index,types::global_dof_index> min_neighbors
 391  (numbers::invalid_dof_index, numbers::invalid_dof_index);
392  for (types::global_dof_index i=0; i<touched_nodes.size(); ++i)
393  if (touched_nodes[i] == numbers::invalid_dof_index)
394  if (row_lengths[i] < min_neighbors.second)
395  {
396  min_neighbors = std::make_pair(i, n_remaining_neighbors[i]);
397  if (n_remaining_neighbors[i] <= 1)
398  break;
399  }
400  if (min_neighbors.first == numbers::invalid_dof_index)
401  break;
402 
403  Assert(min_neighbors.second > 0, ExcInternalError());
404 
405  current_neighbors.clear();
406  current_neighbors.insert(min_neighbors.first);
407  while (!current_neighbors.empty())
408  {
409  // Find node with minimum number of untouched neighbors among the
410  // next set of possible neighbors
411  min_neighbors = std::make_pair (numbers::invalid_dof_index,
 412  numbers::invalid_dof_index);
413  for (std::set<types::global_dof_index>::iterator it=current_neighbors.begin();
414  it != current_neighbors.end(); ++it)
415  {
416  Assert (touched_nodes[*it] == numbers::invalid_dof_index,
417  ExcInternalError());
418  if (n_remaining_neighbors[*it] < min_neighbors.second)
419  min_neighbors = std::make_pair(*it, n_remaining_neighbors[*it]);
420  }
421 
422  // Among the set of nodes with the minimal number of neighbors,
423  // choose the one with the largest number of touched neighbors,
424  // i.e., the one with the largest row length
425  const types::global_dof_index best_row_length = min_neighbors.second;
426  for (std::set<types::global_dof_index>::iterator it=current_neighbors.begin();
427  it != current_neighbors.end(); ++it)
428  if (n_remaining_neighbors[*it] == best_row_length)
429  if (row_lengths[*it] > min_neighbors.second)
430  min_neighbors = std::make_pair(*it, row_lengths[*it]);
431 
432  // Add the pivot and all direct neighbors of the pivot node not
433  // yet touched to the list of new entries.
434  groups.push_back(std::vector<types::global_dof_index>());
435  std::vector<types::global_dof_index> &next_group = groups.back();
436 
437  next_group.push_back(min_neighbors.first);
438  touched_nodes[min_neighbors.first] = groups.size()-1;
 439  for (DynamicSparsityPattern::iterator it
440  = connectivity.begin(min_neighbors.first);
441  it != connectivity.end(min_neighbors.first); ++it)
442  if (touched_nodes[it->column()] == numbers::invalid_dof_index)
443  {
444  next_group.push_back(it->column());
445  touched_nodes[it->column()] = groups.size()-1;
446  }
447 
448  // Add all neighbors of the current list not yet touched to the
449  // set of possible next pivots. The added node is no longer a
450  // valid neighbor (here we assume symmetry of the
451  // connectivity). Delete the entries of the current list from
452  // the set of possible next pivots.
453  for (unsigned int i=0; i<next_group.size(); ++i)
454  {
 455  for (DynamicSparsityPattern::iterator it
456  = connectivity.begin(next_group[i]);
457  it != connectivity.end(next_group[i]); ++it)
458  {
459  if (touched_nodes[it->column()] == numbers::invalid_dof_index)
460  current_neighbors.insert(it->column());
461  n_remaining_neighbors[it->column()]--;
462  }
463  current_neighbors.erase(next_group[i]);
464  }
465  }
466  }
467 
468  // Sanity check: for all nodes, there should not be any neighbors left
469  for (types::global_dof_index row=0; row<connectivity.n_rows(); ++row)
470  Assert(n_remaining_neighbors[row] == 0, ExcInternalError());
471 
472  // If the number of groups is smaller than the number of nodes, we
473  // continue by recursively calling this method
474  if (groups.size() < connectivity.n_rows())
475  {
476  // Form the connectivity of the groups
477  DynamicSparsityPattern connectivity_next(groups.size(),
478  groups.size());
479  for (types::global_dof_index i=0; i<groups.size(); ++i)
480  for (types::global_dof_index col=0; col<groups[i].size(); ++col)
 481  for (DynamicSparsityPattern::iterator it
482  = connectivity.begin(groups[i][col]);
483  it != connectivity.end(groups[i][col]); ++it)
484  connectivity_next.add(i, touched_nodes[it->column()]);
485 
486  // Recursively call the reordering
487  std::vector<types::global_dof_index> renumbering_next(groups.size());
488  reorder_hierarchical(connectivity_next, renumbering_next);
489 
490  // Renumber the indices group by group according to the incoming
491  // ordering for the groups
492  for (types::global_dof_index i=0,count=0; i<groups.size(); ++i)
493  for (types::global_dof_index col=0; col<groups[renumbering_next[i]].size(); ++col, ++count)
494  renumbering[count] = groups[renumbering_next[i]][col];
495  }
496  else
497  {
498  // All groups should have size one and no more recursion is possible,
499  // so use the numbering of the groups
500  for (types::global_dof_index i=0,count=0; i<groups.size(); ++i)
501  for (types::global_dof_index col=0; col<groups[i].size(); ++col, ++count)
502  renumbering[count] = groups[i][col];
503  }
504  }
505  }
506 
507  void
 508  reorder_hierarchical (const DynamicSparsityPattern &connectivity,
509  std::vector<DynamicSparsityPattern::size_type> &renumbering)
510  {
511  // the internal renumbering keeps the numbering the wrong way around (but
512  // we cannot invert the numbering inside that method because it is used
513  // recursively), so invert it here
514  internal::reorder_hierarchical(connectivity, renumbering);
515  renumbering = Utilities::invert_permutation(renumbering);
516  }
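
A minimal usage sketch (not part of this file), assuming a symmetric connectivity graph stored in a DynamicSparsityPattern named connectivity:

   // Group-based hierarchical reordering; renumbering[old_row] then holds
   // the new index of each row after the inversion performed above.
   std::vector<DynamicSparsityPattern::size_type> renumbering (connectivity.n_rows());
   SparsityTools::reorder_hierarchical (connectivity, renumbering);
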
517 
518 
519 
520 #ifdef DEAL_II_WITH_MPI
 521 
 522  void distribute_sparsity_pattern(DynamicSparsityPattern &dsp,
523  const std::vector<DynamicSparsityPattern::size_type> &rows_per_cpu,
524  const MPI_Comm &mpi_comm,
525  const IndexSet &myrange)
526  {
527  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
528  std::vector<DynamicSparsityPattern::size_type> start_index(rows_per_cpu.size()+1);
529  start_index[0]=0;
530  for (DynamicSparsityPattern::size_type i=0; i<rows_per_cpu.size(); ++i)
531  start_index[i+1]=start_index[i]+rows_per_cpu[i];
532 
533  typedef std::map<DynamicSparsityPattern::size_type,
534  std::vector<DynamicSparsityPattern::size_type> >
535  map_vec_t;
536 
537  map_vec_t send_data;
538 
539  {
540  unsigned int dest_cpu=0;
541 
542  DynamicSparsityPattern::size_type n_local_rel_rows = myrange.n_elements();
543  for (DynamicSparsityPattern::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
544  {
 545  DynamicSparsityPattern::size_type row=myrange.nth_index_in_set(row_idx);
546 
547  //calculate destination CPU
548  while (row>=start_index[dest_cpu+1])
549  ++dest_cpu;
550 
551  //skip myself
552  if (dest_cpu==myid)
553  {
554  row_idx+=rows_per_cpu[myid]-1;
555  continue;
556  }
557 
 558  DynamicSparsityPattern::size_type rlen = dsp.row_length(row);
559 
560  //skip empty lines
561  if (!rlen)
562  continue;
563 
564  //save entries
565  std::vector<DynamicSparsityPattern::size_type> &dst = send_data[dest_cpu];
566 
567  dst.push_back(rlen); // number of entries
568  dst.push_back(row); // row index
569  for (DynamicSparsityPattern::size_type c=0; c<rlen; ++c)
570  {
571  //columns
 572  DynamicSparsityPattern::size_type column = dsp.column_number(row, c);
573  dst.push_back(column);
574  }
575  }
576 
577  }
578 
579  unsigned int num_receive=0;
580  {
581  std::vector<unsigned int> send_to;
582  send_to.reserve(send_data.size());
583  for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
584  send_to.push_back(it->first);
585 
586  num_receive =
 587  Utilities::MPI::
588  compute_point_to_point_communication_pattern(mpi_comm, send_to).size();
589  }
590 
591  std::vector<MPI_Request> requests(send_data.size());
592 
593 
594  // send data
595  {
596  unsigned int idx=0;
597  for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
598  {
599  const int ierr = MPI_Isend(&(it->second[0]),
600  it->second.size(),
601  DEAL_II_DOF_INDEX_MPI_TYPE,
602  it->first,
603  124,
604  mpi_comm,
605  &requests[idx]);
606  AssertThrowMPI(ierr);
607  }
608  }
609 
610  {
611  //receive
612  std::vector<DynamicSparsityPattern::size_type> recv_buf;
613  for (unsigned int index=0; index<num_receive; ++index)
614  {
615  MPI_Status status;
616  int len;
617  int ierr = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
618  AssertThrowMPI(ierr);
619  Assert (status.MPI_TAG==124, ExcInternalError());
620 
621  ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
622  AssertThrowMPI(ierr);
623  recv_buf.resize(len);
624  ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
625  status.MPI_TAG, mpi_comm, &status);
626  AssertThrowMPI(ierr);
627 
628  std::vector<DynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
629  std::vector<DynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
630  while (ptr!=end)
631  {
 632  DynamicSparsityPattern::size_type num=*(ptr++);
633  Assert(ptr!=end, ExcInternalError());
 634  DynamicSparsityPattern::size_type row=*(ptr++);
635  for (unsigned int c=0; c<num; ++c)
636  {
637  Assert(ptr!=end, ExcInternalError());
638  dsp.add(row, *ptr);
639  ++ptr;
640  }
641  }
642  Assert(ptr==end, ExcInternalError());
643  }
644  }
645 
646  // complete all sends, so that we can safely destroy the buffers.
647  if (requests.size())
648  {
649  const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
650  AssertThrowMPI(ierr);
651  }
652 
653  }
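
A minimal usage sketch (not part of this file); the names rows_per_rank, mpi_communicator, and locally_relevant_rows are hypothetical. Each rank first fills dsp for all rows it has entries for, then exchanges the rows owned by other ranks:

   // rows_per_rank[p] is the number of rows owned by rank p, and
   // locally_relevant_rows is the IndexSet of rows this rank has touched.
   SparsityTools::distribute_sparsity_pattern (dsp,
                                               rows_per_rank,
                                               mpi_communicator,
                                               locally_relevant_rows);
   // Afterwards, dsp also contains the entries other ranks generated for
   // the locally owned rows.
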
654 
 655  void distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
656  const std::vector<IndexSet> &owned_set_per_cpu,
657  const MPI_Comm &mpi_comm,
658  const IndexSet &myrange)
659  {
660  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
661 
 662  typedef std::map<BlockDynamicSparsityPattern::size_type,
663  std::vector<BlockDynamicSparsityPattern::size_type> >
664  map_vec_t;
665  map_vec_t send_data;
666 
667  {
668  unsigned int dest_cpu=0;
669 
670  BlockDynamicSparsityPattern::size_type n_local_rel_rows = myrange.n_elements();
671  for (BlockDynamicSparsityPattern::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
672  {
 673  BlockDynamicSparsityPattern::size_type row=myrange.nth_index_in_set(row_idx);
674 
 675  // calculate destination CPU; note that we start the search
 676  // at the last destination CPU, because even if the owned ranges
 677  // are not contiguous, they hopefully consist of large blocks
678  while (!owned_set_per_cpu[dest_cpu].is_element(row))
679  {
680  ++dest_cpu;
681  if (dest_cpu==owned_set_per_cpu.size()) // wrap around
682  dest_cpu=0;
683  }
684 
685  //skip myself
686  if (dest_cpu==myid)
687  continue;
688 
 689  BlockDynamicSparsityPattern::size_type rlen = dsp.row_length(row);
690 
691  //skip empty lines
692  if (!rlen)
693  continue;
694 
695  //save entries
696  std::vector<BlockDynamicSparsityPattern::size_type> &dst = send_data[dest_cpu];
697 
698  dst.push_back(rlen); // number of entries
699  dst.push_back(row); // row index
700  for (BlockDynamicSparsityPattern::size_type c=0; c<rlen; ++c)
701  {
702  //columns
 703  BlockDynamicSparsityPattern::size_type column = dsp.column_number(row, c);
704  dst.push_back(column);
705  }
706  }
707 
708  }
709 
710  unsigned int num_receive=0;
711  {
712  std::vector<unsigned int> send_to;
713  send_to.reserve(send_data.size());
714  for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
715  send_to.push_back(it->first);
716 
717  num_receive =
 718  Utilities::MPI::
719  compute_point_to_point_communication_pattern(mpi_comm, send_to).size();
720  }
721 
722  std::vector<MPI_Request> requests(send_data.size());
723 
724 
725  // send data
726  {
727  unsigned int idx=0;
728  for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
729  {
730  const int ierr = MPI_Isend(&(it->second[0]),
731  it->second.size(),
732  DEAL_II_DOF_INDEX_MPI_TYPE,
733  it->first,
734  124,
735  mpi_comm,
736  &requests[idx]);
737  AssertThrowMPI(ierr);
738  }
739  }
740 
741  {
742  //receive
743  std::vector<BlockDynamicSparsityPattern::size_type> recv_buf;
744  for (unsigned int index=0; index<num_receive; ++index)
745  {
746  MPI_Status status;
747  int len;
748  int ierr = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
749  AssertThrowMPI(ierr);
750  Assert (status.MPI_TAG==124, ExcInternalError());
751 
752  ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
753  AssertThrowMPI(ierr);
754  recv_buf.resize(len);
755  ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
756  status.MPI_TAG, mpi_comm, &status);
757  AssertThrowMPI(ierr);
758 
759  std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
760  std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
761  while (ptr!=end)
762  {
 763  BlockDynamicSparsityPattern::size_type num=*(ptr++);
764  Assert(ptr!=end, ExcInternalError());
 765  BlockDynamicSparsityPattern::size_type row=*(ptr++);
766  for (unsigned int c=0; c<num; ++c)
767  {
768  Assert(ptr!=end, ExcInternalError());
769  dsp.add(row, *ptr);
770  ++ptr;
771  }
772  }
773  Assert(ptr==end, ExcInternalError());
774  }
775  }
776 
777  // complete all sends, so that we can safely destroy the buffers.
778  if (requests.size())
779  {
780  const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
781  AssertThrowMPI(ierr);
782  }
783  }
784 #endif
785 }
786 
787 DEAL_II_NAMESPACE_CLOSE