sparsity_tools.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2008 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#include <deal.II/lac/sparsity_tools.h>
17
18#include <deal.II/base/exceptions.h>
19#include <deal.II/lac/exceptions.h>
20#include <deal.II/lac/sparsity_pattern.h>
21
22#include <algorithm>
23#include <functional>
24#include <memory>
25#include <set>
26
27#ifdef DEAL_II_WITH_MPI
28# include <deal.II/base/mpi.h>
29# include <deal.II/base/mpi_tags.h>
30
31# include <deal.II/lac/block_sparsity_pattern.h>
32# include <deal.II/lac/dynamic_sparsity_pattern.h>
33#endif
34
35#ifdef DEAL_II_WITH_METIS
36extern "C"
37{
38# include <metis.h>
39}
40#endif
41
42#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
43# include <zoltan_cpp.h>
44#endif
45
46#include <string>
47
48
49DEAL_II_NAMESPACE_OPEN
50
51namespace SparsityTools
52{
53 namespace
54 {
55 void
56 partition_metis(const SparsityPattern &sparsity_pattern,
57 const std::vector<unsigned int> &cell_weights,
58 const unsigned int n_partitions,
59 std::vector<unsigned int> &partition_indices)
60 {
61 // Make sure that METIS is actually
62 // installed and detected
63#ifndef DEAL_II_WITH_METIS
64 (void)sparsity_pattern;
65 (void)cell_weights;
66 (void)n_partitions;
67 (void)partition_indices;
68 AssertThrow(false, ExcMETISNotInstalled());
69#else
70
71 // Generate the data structures for METIS. Note that this is particularly
72 // simple, since METIS wants exactly our compressed row storage format.
73 // We only have to set up a few auxiliary arrays and convert from our
74 // unsigned cell weights to signed ones.
75 idx_t n = static_cast<signed int>(sparsity_pattern.n_rows());
76
77 idx_t ncon = 1; // number of balancing constraints (should be >0)
78
79 // We cannot partition n items into more than n parts. METIS will
80 // generate nonsensical output (everything is owned by a single process)
81 // and complain with a message (but won't return an error code!):
82 // ***Cannot bisect a graph with 0 vertices!
83 // ***You are trying to partition a graph into too many parts!
84 idx_t nparts =
85 std::min(n,
86 static_cast<idx_t>(
87 n_partitions)); // number of subdomains to create
88
89 // use default options for METIS
90 idx_t options[METIS_NOPTIONS];
91 METIS_SetDefaultOptions(options);
92
93 // one more nuisance: we have to copy our own data to arrays that store
94 // signed integers :-(
95 std::vector<idx_t> int_rowstart(1);
96 int_rowstart.reserve(sparsity_pattern.n_rows() + 1);
97 std::vector<idx_t> int_colnums;
98 int_colnums.reserve(sparsity_pattern.n_nonzero_elements());
99 for (SparsityPattern::size_type row = 0; row < sparsity_pattern.n_rows();
100 ++row)
101 {
102 for (SparsityPattern::iterator col = sparsity_pattern.begin(row);
103 col < sparsity_pattern.end(row);
104 ++col)
105 int_colnums.push_back(col->column());
106 int_rowstart.push_back(int_colnums.size());
107 }
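 // As a concrete illustration of the compressed row storage handed to
 // METIS (ignoring the order in which columns are stored within a row):
 // for a 3-node path graph 0-1-2 whose quadratic sparsity pattern also
 // stores the diagonal, the rows have lengths 2, 3, 2, so the arrays are
 //   int_rowstart = {0, 2, 5, 7}
 //   int_colnums  = {columns of row 0, columns of row 1, columns of row 2}
 // and row r's column indices occupy the half-open index range
 // [int_rowstart[r], int_rowstart[r+1]).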
108
109 std::vector<idx_t> int_partition_indices(sparsity_pattern.n_rows());
110
111 // Set up cell weighting option
112 std::vector<idx_t> int_cell_weights;
113 if (cell_weights.size() > 0)
114 {
115 Assert(cell_weights.size() == sparsity_pattern.n_rows(),
116 ExcDimensionMismatch(cell_weights.size(),
117 sparsity_pattern.n_rows()));
118 int_cell_weights.resize(cell_weights.size());
119 std::copy(cell_weights.begin(),
120 cell_weights.end(),
121 int_cell_weights.begin());
122 }
123 // Set a pointer to the optional cell weighting information.
124 // METIS expects a null pointer if there are no weights to be considered.
125 idx_t *const p_int_cell_weights =
126 (cell_weights.size() > 0 ? int_cell_weights.data() : nullptr);
127
128
129 // Make use of METIS' error code.
130 int ierr;
131
132 // Select which type of partitioning to create
133
134 // Use recursive if the number of partitions is less than or equal to 8
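 // (this mirrors the advice in the METIS manual: recursive bisection for
 // small numbers of partitions, k-way partitioning for larger ones)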
135 idx_t dummy; // output: # of edges cut by the resulting partition
136 if (nparts <= 8)
137 ierr = METIS_PartGraphRecursive(&n,
138 &ncon,
139 int_rowstart.data(),
140 int_colnums.data(),
141 p_int_cell_weights,
142 nullptr,
143 nullptr,
144 &nparts,
145 nullptr,
146 nullptr,
147 options,
148 &dummy,
149 int_partition_indices.data());
150
151 // Otherwise use kway
152 else
153 ierr = METIS_PartGraphKway(&n,
154 &ncon,
155 int_rowstart.data(),
156 int_colnums.data(),
157 p_int_cell_weights,
158 nullptr,
159 nullptr,
160 &nparts,
161 nullptr,
162 nullptr,
163 options,
164 &dummy,
165 int_partition_indices.data());
166
167 // If METIS returns normally, the above functions return the error
168 // code METIS_OK=1 (see metis.h)
169 AssertThrow(ierr == 1, ExcMETISError(ierr));
170
171 // now copy back generated indices into the output array
172 std::copy(int_partition_indices.begin(),
173 int_partition_indices.end(),
174 partition_indices.begin());
175#endif
176 }
177
178
179// Query functions, unused if Zoltan is not installed
180#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
181 // Query functions for partition_zoltan
182 int
183 get_number_of_objects(void *data, int *ierr)
184 {
185 SparsityPattern *graph = reinterpret_cast<SparsityPattern *>(data);
186
187 *ierr = ZOLTAN_OK;
188
189 return graph->n_rows();
190 }
191
192
193 void
194 get_object_list(void *data,
195 int /*sizeGID*/,
196 int /*sizeLID*/,
197 ZOLTAN_ID_PTR globalID,
198 ZOLTAN_ID_PTR localID,
199 int /*wgt_dim*/,
200 float * /*obj_wgts*/,
201 int *ierr)
202 {
203 SparsityPattern *graph = reinterpret_cast<SparsityPattern *>(data);
204 *ierr = ZOLTAN_OK;
205
206 Assert(globalID != nullptr, ExcInternalError());
207 Assert(localID != nullptr, ExcInternalError());
208
209 // set global degrees of freedom
210 auto n_dofs = graph->n_rows();
211
212 for (unsigned int i = 0; i < n_dofs; ++i)
213 {
214 globalID[i] = i;
215 localID[i] = i; // Same as global ids.
216 }
217 }
218
219
220 void
221 get_num_edges_list(void *data,
222 int /*sizeGID*/,
223 int /*sizeLID*/,
224 int num_obj,
225 ZOLTAN_ID_PTR globalID,
226 ZOLTAN_ID_PTR /*localID*/,
227 int *numEdges,
228 int *ierr)
229 {
230 SparsityPattern *graph = reinterpret_cast<SparsityPattern *>(data);
231
232 *ierr = ZOLTAN_OK;
233
234 Assert(numEdges != nullptr, ExcInternalError());
235
236 for (int i = 0; i < num_obj; ++i)
237 {
238 if (graph->exists(i, i)) // Check if diagonal element is present
239 numEdges[i] = graph->row_length(globalID[i]) - 1;
240 else
241 numEdges[i] = graph->row_length(globalID[i]);
242 }
243 }
244
245
246
247 void
248 get_edge_list(void *data,
249 int /*sizeGID*/,
250 int /*sizeLID*/,
251 int num_obj,
252 ZOLTAN_ID_PTR /*globalID*/,
253 ZOLTAN_ID_PTR /*localID*/,
254 int * /*num_edges*/,
255 ZOLTAN_ID_PTR nborGID,
256 int *nborProc,
257 int /*wgt_dim*/,
258 float * /*ewgts*/,
259 int *ierr)
260 {
261 SparsityPattern *graph = reinterpret_cast<SparsityPattern *>(data);
262 *ierr = ZOLTAN_OK;
263
264 ZOLTAN_ID_PTR nextNborGID = nborGID;
265 int *nextNborProc = nborProc;
266
267 // Loop through rows; the entries of globalID implicitly equal the row indices
268 for (SparsityPattern::size_type i = 0;
269 i < static_cast<SparsityPattern::size_type>(num_obj);
270 ++i)
271 {
272 // Loop through each column to find neighbours
273 for (SparsityPattern::iterator col = graph->begin(i);
274 col < graph->end(i);
275 ++col)
276 // Ignore diagonal entries. Not needed for partitioning.
277 if (i != col->column())
278 {
279 Assert(nextNborGID != nullptr, ExcInternalError());
280 Assert(nextNborProc != nullptr, ExcInternalError());
281
282 *nextNborGID++ = col->column();
283 *nextNborProc++ = 0; // All the vertices on processor 0
284 }
285 }
286 }
287#endif
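 // Taken together, these callbacks expose the sparsity graph to Zoltan's
 // pull-based query interface: Zoltan never sees the SparsityPattern
 // object itself, it only calls back into the functions registered below
 // via Set_Num_Obj_Fn(), Set_Obj_List_Fn(), Set_Num_Edges_Multi_Fn(), and
 // Set_Edge_List_Multi_Fn().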
288
289
290 void
291 partition_zoltan(const SparsityPattern &sparsity_pattern,
292 const std::vector<unsigned int> &cell_weights,
293 const unsigned int n_partitions,
294 std::vector<unsigned int> &partition_indices)
295 {
296 // Make sure that ZOLTAN is actually
297 // installed and detected
298#ifndef DEAL_II_TRILINOS_WITH_ZOLTAN
299 (void)sparsity_pattern;
300 (void)cell_weights;
301 (void)n_partitions;
302 (void)partition_indices;
303 AssertThrow(false, ExcZOLTANNotInstalled());
304#else
305
306 Assert(
307 cell_weights.empty(),
309 "The cell weighting functionality for Zoltan has not yet been implemented."));
310
311 // MPI environment must have been initialized by this point.
312 std::unique_ptr<Zoltan> zz = std::make_unique<Zoltan>(MPI_COMM_SELF);
313
314 // General parameters
315 // DEBUG_LEVEL call must precede the call to LB_METHOD
316 zz->Set_Param("DEBUG_LEVEL", "0"); // set level of debug info
317 zz->Set_Param(
318 "LB_METHOD",
319 "GRAPH"); // graph based partition method (LB-load balancing)
320 zz->Set_Param("NUM_LOCAL_PARTS",
321 std::to_string(n_partitions)); // set number of partitions
322
323 // The PHG partitioner is a hypergraph partitioner that Zoltan can use
324 // for graph partitioning.
325 // If the number of vertices in a hyperedge divided by the total number
326 // of vertices in the hypergraph exceeds PHG_EDGE_SIZE_THRESHOLD, then
327 // the hyperedge is omitted, as such (dense) edges will likely incur
328 // high communication costs regardless of the partition.
329 // PHG_EDGE_SIZE_THRESHOLD is raised here to 0.5 from its default value
330 // of 0.25 so that the PHG partitioner doesn't print the warning
331 // "PHG_EDGE_SIZE_THRESHOLD is low ..." after removing all dense edges.
332 // For instance, in two dimensions, if the triangulation being
333 // partitioned consists of two quadrilaterals sharing an edge and
334 // PHG_EDGE_SIZE_THRESHOLD is set to 0.25, PHG would remove all the
335 // edges and print the above warning.
336 zz->Set_Param("PHG_EDGE_SIZE_THRESHOLD", "0.5");
337
338 // Need a non-const object equal to sparsity_pattern
339 SparsityPattern graph;
340 graph.copy_from(sparsity_pattern);
341
342 // Set query functions
343 zz->Set_Num_Obj_Fn(get_number_of_objects, &graph);
344 zz->Set_Obj_List_Fn(get_object_list, &graph);
345 zz->Set_Num_Edges_Multi_Fn(get_num_edges_list, &graph);
346 zz->Set_Edge_List_Multi_Fn(get_edge_list, &graph);
347
348 // Variables needed by partition function
349 int changes = 0;
350 int num_gid_entries = 1;
351 int num_lid_entries = 1;
352 int num_import = 0;
353 ZOLTAN_ID_PTR import_global_ids = nullptr;
354 ZOLTAN_ID_PTR import_local_ids = nullptr;
355 int *import_procs = nullptr;
356 int *import_to_part = nullptr;
357 int num_export = 0;
358 ZOLTAN_ID_PTR export_global_ids = nullptr;
359 ZOLTAN_ID_PTR export_local_ids = nullptr;
360 int *export_procs = nullptr;
361 int *export_to_part = nullptr;
362
363 // call partitioner
364 const int rc = zz->LB_Partition(changes,
365 num_gid_entries,
366 num_lid_entries,
367 num_import,
368 import_global_ids,
369 import_local_ids,
370 import_procs,
371 import_to_part,
372 num_export,
373 export_global_ids,
374 export_local_ids,
375 export_procs,
376 export_to_part);
377
378 // check for error code in partitioner
379 Assert(rc == ZOLTAN_OK, ExcInternalError());
380
381 // By default, all indices belong to part 0. After the Zoltan
382 // partition, some are migrated to a different part ID, which is
383 // stored in the export_to_part array.
384 std::fill(partition_indices.begin(), partition_indices.end(), 0);
385
386 // copy entries from export_to_part into partition_indices (part IDs != 0)
387 Assert(export_to_part != nullptr, ExcInternalError());
388 for (int i = 0; i < num_export; ++i)
389 partition_indices[export_local_ids[i]] = export_to_part[i];
390#endif
391 }
392 } // namespace
393
394
395 void
396 partition(const SparsityPattern &sparsity_pattern,
397 const unsigned int n_partitions,
398 std::vector<unsigned int> &partition_indices,
399 const Partitioner partitioner)
400 {
401 std::vector<unsigned int> cell_weights;
402
403 // Call the other more general function
404 partition(sparsity_pattern,
405 cell_weights,
406 n_partitions,
407 partition_indices,
408 partitioner);
409 }
410
411
412 void
413 partition(const SparsityPattern &sparsity_pattern,
414 const std::vector<unsigned int> &cell_weights,
415 const unsigned int n_partitions,
416 std::vector<unsigned int> &partition_indices,
417 const Partitioner partitioner)
418 {
419 Assert(sparsity_pattern.n_rows() == sparsity_pattern.n_cols(),
420 ExcNotQuadratic());
421 Assert(sparsity_pattern.is_compressed(),
422 SparsityPattern::ExcNotCompressed());
423
424 Assert(n_partitions > 0, ExcInvalidNumberOfPartitions(n_partitions));
425 Assert(partition_indices.size() == sparsity_pattern.n_rows(),
426 ExcInvalidArraySize(partition_indices.size(),
427 sparsity_pattern.n_rows()));
428
429 // check for an easy return
430 if (n_partitions == 1 || (sparsity_pattern.n_rows() == 1))
431 {
432 std::fill_n(partition_indices.begin(), partition_indices.size(), 0U);
433 return;
434 }
435
436 if (partitioner == Partitioner::metis)
437 partition_metis(sparsity_pattern,
438 cell_weights,
439 n_partitions,
440 partition_indices);
441 else if (partitioner == Partitioner::zoltan)
442 partition_zoltan(sparsity_pattern,
443 cell_weights,
444 n_partitions,
445 partition_indices);
446 else
447 AssertThrow(false, ExcInternalError());
448 }
449
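 // A minimal usage sketch (hypothetical names; `cell_connectivity` would
 // be a quadratic, compressed SparsityPattern, e.g. built from
 // GridTools::get_face_connectivity_of_cells()):
 //
 //   std::vector<unsigned int> part(cell_connectivity.n_rows());
 //   SparsityTools::partition(cell_connectivity, /*n_partitions=*/4, part);
 //   // part[i] now holds the subdomain (0..3) assigned to cell i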
450
451 unsigned int
452 color_sparsity_pattern(const SparsityPattern &sparsity_pattern,
453 std::vector<unsigned int> &color_indices)
454 {
455 // Make sure that ZOLTAN is actually
456 // installed and detected
457#ifndef DEAL_II_TRILINOS_WITH_ZOLTAN
458 (void)sparsity_pattern;
459 (void)color_indices;
460 AssertThrow(false, ExcZOLTANNotInstalled());
461 return 0;
462#else
463 // The coloring algorithm is run serially on each processor.
464 std::unique_ptr<Zoltan> zz = std::make_unique<Zoltan>(MPI_COMM_SELF);
465
466 // Coloring parameters
467 // DEBUG_LEVEL must precede all other calls
468 zz->Set_Param("DEBUG_LEVEL", "0"); // level of debug info
469 zz->Set_Param("COLORING_PROBLEM", "DISTANCE-1"); // Standard coloring
470 zz->Set_Param("NUM_GID_ENTRIES", "1"); // 1 entry represents global ID
471 zz->Set_Param("NUM_LID_ENTRIES", "1"); // 1 entry represents local ID
472 zz->Set_Param("OBJ_WEIGHT_DIM", "0"); // object weights not used
473 zz->Set_Param("RECOLORING_NUM_OF_ITERATIONS", "0");
474
475 // Zoltan::Color function requires a non-const SparsityPattern object
476 SparsityPattern graph;
477 graph.copy_from(sparsity_pattern);
478
479 // Set query functions required by coloring function
480 zz->Set_Num_Obj_Fn(get_number_of_objects, &graph);
481 zz->Set_Obj_List_Fn(get_object_list, &graph);
482 zz->Set_Num_Edges_Multi_Fn(get_num_edges_list, &graph);
483 zz->Set_Edge_List_Multi_Fn(get_edge_list, &graph);
484
485 // Variables needed by coloring function
486 int num_gid_entries = 1;
487 const int num_objects = graph.n_rows();
488
489 // Preallocate input variables. Element type fixed by ZOLTAN.
490 std::vector<ZOLTAN_ID_TYPE> global_ids(num_objects);
491 std::vector<int> color_exp(num_objects);
492
493 // Set ids for which coloring needs to be done
494 for (int i = 0; i < num_objects; ++i)
495 global_ids[i] = i;
496
497 // Call ZOLTAN coloring algorithm
498 int rc = zz->Color(num_gid_entries,
499 num_objects,
500 global_ids.data(),
501 color_exp.data());
502 // Check for error code
503 Assert(rc == ZOLTAN_OK, ExcInternalError());
504
505 // Allocate and assign color indices
506 color_indices.resize(num_objects);
507 Assert(color_exp.size() == color_indices.size(),
508 ExcDimensionMismatch(color_exp.size(), color_indices.size()));
509
510 std::copy(color_exp.begin(), color_exp.end(), color_indices.begin());
511
512 unsigned int n_colors =
513 *(std::max_element(color_indices.begin(), color_indices.end()));
514 return n_colors;
515#endif
516 }
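 // A minimal usage sketch (hypothetical `graph`: a quadratic
 // SparsityPattern whose entries (i,j) mark conflicting rows):
 //
 //   std::vector<unsigned int> color(graph.n_rows());
 //   const unsigned int n_colors =
 //     SparsityTools::color_sparsity_pattern(graph, color);
 //   // rows i != j with graph.exists(i,j) end up with color[i] != color[j]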
517
518
519 namespace internal
520 {
521 /**
522 * Given a connectivity graph and a list of indices (where
523 * invalid_size_type indicates that a node has not been numbered yet),
524 * pick a valid starting index among the as-yet unnumbered one.
525 */
526 DynamicSparsityPattern::size_type
527 find_unnumbered_starting_index(
528 const DynamicSparsityPattern &sparsity,
529 const std::vector<DynamicSparsityPattern::size_type> &new_indices)
530 {
531 DynamicSparsityPattern::size_type starting_point =
532 numbers::invalid_size_type;
533 DynamicSparsityPattern::size_type min_coordination = sparsity.n_rows();
534 for (DynamicSparsityPattern::size_type row = 0; row < sparsity.n_rows();
535 ++row)
536 // look over all as-yet unnumbered indices
537 if (new_indices[row] == numbers::invalid_size_type)
538 {
539 if (sparsity.row_length(row) < min_coordination)
540 {
541 min_coordination = sparsity.row_length(row);
542 starting_point = row;
543 }
544 }
545
546 // now we still have to care for the case that no unnumbered dof has a
547 // coordination number less than sparsity.n_rows(). this rather exotic
548 // case only happens if we only have one cell, as far as I can see,
549 // but there may be others as well.
550 //
551 // if that should be the case, we can choose an arbitrary dof as
552 // starting point, e.g. the first unnumbered one
553 if (starting_point == numbers::invalid_size_type)
554 {
555 for (DynamicSparsityPattern::size_type i = 0; i < new_indices.size();
556 ++i)
557 if (new_indices[i] == numbers::invalid_size_type)
558 {
559 starting_point = i;
560 break;
561 }
562
563 Assert(starting_point != numbers::invalid_size_type,
564 ExcInternalError());
565 }
566
567 return starting_point;
568 }
569 } // namespace internal
570
571
572
573 void
574 reorder_Cuthill_McKee(
575 const DynamicSparsityPattern &sparsity,
576 std::vector<DynamicSparsityPattern::size_type> &new_indices,
577 const std::vector<DynamicSparsityPattern::size_type> &starting_indices)
578 {
579 Assert(sparsity.n_rows() == sparsity.n_cols(),
580 ExcDimensionMismatch(sparsity.n_rows(), sparsity.n_cols()));
581 Assert(sparsity.n_rows() == new_indices.size(),
582 ExcDimensionMismatch(sparsity.n_rows(), new_indices.size()));
583 Assert(starting_indices.size() <= sparsity.n_rows(),
585 "You can't specify more starting indices than there are rows"));
586 Assert(sparsity.row_index_set().size() == 0 ||
587 sparsity.row_index_set().size() == sparsity.n_rows(),
589 "Only valid for sparsity patterns which store all rows."));
590 for (const auto starting_index : starting_indices)
591 {
592 (void)starting_index;
593 Assert(starting_index < sparsity.n_rows(),
594 ExcMessage("Invalid starting index: All starting indices need "
595 "to be between zero and the number of rows in the "
596 "sparsity pattern."));
597 }
598
599 // store the indices of the dofs renumbered in the last round. Default to
600 // starting points
601 std::vector<DynamicSparsityPattern::size_type> last_round_dofs(
602 starting_indices);
603
604 // initialize the new_indices array with invalid values
605 std::fill(new_indices.begin(),
606 new_indices.end(),
607 numbers::invalid_size_type);
608
609 // if no starting indices were given: find dof with lowest coordination
610 // number
611 if (last_round_dofs.empty())
612 last_round_dofs.push_back(
613 internal::find_unnumbered_starting_index(sparsity, new_indices));
614
615 // store next free dof index
616 DynamicSparsityPattern::size_type next_free_number = 0;
617
618 // enumerate the first round dofs
619 for (const auto &last_round_dof : last_round_dofs)
620 new_indices[last_round_dof] = next_free_number++;
621
622 // store the indices of the dofs to be renumbered in the next round
623 std::vector<DynamicSparsityPattern::size_type> next_round_dofs;
624
625 // store for each coordination number the dofs with these coordination
626 // number
627 std::vector<std::pair<unsigned int, DynamicSparsityPattern::size_type>>
628 dofs_by_coordination;
629
630 // now do as many steps as needed to renumber all dofs
631 while (true)
632 {
633 next_round_dofs.clear();
634
635 // find all neighbors of the dofs numbered in the last round
636 for (const auto dof : last_round_dofs)
637 {
638 const unsigned int row_length = sparsity.row_length(dof);
639 for (unsigned int i = 0; i < row_length; ++i)
640 {
641 // skip dofs which are already numbered
642 const auto column = sparsity.column_number(dof, i);
643 if (new_indices[column] == numbers::invalid_size_type)
644 {
645 next_round_dofs.push_back(column);
646
647 // assign a dummy value to 'new_indices' to avoid adding
648 // the same index again; those will get the right number
649 // at the end of the outer 'while' loop
650 new_indices[column] = 0;
651 }
652 }
653 }
654
655 // check whether there are any new dofs in the list. if there are
656 // none, then we have completely numbered the current component of the
657 // graph. check if there are as yet unnumbered components of the graph
658 // that we would then have to do next
659 if (next_round_dofs.empty())
660 {
661 if (std::find(new_indices.begin(),
662 new_indices.end(),
663 numbers::invalid_size_type) == new_indices.end())
664 // no unnumbered indices, so we can leave now
665 break;
666
667 // otherwise find a valid starting point for the next component of
668 // the graph and continue with numbering that one. we only do so
669 // if no starting indices were provided by the user (see the
670 // documentation of this function) so produce an error if we got
671 // here and starting indices were given
672 Assert(starting_indices.empty(),
673 ExcMessage("The input graph appears to have more than one "
674 "component, but as stated in the documentation "
675 "we only want to reorder such graphs if no "
676 "starting indices are given. The function was "
677 "called with starting indices, however."));
678
679 next_round_dofs.push_back(
680 internal::find_unnumbered_starting_index(sparsity, new_indices));
681 }
682
683
684 // find coordination number for each of these dofs
685 dofs_by_coordination.clear();
686 for (const types::global_dof_index next_round_dof : next_round_dofs)
687 dofs_by_coordination.emplace_back(sparsity.row_length(next_round_dof),
688 next_round_dof);
689 std::sort(dofs_by_coordination.begin(), dofs_by_coordination.end());
690
691 // assign new DoF numbers to the elements of the present front:
692 for (const auto &i : dofs_by_coordination)
693 new_indices[i.second] = next_free_number++;
694
695 // after that: use this round's dofs for the next round
696 last_round_dofs.swap(next_round_dofs);
697 }
698
699 // test for all indices numbered. this mostly tests whether the
700 // front-marching-algorithm (which Cuthill-McKee actually is) has reached
701 // all points.
702 Assert((std::find(new_indices.begin(),
703 new_indices.end(),
704 numbers::invalid_size_type) == new_indices.end()) &&
705 (next_free_number == sparsity.n_rows()),
706 ExcInternalError());
707 }
708
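 // A minimal usage sketch (hypothetical `dsp`: a quadratic
 // DynamicSparsityPattern storing all rows):
 //
 //   std::vector<DynamicSparsityPattern::size_type> perm(dsp.n_rows());
 //   SparsityTools::reorder_Cuthill_McKee(dsp, perm);
 //   // perm[old_index] yields the new index of each row/column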
709
710
711 namespace internal
712 {
713 void
714 reorder_hierarchical(
715 const DynamicSparsityPattern &connectivity,
716 std::vector<DynamicSparsityPattern::size_type> &renumbering)
717 {
718 AssertDimension(connectivity.n_rows(), connectivity.n_cols());
719 AssertDimension(connectivity.n_rows(), renumbering.size());
720 Assert(connectivity.row_index_set().size() == 0 ||
721 connectivity.row_index_set().size() == connectivity.n_rows(),
723 "Only valid for sparsity patterns which store all rows."));
724
725 // The algorithm below works by partitioning the rows in the
726 // connectivity graph, called nodes, into groups. The groups are defined
727 // as those nodes in immediate neighborhood of some pivot node, which we
728 // choose by minimal adjacency below.
729
730 // We define two types of node categories for nodes not yet classified,
731 // one consisting of all nodes we've not seen at all, and one for nodes
732 // identified as neighbors (variable current_neighbors below) but not
733 // yet grouped. We use this classification in combination with an
734 // unsorted vector, which is much faster than keeping a sorted data
735 // structure (e.g. std::set)
736 constexpr types::global_dof_index unseen_node =
737 numbers::invalid_dof_index;
738 constexpr types::global_dof_index available_node = unseen_node - 1;
739 const types::global_dof_index n_nodes = connectivity.n_rows();
740 std::vector<types::global_dof_index> touched_nodes(n_nodes, unseen_node);
741
742 std::vector<unsigned int> row_lengths(n_nodes);
743 std::vector<types::global_dof_index> current_neighbors;
744 std::vector<types::global_dof_index> group_starts(1);
745 std::vector<types::global_dof_index> group_indices;
746 group_indices.reserve(n_nodes);
747
748 // First collect the number of neighbors for each node. We use this
749 // field to find the next node with the minimum number of non-touched
750 // neighbors in the field n_remaining_neighbors, so we will count down
751 // on this field. We also cache the row lengths because we need this
752 // data frequently and getting it from the sparsity pattern is more
753 // expensive.
754 for (types::global_dof_index row = 0; row < n_nodes; ++row)
755 {
756 row_lengths[row] = connectivity.row_length(row);
757 Assert(row_lengths[row] > 0, ExcInternalError());
758 }
759 std::vector<unsigned int> n_remaining_neighbors(row_lengths);
760
761 // This outer loop is typically traversed only once, unless the global
762 // graph is not connected
763 while (true)
764 {
765 // Find node with the minimal number of neighbors (typically a
766 // corner node when based on FEM meshes). If no node is left, we are
767 // done. Together with the outer while loop, this loop can possibly
768 // be of quadratic complexity in the number of disconnected
769 // partitions, i.e. up to n_nodes in the worst case,
770 // but that is not the usual use case of this loop and thus not
771 // optimized for.
772 {
773 unsigned int candidate_valence = numbers::invalid_unsigned_int;
774 types::global_dof_index candidate_index =
775 numbers::invalid_dof_index;
776 for (types::global_dof_index i = 0; i < n_nodes; ++i)
777 if (touched_nodes[i] == unseen_node)
778 if (row_lengths[i] < candidate_valence)
779 {
780 candidate_index = i;
781 candidate_valence = n_remaining_neighbors[i];
782 if (candidate_valence <= 1)
783 break;
784 }
785 if (candidate_index == numbers::invalid_dof_index)
786 break;
787
788 Assert(candidate_valence > 0, ExcInternalError());
789
790 current_neighbors = {candidate_index};
791 touched_nodes[candidate_index] = available_node;
792 }
793
794 while (true)
795 {
796 // Find node with minimum number of untouched neighbors among
797 // the next set of possible neighbors (= valence), and among the
798 // set of nodes with the minimal number of neighbors, choose the
799 // one with the largest number of touched neighbors (i.e., the
800 // largest row length).
801 //
802 // This loop is typically the most expensive part for large
803 // graphs and thus only run once. We also do some cleanup, i.e.,
804 // the indices added to a group in the previous round need to be
805 // removed at this point.
806 unsigned int candidate_valence = numbers::invalid_unsigned_int;
807 types::global_dof_index candidate_index =
808 numbers::invalid_dof_index;
809 unsigned int candidate_row_length = 0;
810 const unsigned int loop_length = current_neighbors.size();
811 unsigned int write_index = 0;
812 for (unsigned int i = 0; i < loop_length; ++i)
813 {
814 const types::global_dof_index node = current_neighbors[i];
815 Assert(touched_nodes[node] != unseen_node,
816 ExcInternalError());
817 if (touched_nodes[node] == available_node)
818 {
819 current_neighbors[write_index] = node;
820 ++write_index;
821 if (n_remaining_neighbors[node] < candidate_valence ||
822 (n_remaining_neighbors[node] == candidate_valence &&
823 (row_lengths[node] > candidate_row_length ||
824 (row_lengths[node] == candidate_row_length &&
825 node < candidate_index))))
826 {
827 candidate_index = node;
828 candidate_valence = n_remaining_neighbors[node];
829 candidate_row_length = row_lengths[node];
830 }
831 }
832 }
833 current_neighbors.resize(write_index);
834
835 if constexpr (running_in_debug_mode())
836 {
837 for (const types::global_dof_index node : current_neighbors)
838 Assert(touched_nodes[node] == available_node,
839 ExcInternalError());
840 }
841
842 // No more neighbors left -> terminate loop
843 if (current_neighbors.empty())
844 break;
845
846 // Add the pivot and all direct neighbors of the pivot node not
847 // yet touched to the list of new entries.
848 group_indices.push_back(candidate_index);
849 touched_nodes[candidate_index] = group_starts.size() - 1;
850 const auto end_it = connectivity.end(candidate_index);
851 for (auto it = connectivity.begin(candidate_index); it != end_it;
852 ++it)
853 if (touched_nodes[it->column()] >= available_node)
854 {
855 group_indices.push_back(it->column());
856 touched_nodes[it->column()] = group_starts.size() - 1;
857 }
858 group_starts.push_back(group_indices.size());
859
860 // Add all neighbors of the current list not yet seen to the set
861 // of possible next nodes. The added node is grouped and thus no
862 // longer a valid neighbor (here we assume symmetry of the
863 // connectivity). It will be removed from the list of neighbors
864 // by the code further up in the next iteration of the
865 // surrounding loop.
866 for (types::global_dof_index index =
867 group_starts[group_starts.size() - 2];
868 index < group_starts.back();
869 ++index)
870 {
871 auto it = connectivity.begin(group_indices[index]);
872 const auto end_row = connectivity.end(group_indices[index]);
873 for (; it != end_row; ++it)
874 {
875 if (touched_nodes[it->column()] == unseen_node)
876 {
877 current_neighbors.push_back(it->column());
878 touched_nodes[it->column()] = available_node;
879 }
880 n_remaining_neighbors[it->column()]--;
881 }
882 }
883 }
884 }
885
886 // Sanity check: for all nodes, there should not be any neighbors left
887 for (types::global_dof_index row = 0; row < n_nodes; ++row)
888 Assert(n_remaining_neighbors[row] == 0, ExcInternalError());
889
890 // If the number of groups is smaller than the number of nodes, we
891 // continue by recursively calling this method
892 const unsigned int n_groups = group_starts.size() - 1;
893 if (n_groups < n_nodes)
894 {
895 // Form the connectivity of the groups
896 DynamicSparsityPattern connectivity_next(n_groups, n_groups);
897 for (types::global_dof_index row = 0; row < n_groups; ++row)
898 for (types::global_dof_index index = group_starts[row];
899 index < group_starts[row + 1];
900 ++index)
901 {
902 auto it = connectivity.begin(group_indices[index]);
903 const auto end_it = connectivity.end(group_indices[index]);
904 for (; it != end_it; ++it)
905 connectivity_next.add(row, touched_nodes[it->column()]);
906 }
907
908 // Recursively call the reordering
909 std::vector<types::global_dof_index> renumbering_next(n_groups);
910 reorder_hierarchical(connectivity_next, renumbering_next);
911
912 // Renumber the indices group by group according to the incoming
913 // ordering for the groups
914 for (types::global_dof_index row = 0, c = 0; row < n_groups; ++row)
915 for (types::global_dof_index index =
916 group_starts[renumbering_next[row]];
917 index < group_starts[renumbering_next[row] + 1];
918 ++index, ++c)
919 renumbering[c] = group_indices[index];
920 }
921 else
922 {
923 // All groups should have size one and no more recursion is possible,
924 // so use the numbering of the groups
925 unsigned int c = 0;
926 for (const types::global_dof_index i : group_indices)
927 renumbering[c++] = i;
928 }
929 }
930 } // namespace internal
931
932 void
933 reorder_hierarchical(
934 const DynamicSparsityPattern &connectivity,
935 std::vector<DynamicSparsityPattern::size_type> &renumbering)
936 {
937 // the internal renumbering keeps the numbering the wrong way around (but
938 // we cannot invert the numbering inside that method because it is used
939 // recursively), so invert it here
940 internal::reorder_hierarchical(connectivity, renumbering);
941 renumbering = Utilities::invert_permutation(renumbering);
942 }
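 // After the inversion above, the output follows the same convention as in
 // reorder_Cuthill_McKee(): renumbering[old_index] yields the new index.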
943
944
945
946#ifdef DEAL_II_WITH_MPI
947
948 void
949 gather_sparsity_pattern(DynamicSparsityPattern &dsp,
950 const IndexSet &locally_owned_rows,
951 const MPI_Comm mpi_comm,
952 const IndexSet &locally_relevant_rows)
953 {
954 using map_vec_t =
955 std::map<unsigned int, std::vector<DynamicSparsityPattern::size_type>>;
956
957 // 1. limit rows to non-owned:
958 IndexSet requested_rows(locally_relevant_rows);
959 requested_rows.subtract_set(locally_owned_rows);
960
961 std::vector<unsigned int> index_owner =
962 Utilities::MPI::compute_index_owner(locally_owned_rows,
963 requested_rows,
964 mpi_comm);
965
966 // 2. go through requested_rows, figure out the owner and add the row to
967 // request
968 map_vec_t rows_data;
969 for (DynamicSparsityPattern::size_type i = 0;
970 i < requested_rows.n_elements();
971 ++i)
972 {
973 const DynamicSparsityPattern::size_type row =
974 requested_rows.nth_index_in_set(i);
975
976 rows_data[index_owner[i]].push_back(row);
977 }
978
979 // 3. get what others ask us to send
980 const auto rows_data_received =
981 Utilities::MPI::some_to_some(mpi_comm, rows_data);
982
983 // 4. now prepare data to be sent in the same format as in
984 // distribute_sparsity_pattern() below, i.e.
985 // rX,num_rX,cols_rX
986 map_vec_t send_data;
987 for (const auto &data : rows_data_received)
988 {
989 for (const auto &row : data.second)
990 {
991 const auto rlen = dsp.row_length(row);
992
993 // skip empty lines
994 if (rlen == 0)
995 continue;
996
997 // save entries
998 send_data[data.first].push_back(row); // row index
999 send_data[data.first].push_back(rlen); // number of entries
1000 for (DynamicSparsityPattern::size_type c = 0; c < rlen; ++c)
1001 send_data[data.first].push_back(
1002 dsp.column_number(row, c)); // columns
1003 } // loop over rows
1004 } // loop over received data
1005
1006 // 5. communicate rows
1007 const auto received_data =
1008 Utilities::MPI::some_to_some(mpi_comm, send_data);
1009
1010 // 6. add result to our sparsity
1011 for (const auto &data : received_data)
1012 {
1013 const auto &recv_buf = data.second;
1014 auto ptr = recv_buf.begin();
1015 const auto end = recv_buf.end();
1016 while (ptr != end)
1017 {
1018 const auto row = *(ptr++);
1019 Assert(ptr != end, ExcInternalError());
1020
1021 const auto n_entries = *(ptr++);
1022 Assert(n_entries > 0, ExcInternalError());
1023 Assert(ptr != end, ExcInternalError());
1024
1025 // make sure we clear whatever was previously stored
1026 // in these rows. Otherwise we can't guarantee that the
1027 // data is consistent across the MPI communicator.
1028 dsp.clear_row(row);
1029
1030 Assert(ptr + (n_entries - 1) != end, ExcInternalError());
1031 dsp.add_entries(row, ptr, ptr + n_entries, true);
1032 ptr += n_entries;
1033 }
1034 Assert(ptr == end, ExcInternalError());
1035 }
1036 }
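 // The exchanged buffers use the flat layout row, n_entries, columns...
 // described in step 4 above; e.g., sending row 7 with columns {1,7,9}
 // appends {7, 3, 1, 7, 9}, and multiple rows destined for the same rank
 // are simply concatenated into one vector.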
1037
1038
1039
1040 void
1041 distribute_sparsity_pattern(
1042 DynamicSparsityPattern &dsp,
1043 const std::vector<DynamicSparsityPattern::size_type> &rows_per_cpu,
1044 const MPI_Comm mpi_comm,
1045 const IndexSet &myrange)
1046 {
1047 const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
1048 std::vector<DynamicSparsityPattern::size_type> start_index(
1049 rows_per_cpu.size() + 1);
1050 start_index[0] = 0;
1051 for (DynamicSparsityPattern::size_type i = 0; i < rows_per_cpu.size(); ++i)
1052 start_index[i + 1] = start_index[i] + rows_per_cpu[i];
1053
1054 IndexSet owned(start_index.back());
1055 owned.add_range(start_index[myid], start_index[myid] + rows_per_cpu[myid]);
1056
1057 distribute_sparsity_pattern(dsp, owned, mpi_comm, myrange);
1058 }
1059
1060
1061
1062 void
1063 distribute_sparsity_pattern(DynamicSparsityPattern &dsp,
1064 const IndexSet &locally_owned_rows,
1065 const MPI_Comm mpi_comm,
1066 const IndexSet &locally_relevant_rows)
1067 {
1068 Assert(dsp.row_index_set().size() == 0 ||
1069 dsp.row_index_set() == locally_relevant_rows,
1070 ExcMessage(
1071 "The DynamicSparsityPattern must be initialized with an IndexSet that contains locally relevant indices."));
1072
1073 IndexSet requested_rows(locally_relevant_rows);
1074 requested_rows.subtract_set(locally_owned_rows);
1075
1076 std::vector<unsigned int> index_owner =
1077 Utilities::MPI::compute_index_owner(locally_owned_rows,
1078 requested_rows,
1079 mpi_comm);
1080
1081 using map_vec_t =
1082 std::map<unsigned int, std::vector<DynamicSparsityPattern::size_type>>;
1083
1084 map_vec_t send_data;
1085
1086 for (DynamicSparsityPattern::size_type i = 0;
1087 i < requested_rows.n_elements();
1088 ++i)
1089 {
1090 const DynamicSparsityPattern::size_type row =
1091 requested_rows.nth_index_in_set(i);
1092
1093 const auto rlen = dsp.row_length(row);
1094
1095 // skip empty lines
1096 if (rlen == 0)
1097 continue;
1098
1099 // save entries
1100 send_data[index_owner[i]].push_back(row); // row index
1101 send_data[index_owner[i]].push_back(rlen); // number of entries
1102 for (DynamicSparsityPattern::size_type c = 0; c < rlen; ++c)
1103 {
1104 // columns
1105 const auto column = dsp.column_number(row, c);
1106 send_data[index_owner[i]].push_back(column);
1107 }
1108 }
1109
1110 const auto receive_data = Utilities::MPI::some_to_some(mpi_comm, send_data);
1111
1112 // add what we received
1113 for (const auto &data : receive_data)
1114 {
1115 const auto &recv_buf = data.second;
1116 auto ptr = recv_buf.begin();
1117 const auto end = recv_buf.end();
1118 while (ptr != end)
1119 {
1120 const auto row = *(ptr++);
1121 Assert(ptr != end, ExcInternalError());
1122 const auto n_entries = *(ptr++);
1123
1124 Assert(ptr + (n_entries - 1) != end, ExcInternalError());
1125 dsp.add_entries(row, ptr, ptr + n_entries, true);
1126 ptr += n_entries;
1127 }
1128 Assert(ptr == end, ExcInternalError());
1129 }
1130 }
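 // A minimal usage sketch in a typical parallel assembly setup
 // (hypothetical names; `locally_owned` and `locally_relevant` would come
 // from the DoFHandler, e.g. via DoFTools::extract_locally_relevant_dofs()):
 //
 //   DynamicSparsityPattern dsp(locally_relevant.size(),
 //                              locally_relevant.size(),
 //                              locally_relevant);
 //   DoFTools::make_sparsity_pattern(dof_handler, dsp);
 //   SparsityTools::distribute_sparsity_pattern(dsp,
 //                                              locally_owned,
 //                                              mpi_communicator,
 //                                              locally_relevant);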
1131
1132
1133
1134 void
1135 distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
1136 const std::vector<IndexSet> &owned_set_per_cpu,
1137 const MPI_Comm mpi_comm,
1138 const IndexSet &myrange)
1139 {
1140 const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
1141 distribute_sparsity_pattern(dsp,
1142 owned_set_per_cpu[myid],
1143 mpi_comm,
1144 myrange);
1145 }
1146
1147
1148
1149 void
1150 distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
1151 const IndexSet &locally_owned_rows,
1152 const MPI_Comm mpi_comm,
1153 const IndexSet &locally_relevant_rows)
1154 {
1155 using map_vec_t =
1156 std::map<unsigned int,
1157 std::vector<BlockDynamicSparsityPattern::size_type>>;
1158 map_vec_t send_data;
1159
1160 IndexSet requested_rows(locally_relevant_rows);
1161 requested_rows.subtract_set(locally_owned_rows);
1162
1163 std::vector<unsigned int> index_owner =
1164 Utilities::MPI::compute_index_owner(locally_owned_rows,
1165 requested_rows,
1166 mpi_comm);
1167
1168 for (BlockDynamicSparsityPattern::size_type i = 0;
1169 i < requested_rows.n_elements();
1170 ++i)
1171 {
1172 const BlockDynamicSparsityPattern::size_type row =
1173 requested_rows.nth_index_in_set(i);
1174
1175 const auto rlen = dsp.row_length(row);
1176
1177 // skip empty lines
1178 if (rlen == 0)
1179 continue;
1180
1181 // save entries
1182 std::vector<BlockDynamicSparsityPattern::size_type> &dst =
1183 send_data[index_owner[i]];
1184
1185 dst.push_back(rlen); // number of entries
1186 dst.push_back(row); // row index
1187 for (BlockDynamicSparsityPattern::size_type c = 0; c < rlen; ++c)
1188 {
1189 // columns
1190 const BlockDynamicSparsityPattern::size_type column =
1191 dsp.column_number(row, c);
1192 dst.push_back(column);
1193 }
1194 }
1195
1196 unsigned int num_receive = 0;
1197 {
1198 std::vector<unsigned int> send_to;
1199 send_to.reserve(send_data.size());
1200 for (const auto &sparsity_line : send_data)
1201 send_to.push_back(sparsity_line.first);
1202
1203 num_receive =
1204 Utilities::MPI::compute_n_point_to_point_communications(mpi_comm,
1205 send_to);
1206 }
1207
1208 std::vector<MPI_Request> requests(send_data.size());
1209
1210
1211 // send data
1212
1213 static Utilities::MPI::CollectiveMutex mutex;
1214 Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
1215
1216 const int mpi_tag = Utilities::MPI::internal::Tags::
1217 sparsity_tools_distribute_sparsity_pattern;
1218
1219 {
1220 unsigned int idx = 0;
1221 for (const auto &sparsity_line : send_data)
1222 {
1223 const int ierr = MPI_Isend(
1224 sparsity_line.second.data(),
1225 sparsity_line.second.size(),
1226 Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
1227 sparsity_line.first,
1228 mpi_tag,
1229 mpi_comm,
1230 &requests[idx++]);
1231 AssertThrowMPI(ierr);
1232 }
1233 }
1234
1235 {
1236 // receive
1237 std::vector<BlockDynamicSparsityPattern::size_type> recv_buf;
1238 for (unsigned int index = 0; index < num_receive; ++index)
1239 {
1240 MPI_Status status;
1241 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, mpi_comm, &status);
1242 AssertThrowMPI(ierr);
1243
1244 int len;
1245 ierr = MPI_Get_count(
1246 &status,
1247 Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
1248 &len);
1249 AssertThrowMPI(ierr);
1250
1251 recv_buf.resize(len);
1252 ierr = MPI_Recv(
1253 recv_buf.data(),
1254 len,
1255 Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
1256 status.MPI_SOURCE,
1257 status.MPI_TAG,
1258 mpi_comm,
1259 &status);
1260 AssertThrowMPI(ierr);
1261
1262 std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator
1263 ptr = recv_buf.begin();
1264 std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator
1265 end = recv_buf.end();
1266 while (ptr != end)
1267 {
1268 const BlockDynamicSparsityPattern::size_type num = *(ptr++);
1269 Assert(ptr != end, ExcInternalError());
1270 const BlockDynamicSparsityPattern::size_type row = *(ptr++);
1271 for (unsigned int c = 0; c < num; ++c)
1272 {
1273 Assert(ptr != end, ExcInternalError());
1274 dsp.add(row, *ptr);
1275 ++ptr;
1276 }
1277 }
1278 Assert(ptr == end, ExcInternalError());
1279 }
1280 }
1281
1282 // complete all sends, so that we can safely destroy the buffers.
1283 if (requests.size() > 0)
1284 {
1285 const int ierr =
1286 MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
1287 AssertThrowMPI(ierr);
1288 }
1289 }
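 // Note that, unlike the non-block functions above, this overload packs
 // each row as {n_entries, row, columns...} (length first), which is
 // exactly what the receive loop unpacks, and it sizes the point-to-point
 // exchange via compute_n_point_to_point_communications() so that each
 // rank knows how many messages to MPI_Probe for.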
1290#endif
1291} // namespace SparsityTools
1292
1293DEAL_II_NAMESPACE_CLOSE