Reference documentation for deal.II version 9.5.0
task_info.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2018 - 2023 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
17#include <deal.II/base/conditional_ostream.h>
18#include <deal.II/base/memory_consumption.h>
19#include <deal.II/base/mpi.h>
20#include <deal.II/base/multithread_info.h>
21#include <deal.II/base/parallel.h>
22#include <deal.II/base/utilities.h>
23
24#include <deal.II/lac/dynamic_sparsity_pattern.h>
25
26#include <deal.II/matrix_free/task_info.h>
27
28
29
30#ifdef DEAL_II_WITH_TBB
31# include <tbb/blocked_range.h>
32# include <tbb/parallel_for.h>
33# include <tbb/task.h>
34# ifndef DEAL_II_TBB_WITH_ONEAPI
35# include <tbb/task_scheduler_init.h>
36# endif
37#endif
38
39#include <iostream>
40#include <set>
41
42//
43// The oneAPI variant of TBB (oneTBB) has deprecated and removed the
44// <code>tbb::task</code> backend. Without it, the following code, which
45// builds a directed acyclic graph (DAG) of (thread-parallel) tasks, can
46// no longer be compiled without a major porting effort. It turned out
47// that such a dynamic handling of dependencies and structures is not as
48// competitive as initially assumed. Consequently, this part of the
49// matrix-free infrastructure has seen less attention than the rest over
50// the last years and is (presumably) not used that often.
51//
52// If the oneAPI backend is detected, we simply disable threading in the
53// matrix-free backend for now.
54//
55// Matthias Maier, Martin Kronbichler, 2021
56//
57
58DEAL_II_NAMESPACE_OPEN
59
60
61
62/*-------------------- Implementation of the matrix-free loop --------------*/
63namespace internal
64{
65 namespace MatrixFreeFunctions
66 {
67#if defined(DEAL_II_WITH_TBB) && !defined(DEAL_II_TBB_WITH_ONEAPI)
68
69 // This defines the TBB data structures that are needed to schedule the
70 // partition-partition variant
71
72 namespace partition
73 {
74 class ActualCellWork
75 {
76 public:
77 ActualCellWork(MFWorkerInterface **worker_pointer,
78 const unsigned int partition,
79 const TaskInfo & task_info)
80 : worker(nullptr)
81 , worker_pointer(worker_pointer)
82 , partition(partition)
83 , task_info(task_info)
84 {}
85
86 ActualCellWork(MFWorkerInterface &worker,
87 const unsigned int partition,
88 const TaskInfo & task_info)
89 : worker(&worker)
90 , worker_pointer(nullptr)
91 , partition(partition)
92 , task_info(task_info)
93 {}
94
95 void
96 operator()() const
97 {
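 // Use the worker handed in at construction time if one was given;
 // otherwise dereference the pointer, through which the worker object is
 // supplied later on.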
98 MFWorkerInterface *used_worker =
99 worker != nullptr ? worker : *worker_pointer;
100 Assert(used_worker != nullptr, ExcInternalError());
101 used_worker->cell(partition);
102
103 if (task_info.face_partition_data.empty() == false)
104 {
105 used_worker->face(partition);
106 used_worker->boundary(partition);
107 }
108 }
109
110 private:
111 MFWorkerInterface * worker;
112 MFWorkerInterface **worker_pointer;
113 const unsigned int partition;
114 const TaskInfo & task_info;
115 };
116
117 class CellWork : public tbb::task
118 {
119 public:
120 CellWork(MFWorkerInterface &worker,
121 const unsigned int partition,
122 const TaskInfo & task_info,
123 const bool is_blocked)
124 : dummy(nullptr)
125 , work(worker, partition, task_info)
126 , is_blocked(is_blocked)
127 {}
128
129 tbb::task *
130 execute() override
131 {
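 // Run the cell (and, if present, face and boundary) work for this
 // chunk. If this task blocks a successor task, spawning the empty
 // 'dummy' task afterwards signals completion to that successor.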
132 work();
133
134 if (is_blocked == true)
135 tbb::empty_task::spawn(*dummy);
136 return nullptr;
137 }
138
139 tbb::empty_task *dummy;
140
141 private:
142 ActualCellWork work;
143 const bool is_blocked;
144 };
145
146
147
148 class PartitionWork : public tbb::task
149 {
150 public:
151 PartitionWork(MFWorkerInterface &function_in,
152 const unsigned int partition_in,
153 const TaskInfo & task_info_in,
154 const bool is_blocked_in = false)
155 : dummy(nullptr)
156 , function(function_in)
157 , partition(partition_in)
158 , task_info(task_info_in)
159 , is_blocked(is_blocked_in)
160 {}
161
162 tbb::task *
163 execute() override
164 {
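 // Set up the task graph for this partition: the odd-indexed chunks are
 // spawned directly, while the even-indexed chunks are held back through
 // reference counts (using child and empty dummy tasks) until the chunks
 // they conflict with have finished. The local root task waits for all
 // of them before this function returns.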
165 tbb::empty_task *root =
166 new (tbb::task::allocate_root()) tbb::empty_task;
167 const unsigned int evens = task_info.partition_evens[partition];
168 const unsigned int odds = task_info.partition_odds[partition];
169 const unsigned int n_blocked_workers =
170 task_info.partition_n_blocked_workers[partition];
171 const unsigned int n_workers =
172 task_info.partition_n_workers[partition];
173 std::vector<CellWork *> worker(n_workers);
174 std::vector<CellWork *> blocked_worker(n_blocked_workers);
175
176 root->set_ref_count(evens + 1);
177 for (unsigned int j = 0; j < evens; ++j)
178 {
179 worker[j] = new (root->allocate_child())
180 CellWork(function,
181 task_info.partition_row_index[partition] + 2 * j,
182 task_info,
183 false);
184 if (j > 0)
185 {
186 worker[j]->set_ref_count(2);
187 blocked_worker[j - 1]->dummy =
188 new (worker[j]->allocate_child()) tbb::empty_task;
189 tbb::task::spawn(*blocked_worker[j - 1]);
190 }
191 else
192 worker[j]->set_ref_count(1);
193 if (j < evens - 1)
194 {
195 blocked_worker[j] = new (worker[j]->allocate_child())
196 CellWork(function,
197 task_info.partition_row_index[partition] + 2 * j +
198 1,
199 task_info,
200 true);
201 }
202 else
203 {
204 if (odds == evens)
205 {
206 worker[evens] = new (worker[j]->allocate_child())
207 CellWork(function,
208 task_info.partition_row_index[partition] +
209 2 * j + 1,
210 task_info,
211 false);
212 tbb::task::spawn(*worker[evens]);
213 }
214 else
215 {
216 tbb::empty_task *child =
217 new (worker[j]->allocate_child()) tbb::empty_task();
218 tbb::task::spawn(*child);
219 }
220 }
221 }
222
223 root->wait_for_all();
224 root->destroy(*root);
225 if (is_blocked == true)
226 tbb::empty_task::spawn(*dummy);
227 return nullptr;
228 }
229
230 tbb::empty_task *dummy;
231
232 private:
233 MFWorkerInterface &function;
234 const unsigned int partition;
235 const TaskInfo & task_info;
236 const bool is_blocked;
237 };
238
239 } // end of namespace partition
240
241
242
243 namespace color
244 {
245 class CellWork
246 {
247 public:
248 CellWork(MFWorkerInterface &worker_in,
249 const TaskInfo & task_info_in,
250 const unsigned int partition_in)
251 : worker(worker_in)
252 , task_info(task_info_in)
253 , partition(partition_in)
254 {}
255
256 void
257 operator()(const tbb::blocked_range<unsigned int> &r) const
258 {
259 const unsigned int start_index =
260 task_info.cell_partition_data[partition] +
261 task_info.block_size * r.begin();
262 const unsigned int end_index =
263 std::min(start_index + task_info.block_size * (r.end() - r.begin()),
264 task_info.cell_partition_data[partition + 1]);
265 worker.cell(std::make_pair(start_index, end_index));
266
267 if (task_info.face_partition_data.empty() == false)
268 {
269 AssertThrow(false, ExcNotImplemented());
270 }
271 }
272
273 private:
274 MFWorkerInterface &worker;
275 const TaskInfo & task_info;
276 const unsigned int partition;
277 };
278
279
280
281 class PartitionWork : public tbb::task
282 {
283 public:
284 PartitionWork(MFWorkerInterface &worker_in,
285 const unsigned int partition_in,
286 const TaskInfo & task_info_in,
287 const bool is_blocked_in)
288 : dummy(nullptr)
289 , worker(worker_in)
290 , partition(partition_in)
291 , task_info(task_info_in)
292 , is_blocked(is_blocked_in)
293 {}
294
295 tbb::task *
296 execute() override
297 {
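 // Split the cell range of this partition (color) into chunks of
 // block_size batches and process them with tbb::parallel_for, since
 // cells within one color can be worked on independently.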
298 const unsigned int n_chunks =
299 (task_info.cell_partition_data[partition + 1] -
300 task_info.cell_partition_data[partition] + task_info.block_size -
301 1) /
302 task_info.block_size;
303 parallel_for(tbb::blocked_range<unsigned int>(0, n_chunks, 1),
304 CellWork(worker, task_info, partition));
305 if (is_blocked == true)
306 tbb::empty_task::spawn(*dummy);
307 return nullptr;
308 }
309
310 tbb::empty_task *dummy;
311
312 private:
313 MFWorkerInterface &worker;
314 const unsigned int partition;
315 const TaskInfo & task_info;
316 const bool is_blocked;
317 };
318
319 } // end of namespace color
320
321
322
323 class MPICommunication : public tbb::task
324 {
325 public:
326 MPICommunication(MFWorkerInterface &worker_in, const bool do_compress)
327 : worker(worker_in)
328 , do_compress(do_compress)
329 {}
330
331 tbb::task *
332 execute() override
333 {
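 // Depending on the flag given to the constructor, this task either
 // finishes the ghost-value update (needed before cell work that reads
 // ghost entries) or starts compressing the destination vector.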
334 if (do_compress == false)
335 worker.vector_update_ghosts_finish();
336 else
337 worker.vector_compress_start();
338 return nullptr;
339 }
340
341 private:
342 MFWorkerInterface &worker;
343 const bool do_compress;
344 };
345
346#endif // DEAL_II_WITH_TBB
347
348
349
350 void
351 TaskInfo::loop(MFWorkerInterface &funct) const
352 {
353 // If we use thread parallelism, we do not currently support scheduling
354 // pieces of updates within the loop, so this index will collect all
355 // calls in that case and work like a single complete loop over all
356 // cells
357 if (scheme != none)
358 funct.cell_loop_pre_range(numbers::invalid_unsigned_int);
359 else
360 funct.cell_loop_pre_range(
361 partition_row_index[partition_row_index.size() - 2]);
362
363 funct.vector_update_ghosts_start();
364
365#if defined(DEAL_II_WITH_TBB) && !defined(DEAL_II_TBB_WITH_ONEAPI)
366
367 if (scheme != none)
368 {
369 funct.zero_dst_vector_range(numbers::invalid_unsigned_int);
370 if (scheme == partition_partition && evens > 0)
371 {
372 tbb::empty_task *root =
373 new (tbb::task::allocate_root()) tbb::empty_task;
374 root->set_ref_count(evens + 1);
375 std::vector<partition::PartitionWork *> worker(n_workers);
376 std::vector<partition::PartitionWork *> blocked_worker(
377 n_blocked_workers);
378 MPICommunication *worker_compr =
379 new (root->allocate_child()) MPICommunication(funct, true);
380 worker_compr->set_ref_count(1);
381 for (unsigned int j = 0; j < evens; ++j)
382 {
383 if (j > 0)
384 {
385 worker[j] = new (root->allocate_child())
386 partition::PartitionWork(funct, 2 * j, *this, false);
387 worker[j]->set_ref_count(2);
388 blocked_worker[j - 1]->dummy =
389 new (worker[j]->allocate_child()) tbb::empty_task;
390 tbb::task::spawn(*blocked_worker[j - 1]);
391 }
392 else
393 {
394 worker[j] = new (worker_compr->allocate_child())
395 partition::PartitionWork(funct, 2 * j, *this, false);
396 worker[j]->set_ref_count(2);
397 MPICommunication *worker_dist =
398 new (worker[j]->allocate_child())
399 MPICommunication(funct, false);
400 tbb::task::spawn(*worker_dist);
401 }
402 if (j < evens - 1)
403 {
404 blocked_worker[j] = new (worker[j]->allocate_child())
405 partition::PartitionWork(funct, 2 * j + 1, *this, true);
406 }
407 else
408 {
409 if (odds == evens)
410 {
411 worker[evens] = new (worker[j]->allocate_child())
412 partition::PartitionWork(funct,
413 2 * j + 1,
414 *this,
415 false);
416 tbb::task::spawn(*worker[evens]);
417 }
418 else
419 {
420 tbb::empty_task *child =
421 new (worker[j]->allocate_child()) tbb::empty_task();
422 tbb::task::spawn(*child);
423 }
424 }
425 }
426
427 root->wait_for_all();
428 root->destroy(*root);
429 }
430 else if (scheme == partition_partition)
431 {
432 // catch the case of empty partition list: we still need to call
433 // the vector communication routines to clean up and initiate
434 // things
435 funct.vector_update_ghosts_finish();
436 funct.vector_compress_start();
437 }
438 else // end of partition-partition, start of partition-color
439 {
440 // check whether there is only one partition. if not, build up the
441 // tree of partitions
442 if (odds > 0)
443 {
444 tbb::empty_task *root =
445 new (tbb::task::allocate_root()) tbb::empty_task;
446 root->set_ref_count(evens + 1);
447 const unsigned int n_blocked_workers =
448 odds - (odds + evens + 1) % 2;
449 const unsigned int n_workers =
450 cell_partition_data.size() - 1 - n_blocked_workers;
451 std::vector<color::PartitionWork *> worker(n_workers);
452 std::vector<color::PartitionWork *> blocked_worker(
453 n_blocked_workers);
454 unsigned int worker_index = 0, slice_index = 0;
455 int spawn_index_child = -2;
456 MPICommunication *worker_compr =
457 new (root->allocate_child()) MPICommunication(funct, true);
458 worker_compr->set_ref_count(1);
459 for (unsigned int part = 0;
460 part < partition_row_index.size() - 1;
461 part++)
462 {
463 if (part == 0)
464 worker[worker_index] =
465 new (worker_compr->allocate_child())
466 color::PartitionWork(funct,
467 slice_index,
468 *this,
469 false);
470 else
471 worker[worker_index] = new (root->allocate_child())
472 color::PartitionWork(funct,
473 slice_index,
474 *this,
475 false);
476 slice_index++;
477 for (; slice_index < partition_row_index[part + 1];
478 slice_index++)
479 {
480 worker[worker_index]->set_ref_count(1);
481 worker_index++;
482 worker[worker_index] =
483 new (worker[worker_index - 1]->allocate_child())
484 color::PartitionWork(funct,
485 slice_index,
486 *this,
487 false);
488 }
489 worker[worker_index]->set_ref_count(2);
490 if (part > 0)
491 {
492 blocked_worker[(part - 1) / 2]->dummy =
493 new (worker[worker_index]->allocate_child())
494 tbb::empty_task;
495 worker_index++;
496 if (spawn_index_child == -1)
497 tbb::task::spawn(*blocked_worker[(part - 1) / 2]);
498 else
499 {
500 Assert(spawn_index_child >= 0,
501 ExcInternalError());
502 tbb::task::spawn(*worker[spawn_index_child]);
503 }
504 spawn_index_child = -2;
505 }
506 else
507 {
508 MPICommunication *worker_dist =
509 new (worker[worker_index]->allocate_child())
510 MPICommunication(funct, false);
511 tbb::task::spawn(*worker_dist);
512 worker_index++;
513 }
514 part += 1;
515 if (part < partition_row_index.size() - 1)
516 {
517 if (part < partition_row_index.size() - 2)
518 {
519 blocked_worker[part / 2] =
520 new (worker[worker_index - 1]->allocate_child())
521 color::PartitionWork(funct,
522 slice_index,
523 *this,
524 true);
525 slice_index++;
526 if (slice_index < partition_row_index[part + 1])
527 {
528 blocked_worker[part / 2]->set_ref_count(1);
529 worker[worker_index] = new (
530 blocked_worker[part / 2]->allocate_child())
531 color::PartitionWork(funct,
532 slice_index,
533 *this,
534 false);
535 slice_index++;
536 }
537 else
538 {
539 spawn_index_child = -1;
540 continue;
541 }
542 }
543 for (; slice_index < partition_row_index[part + 1];
544 slice_index++)
545 {
546 if (slice_index > partition_row_index[part])
547 {
548 worker[worker_index]->set_ref_count(1);
549 worker_index++;
550 }
551 worker[worker_index] =
552 new (worker[worker_index - 1]->allocate_child())
553 color::PartitionWork(funct,
554 slice_index,
555 *this,
556 false);
557 }
558 spawn_index_child = worker_index;
559 worker_index++;
560 }
561 else
562 {
563 tbb::empty_task *final =
564 new (worker[worker_index - 1]->allocate_child())
565 tbb::empty_task;
566 tbb::task::spawn(*final);
567 spawn_index_child = worker_index - 1;
568 }
569 }
570 if (evens == odds)
571 {
572 Assert(spawn_index_child >= 0, ExcInternalError());
573 tbb::task::spawn(*worker[spawn_index_child]);
574 }
575 root->wait_for_all();
576 root->destroy(*root);
577 }
578 // case when we only have one partition: this is the usual
579 // coloring scheme, and we just schedule a parallel for loop for
580 // each color
581 else
582 {
583 Assert(evens <= 1, ExcInternalError());
584 funct.vector_update_ghosts_finish();
585
586 for (unsigned int color = 0; color < partition_row_index[1];
587 ++color)
588 {
589 tbb::empty_task *root =
590 new (tbb::task::allocate_root()) tbb::empty_task;
591 root->set_ref_count(2);
592 color::PartitionWork *worker =
593 new (root->allocate_child())
594 color::PartitionWork(funct, color, *this, false);
595 tbb::empty_task::spawn(*worker);
596 root->wait_for_all();
597 root->destroy(*root);
598 }
599
600 funct.vector_compress_start();
601 }
602 }
603 }
604 else
605#endif
606 // serial loop, go through up to three times and do the MPI transfer at
607 // the beginning/end of the second part
608 {
609 for (unsigned int part = 0; part < partition_row_index.size() - 2;
610 ++part)
611 {
612 if (part == 1)
613 funct.vector_update_ghosts_finish();
614
615 for (unsigned int i = partition_row_index[part];
616 i < partition_row_index[part + 1];
617 ++i)
618 {
619 funct.cell_loop_pre_range(i);
620 funct.zero_dst_vector_range(i);
621 AssertIndexRange(i + 1, cell_partition_data.size());
622 if (cell_partition_data[i + 1] > cell_partition_data[i])
623 {
624 funct.cell(i);
625 }
626
627 if (face_partition_data.empty() == false)
628 {
629 if (face_partition_data[i + 1] > face_partition_data[i])
630 funct.face(i);
631 if (boundary_partition_data[i + 1] >
632 boundary_partition_data[i])
633 funct.boundary(i);
634 }
635 funct.cell_loop_post_range(i);
636 }
637
638 if (part == 1)
639 funct.vector_compress_start();
640 }
641 }
642 funct.vector_compress_finish();
643
644 if (scheme != none)
645 funct.cell_loop_post_range(numbers::invalid_unsigned_int);
646 else
647 funct.cell_loop_post_range(
648 partition_row_index[partition_row_index.size() - 2]);
649 }
650
651
652
653 TaskInfo::TaskInfo()
654 {
655 clear();
656 }
657
658
659
660 void
661 TaskInfo::clear()
662 {
663 n_active_cells = 0;
664 n_ghost_cells = 0;
665 vectorization_length = 1;
666 block_size = 0;
667 n_blocks = 0;
668 scheme = none;
669 partition_row_index.clear();
670 partition_row_index.resize(2);
671 cell_partition_data.clear();
672 face_partition_data.clear();
673 boundary_partition_data.clear();
674 evens = 0;
675 odds = 0;
676 n_blocked_workers = 0;
677 n_workers = 0;
678 partition_evens.clear();
679 partition_odds.clear();
680 partition_n_blocked_workers.clear();
681 partition_n_workers.clear();
682 communicator = MPI_COMM_SELF;
683 my_pid = 0;
684 n_procs = 1;
685 }
686
687
688
689 template <typename StreamType>
690 void
691 TaskInfo::print_memory_statistics(StreamType &out,
692 const std::size_t data_length) const
693 {
694 Utilities::MPI::MinMaxAvg memory_c =
695 Utilities::MPI::min_max_avg(1e-6 * data_length, communicator);
696 if (n_procs < 2)
697 out << memory_c.min;
698 else
699 out << memory_c.min << "/" << memory_c.avg << "/" << memory_c.max;
700 out << " MB" << std::endl;
701 }
702
703
704
705 std::size_t
706 TaskInfo::memory_consumption() const
707 {
708 return (
709 sizeof(*this) +
710 MemoryConsumption::memory_consumption(partition_row_index) +
711 MemoryConsumption::memory_consumption(cell_partition_data) +
712 MemoryConsumption::memory_consumption(face_partition_data) +
713 MemoryConsumption::memory_consumption(boundary_partition_data) +
714 MemoryConsumption::memory_consumption(partition_evens) +
715 MemoryConsumption::memory_consumption(partition_odds) +
716 MemoryConsumption::memory_consumption(partition_n_blocked_workers) +
717 MemoryConsumption::memory_consumption(partition_n_workers));
718 }
719
720
721
722 void
723 TaskInfo::make_boundary_cells_divisible(
724 std::vector<unsigned int> &boundary_cells)
725 {
726 // try to make the number of boundary cells divisible by the number of
727 // vectors in vectorization
728 unsigned int fillup_needed =
729 (vectorization_length - boundary_cells.size() % vectorization_length) %
730 vectorization_length;
731 if (fillup_needed > 0 && boundary_cells.size() < n_active_cells)
732 {
733 // fill additional cells into the list of boundary cells to get a
734 // balanced number. Go through the indices successively until we
735 // have found enough indices
736 std::vector<unsigned int> new_boundary_cells;
737 new_boundary_cells.reserve(boundary_cells.size());
738
739 unsigned int next_free_slot = 0, bound_index = 0;
740 while (fillup_needed > 0 && bound_index < boundary_cells.size())
741 {
742 if (next_free_slot < boundary_cells[bound_index])
743 {
744 // check if there are enough cells to fill with in the
745 // current slot
746 if (next_free_slot + fillup_needed <=
747 boundary_cells[bound_index])
748 {
749 for (unsigned int j =
750 boundary_cells[bound_index] - fillup_needed;
751 j < boundary_cells[bound_index];
752 ++j)
753 new_boundary_cells.push_back(j);
754 fillup_needed = 0;
755 }
756 // ok, not enough indices, so just take them all up to the
757 // next boundary cell
758 else
759 {
760 for (unsigned int j = next_free_slot;
761 j < boundary_cells[bound_index];
762 ++j)
763 new_boundary_cells.push_back(j);
764 fillup_needed -=
765 boundary_cells[bound_index] - next_free_slot;
766 }
767 }
768 new_boundary_cells.push_back(boundary_cells[bound_index]);
769 next_free_slot = boundary_cells[bound_index] + 1;
770 ++bound_index;
771 }
772 while (fillup_needed > 0 &&
773 (new_boundary_cells.size() == 0 ||
774 new_boundary_cells.back() < n_active_cells - 1))
775 new_boundary_cells.push_back(new_boundary_cells.back() + 1);
776 while (bound_index < boundary_cells.size())
777 new_boundary_cells.push_back(boundary_cells[bound_index++]);
778
779 boundary_cells.swap(new_boundary_cells);
780 }
781
782 // set the number of cells
783 std::sort(boundary_cells.begin(), boundary_cells.end());
784
785 // check that number of boundary cells is divisible by
786 // vectorization_length or that it contains all cells
787 Assert(boundary_cells.size() % vectorization_length == 0 ||
788 boundary_cells.size() == n_active_cells,
789 ExcInternalError());
790 }
791
792
793
794 void
795 TaskInfo::create_blocks_serial(
796 const std::vector<unsigned int> &cells_with_comm,
797 const unsigned int dofs_per_cell,
798 const bool categories_are_hp,
799 const std::vector<unsigned int> &cell_vectorization_categories,
800 const bool cell_vectorization_categories_strict,
801 const std::vector<unsigned int> &parent_relation,
802 std::vector<unsigned int> & renumbering,
803 std::vector<unsigned char> & incompletely_filled_vectorization)
804 {
805 Assert(dofs_per_cell > 0, ExcInternalError());
806 // This function is decomposed into several steps to determine a good
807 // ordering that satisfies the following constraints:
808 // a. Only cells belonging to the same category (or next higher if the
809 // cell_vectorization_categories_strict is false) can be grouped into
810 // the same SIMD batch
811 // b. hp-adaptive computations must form contiguous ranges for the same
812 // degree (category) in cell_partition_data
813 // c. We want to group the cells with the same parent in the same SIMD
814 // lane if possible
815 // d. The cell order should be similar to the initial one
816 // e. Separate the cells without MPI communication from those with it,
817 // so that communication can be overlapped with computation
818 //
819 // These constraints are satisfied by first grouping by the categories
820 // and, within the groups, to distinguish between cells with a parent
821 // and those without. All of this is set up with batches of cells (with
822 // padding if the size does not match). Then we define a vector of
823 // arrays where we define sorting criteria for the cell batches to
824 // satisfy the items b and d together, split by different parts to
825 // satisfy item e.
826
827 // Give the compiler a chance to detect that vectorization_length is a
828 // power of two, which allows it to replace integer divisions by shifts
829 const unsigned int n_lanes = indicate_power_of_two(vectorization_length);
830
831 // Step 1: find tight map of categories for not taking exceeding amounts
832 // of memory below. Sort the new categories by the numbers in the
833 // old one to ensure we respect the given rules
834 unsigned int n_categories = 1;
835 std::vector<unsigned int> tight_category_map(n_active_cells, 0);
836 if (cell_vectorization_categories.empty() == false)
837 {
838 AssertDimension(cell_vectorization_categories.size(),
839 n_active_cells + n_ghost_cells);
840
841 std::set<unsigned int> used_categories;
842 for (unsigned int i = 0; i < n_active_cells; ++i)
843 used_categories.insert(cell_vectorization_categories[i]);
844 std::vector<unsigned int> used_categories_vector(
845 used_categories.size());
846 n_categories = 0;
847 for (const auto &it : used_categories)
848 used_categories_vector[n_categories++] = it;
849 for (unsigned int i = 0; i < n_active_cells; ++i)
850 {
851 const unsigned int index =
852 std::lower_bound(used_categories_vector.begin(),
853 used_categories_vector.end(),
854 cell_vectorization_categories[i]) -
855 used_categories_vector.begin();
856 AssertIndexRange(index, used_categories_vector.size());
857 tight_category_map[i] = index;
858 }
859 }
860
861 // Step 2: Sort the cells by the category. If we want to fill up the
862 // ranges in vectorization, promote some of the cells to a higher
863 // category
864 std::vector<std::vector<unsigned int>> renumbering_category(n_categories);
865 for (unsigned int i = 0; i < n_active_cells; ++i)
866 renumbering_category[tight_category_map[i]].push_back(i);
867
868 if (cell_vectorization_categories_strict == false && n_categories > 1)
869 for (unsigned int j = n_categories - 1; j > 0; --j)
870 {
871 unsigned int lower_index = j - 1;
872 while ((renumbering_category[j].size() % n_lanes) != 0u)
873 {
874 while (((renumbering_category[j].size() % n_lanes) != 0u) &&
875 !renumbering_category[lower_index].empty())
876 {
877 renumbering_category[j].push_back(
878 renumbering_category[lower_index].back());
879 renumbering_category[lower_index].pop_back();
880 }
881 if (lower_index == 0)
882 break;
883 else
884 --lower_index;
885 }
886 }
887
888 // Step 3: Use the parent relation to find a good grouping of cells. To
889 // do this, we first put cells of each category defined above into two
890 // bins, those which we know can be grouped together by the given parent
891 // relation and those which cannot
892 std::vector<unsigned int> temporary_numbering;
893 temporary_numbering.reserve(n_active_cells +
894 (n_lanes - 1) * n_categories);
895 const unsigned int n_cells_per_parent =
896 std::count(parent_relation.begin(), parent_relation.end(), 0);
897 std::vector<unsigned int> category_size;
898 for (unsigned int j = 0; j < n_categories; ++j)
899 {
900 std::vector<std::pair<unsigned int, unsigned int>> grouped_cells;
901 std::vector<unsigned int> other_cells;
902 for (const unsigned int cell : renumbering_category[j])
903 if (parent_relation.empty() ||
904 parent_relation[cell] == numbers::invalid_unsigned_int)
905 other_cells.push_back(cell);
906 else
907 grouped_cells.emplace_back(parent_relation[cell], cell);
908
909 // Compute the number of cells per group
910 std::sort(grouped_cells.begin(), grouped_cells.end());
911 std::vector<unsigned int> n_cells_per_group;
912 unsigned int length = 0;
913 for (unsigned int i = 0; i < grouped_cells.size(); ++i, ++length)
914 if (i > 0 && grouped_cells[i].first != grouped_cells[i - 1].first)
915 {
916 n_cells_per_group.push_back(length);
917 length = 0;
918 }
919 if (length > 0)
920 n_cells_per_group.push_back(length);
921
922 // Move groups that do not have the complete size (due to
923 // categories) to the 'other_cells'. The cells with correct group
924 // size are immediately appended to the temporary cell numbering
925 auto group_it = grouped_cells.begin();
926 for (const unsigned int length : n_cells_per_group)
927 if (length < n_cells_per_parent)
928 for (unsigned int j = 0; j < length; ++j)
929 other_cells.push_back((group_it++)->second);
930 else
931 {
932 // we should not have more cells in a group than in the first
933 // check we did above
934 AssertDimension(length, n_cells_per_parent);
935 for (unsigned int j = 0; j < length; ++j)
936 temporary_numbering.push_back((group_it++)->second);
937 }
938
939 // Sort the remaining cells and append them as well
940 std::sort(other_cells.begin(), other_cells.end());
941 temporary_numbering.insert(temporary_numbering.end(),
942 other_cells.begin(),
943 other_cells.end());
944
945 while (temporary_numbering.size() % n_lanes != 0)
946 temporary_numbering.push_back(numbers::invalid_unsigned_int);
947
948 category_size.push_back(temporary_numbering.size());
949 }
950
951 // Step 4: Identify the batches with cells marked as "comm"
952 std::vector<bool> batch_with_comm(temporary_numbering.size() / n_lanes,
953 false);
954 std::vector<unsigned int> temporary_numbering_inverse(n_active_cells);
955 for (unsigned int i = 0; i < temporary_numbering.size(); ++i)
956 if (temporary_numbering[i] != numbers::invalid_unsigned_int)
957 temporary_numbering_inverse[temporary_numbering[i]] = i;
958 for (const unsigned int cell : cells_with_comm)
959 batch_with_comm[temporary_numbering_inverse[cell] / n_lanes] = true;
960
961 // Step 5: Sort the batches of cells by their last cell index to get
962 // good locality, assuming that the initial cell order is of good
963 // locality. In case we have hp-calculations with categories, we need to
964 // sort also by the category.
965 std::vector<std::array<unsigned int, 3>> batch_order;
966 std::vector<std::array<unsigned int, 3>> batch_order_comm;
967 for (unsigned int i = 0; i < temporary_numbering.size(); i += n_lanes)
968 {
969 unsigned int max_index = 0;
970 for (unsigned int j = 0; j < n_lanes; ++j)
971 if (temporary_numbering[i + j] < numbers::invalid_unsigned_int)
972 max_index = std::max(temporary_numbering[i + j], max_index);
973 const unsigned int category_hp =
974 categories_are_hp ?
975 std::upper_bound(category_size.begin(), category_size.end(), i) -
976 category_size.begin() :
977 0;
978 const std::array<unsigned int, 3> next{{category_hp, max_index, i}};
979 if (batch_with_comm[i / n_lanes])
980 batch_order_comm.emplace_back(next);
981 else
982 batch_order.emplace_back(next);
983 }
984
985 std::sort(batch_order.begin(), batch_order.end());
986 std::sort(batch_order_comm.begin(), batch_order_comm.end());
987
988 // Step 6: Put the cells with communication in the middle of the cell
989 // range. For the MPI case, we need three groups to enable overlap for
990 // communication and computation (part before comm, part with comm, part
991 // after comm), whereas we need one for the other case. And in each
992 // case, we allow for a slot of "ghosted" cells.
993 std::vector<unsigned int> blocks;
994 if (n_procs == 1)
995 {
996 if (batch_order.empty())
997 std::swap(batch_order_comm, batch_order);
998 Assert(batch_order_comm.empty(), ExcInternalError());
999 partition_row_index.resize(3);
1000 blocks = {0, static_cast<unsigned int>(batch_order.size())};
1001 }
1002 else
1003 {
1004 partition_row_index.resize(5);
1005 const unsigned int comm_begin = batch_order.size() / 2;
1006 batch_order.insert(batch_order.begin() + comm_begin,
1007 batch_order_comm.begin(),
1008 batch_order_comm.end());
1009 const unsigned int comm_end = comm_begin + batch_order_comm.size();
1010 const unsigned int end = batch_order.size();
1011 blocks = {0, comm_begin, comm_end, end};
1012 }
1013
1014 // Step 7: sort ghost cells according to the category
1015 std::vector<std::array<unsigned int, 2>> tight_category_map_ghost;
1016
1017 if (cell_vectorization_categories.empty() == false)
1018 {
1019 tight_category_map_ghost.reserve(n_ghost_cells);
1020
1021 std::set<unsigned int> used_categories;
1022 for (unsigned int i = 0; i < n_ghost_cells; ++i)
1023 used_categories.insert(
1024 cell_vectorization_categories[i + n_active_cells]);
1025
1026 std::vector<unsigned int> used_categories_vector(
1027 used_categories.size());
1028 n_categories = 0;
1029 for (const auto &it : used_categories)
1030 used_categories_vector[n_categories++] = it;
1031
1032 std::vector<unsigned int> counters(n_categories, 0);
1033
1034 for (unsigned int i = 0; i < n_ghost_cells; ++i)
1035 {
1036 const unsigned int index =
1037 std::lower_bound(
1038 used_categories_vector.begin(),
1039 used_categories_vector.end(),
1040 cell_vectorization_categories[i + n_active_cells]) -
1041 used_categories_vector.begin();
1042 AssertIndexRange(index, used_categories_vector.size());
1043 tight_category_map_ghost.emplace_back(
1044 std::array<unsigned int, 2>{{index, i}});
1045
1046 // account for padding in the hp and strict case
1047 if (categories_are_hp || cell_vectorization_categories_strict)
1048 counters[index]++;
1049 }
1050
1051 // insert padding
1052 for (unsigned int i = 0; i < counters.size(); ++i)
1053 if (counters[i] % n_lanes != 0)
1054 for (unsigned int j = counters[i] % n_lanes; j < n_lanes; ++j)
1055 tight_category_map_ghost.emplace_back(
1056 std::array<unsigned int, 2>{
1057 {i, numbers::invalid_unsigned_int}});
1058
1059 std::sort(tight_category_map_ghost.begin(),
1060 tight_category_map_ghost.end());
1061 }
1062
1063 // Step 8: Fill in the data by batches for the locally owned cells.
1064 const unsigned int n_cell_batches = batch_order.size();
1065 const unsigned int n_ghost_batches =
1066 ((tight_category_map_ghost.empty() ? n_ghost_cells :
1067 tight_category_map_ghost.size()) +
1068 n_lanes - 1) /
1069 n_lanes;
1070 incompletely_filled_vectorization.resize(n_cell_batches +
1071 n_ghost_batches);
1072
1073 cell_partition_data.clear();
1074 cell_partition_data.resize(1, 0);
1075
1076 renumbering.clear();
1077 renumbering.resize(n_active_cells + n_ghost_cells,
1078 numbers::invalid_unsigned_int);
1079
1080 unsigned int counter = 0;
1081 for (unsigned int block = 0; block < blocks.size() - 1; ++block)
1082 {
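 // The ranges recorded in cell_partition_data use a grain size of about
 // 1024 / dofs_per_cell cell batches, rounded down to a multiple of
 // four, but at least two batches.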
1083 const unsigned int grain_size =
1084 std::max((2048U / dofs_per_cell) / 8 * 4, 2U);
1085 for (unsigned int k = blocks[block]; k < blocks[block + 1];
1086 k += grain_size)
1087 cell_partition_data.push_back(
1088 std::min(k + grain_size, blocks[block + 1]));
1089 partition_row_index[block + 1] = cell_partition_data.size() - 1;
1090
1091 // Set the numbering according to the reordered temporary one
1092 for (unsigned int k = blocks[block]; k < blocks[block + 1]; ++k)
1093 {
1094 const unsigned int pos = batch_order[k][2];
1095 unsigned int j = 0;
1096 for (; j < n_lanes && temporary_numbering[pos + j] !=
1097 numbers::invalid_unsigned_int;
1098 ++j)
1099 renumbering[counter++] = temporary_numbering[pos + j];
1100 if (j < n_lanes)
1101 incompletely_filled_vectorization[k] = j;
1102 }
1103 }
1104 AssertDimension(counter, n_active_cells);
1105
1106 // Step 9: Treat the ghost cells
1107 if (tight_category_map_ghost.empty())
1108 {
1109 for (unsigned int cell = 0; cell < n_ghost_cells; ++cell)
1110 renumbering[n_active_cells + cell] = n_active_cells + cell;
1111
1112 if ((n_ghost_cells % n_lanes) != 0u)
1113 incompletely_filled_vectorization.back() = n_ghost_cells % n_lanes;
1114 }
1115 else
1116 {
1117 for (unsigned int k = 0, ptr = 0; k < n_ghost_batches;
1118 ++k, ptr += n_lanes)
1119 {
1120 unsigned int j = 0;
1121
1122 for (;
1123 j < n_lanes && (ptr + j < tight_category_map_ghost.size()) &&
1124 (tight_category_map_ghost[ptr + j][1] !=
1125 numbers::invalid_unsigned_int);
1126 ++j)
1127 renumbering[counter++] =
1128 n_active_cells + tight_category_map_ghost[ptr + j][1];
1129
1130 if (j < n_lanes)
1131 incompletely_filled_vectorization[n_cell_batches + k] = j;
1132 }
1133
1134 AssertDimension(counter, n_active_cells + n_ghost_cells);
1135 }
1136
1137 cell_partition_data.push_back(n_cell_batches + n_ghost_batches);
1138 partition_row_index.back() = cell_partition_data.size() - 1;
1139
1140#ifdef DEBUG
1141 std::vector<unsigned int> renumber_cpy(renumbering);
1142 std::sort(renumber_cpy.begin(), renumber_cpy.end());
1143 for (unsigned int i = 0; i < renumber_cpy.size(); ++i)
1144 AssertDimension(i, renumber_cpy[i]);
1145#endif
1146 }
1147
1148
1149
1150 void
1151 TaskInfo::initial_setup_blocks_tasks(
1152 const std::vector<unsigned int> &boundary_cells,
1153 std::vector<unsigned int> & renumbering,
1154 std::vector<unsigned char> & incompletely_filled_vectorization)
1155 {
1156 const unsigned int n_cell_batches =
1157 (n_active_cells + vectorization_length - 1) / vectorization_length;
1158 const unsigned int n_ghost_slots =
1159 (n_ghost_cells + vectorization_length - 1) / vectorization_length;
1160 incompletely_filled_vectorization.resize(n_cell_batches + n_ghost_slots);
1161 if (n_cell_batches * vectorization_length > n_active_cells)
1162 incompletely_filled_vectorization[n_cell_batches - 1] =
1163 vectorization_length -
1164 (n_cell_batches * vectorization_length - n_active_cells);
1165 if (n_ghost_slots * vectorization_length > n_ghost_cells)
1166 incompletely_filled_vectorization[n_cell_batches + n_ghost_slots - 1] =
1167 vectorization_length -
1168 (n_ghost_slots * vectorization_length - n_ghost_cells);
1169
1170 std::vector<unsigned int> reverse_numbering(
1171 n_active_cells, numbers::invalid_unsigned_int);
1172 for (unsigned int j = 0; j < boundary_cells.size(); ++j)
1173 reverse_numbering[boundary_cells[j]] = j;
1174 unsigned int counter = boundary_cells.size();
1175 for (unsigned int j = 0; j < n_active_cells; ++j)
1176 if (reverse_numbering[j] == numbers::invalid_unsigned_int)
1177 reverse_numbering[j] = counter++;
1178
1179 AssertDimension(counter, n_active_cells);
1180 renumbering = Utilities::invert_permutation(reverse_numbering);
1181
1182 for (unsigned int j = n_active_cells; j < n_active_cells + n_ghost_cells;
1183 ++j)
1184 renumbering.push_back(j);
1185
1186 // TODO: might be able to simplify this code by not relying on the cell
1187 // partition data while computing the thread graph
1188 cell_partition_data.clear();
1189 cell_partition_data.push_back(0);
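 // With MPI, split the locally owned batches into three ranges: the
 // batches before the boundary cells, the boundary-cell batches
 // themselves, and the batches after them, so that the work on the first
 // range can be overlapped with communication.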
1190 if (n_procs > 1)
1191 {
1192 const unsigned int n_macro_boundary_cells =
1193 (boundary_cells.size() + vectorization_length - 1) /
1194 vectorization_length;
1195 cell_partition_data.push_back(
1196 (n_cell_batches - n_macro_boundary_cells) / 2);
1197 cell_partition_data.push_back(cell_partition_data[1] +
1198 n_macro_boundary_cells);
1199 }
1200 else
1201 AssertDimension(boundary_cells.size(), 0);
1202 cell_partition_data.push_back(n_cell_batches);
1203 cell_partition_data.push_back(cell_partition_data.back() + n_ghost_slots);
1204 partition_row_index.resize(n_procs > 1 ? 4 : 2);
1205 partition_row_index[0] = 0;
1206 partition_row_index[1] = 1;
1207 if (n_procs > 1)
1208 {
1209 partition_row_index[2] = 2;
1210 partition_row_index[3] = 3;
1211 }
1212 }
1213
1214
1215
1216 void
1217 TaskInfo::guess_block_size(const unsigned int dofs_per_cell)
1218 {
1219 // user did not say a positive number, so we have to guess
1220 if (block_size == 0)
1221 {
1222 // we would like to have enough work to do, so as first guess, try
1223 // to get 16 times as many chunks as we have threads on the system.
1224 block_size = n_active_cells / (MultithreadInfo::n_threads() * 16 *
1225 vectorization_length);
1226
1227 // if there are too few degrees of freedom per cell, need to
1228 // increase the block size
1229 const unsigned int minimum_parallel_grain_size = 200;
1230 if (dofs_per_cell * block_size < minimum_parallel_grain_size)
1231 block_size = (minimum_parallel_grain_size / dofs_per_cell + 1);
1232 if (dofs_per_cell * block_size > 10000)
1233 block_size /= 4;
1234
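 // finally, round the block size to a power of two, namely the largest
 // power of two that does not exceed block_size + 1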
1235 block_size =
1236 1 << static_cast<unsigned int>(std::log2(block_size + 1));
1237 }
1238 if (block_size > n_active_cells)
1239 block_size = std::max(1U, n_active_cells);
1240 }
1241
1242
1243
1244 void
1245 TaskInfo::make_thread_graph_partition_color(
1246 DynamicSparsityPattern & connectivity_large,
1247 std::vector<unsigned int> & renumbering,
1248 std::vector<unsigned char> &irregular_cells,
1249 const bool)
1250 {
1251 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
1252 if (n_cell_batches == 0)
1253 return;
1254
1255 Assert(vectorization_length > 0, ExcInternalError());
1256
1257 unsigned int partition = 0, counter = 0;
1258
1259 // Create connectivity graph for blocks based on connectivity graph for
1260 // cells.
1261 DynamicSparsityPattern connectivity(n_blocks, n_blocks);
1262 make_connectivity_cells_to_blocks(irregular_cells,
1263 connectivity_large,
1264 connectivity);
1265
1266 // Create cell-block partitioning.
1267
1268 // For each block of cells, this variable saves to which partitions the
1269 // block belongs. Initialize all to -1 to mark them as not yet assigned
1270 // a partition.
1271 std::vector<unsigned int> cell_partition(n_blocks,
1272 numbers::invalid_unsigned_int);
1273
1274 // In element j of this variable, one puts the old number of the block
1275 // that should be the jth block in the new numeration.
1276 std::vector<unsigned int> partition_list(n_blocks, 0);
1277 std::vector<unsigned int> partition_color_list(n_blocks, 0);
1278
1279 // This vector points to the start of each partition.
1280 std::vector<unsigned int> partition_size(2, 0);
1281
1282 // blocking_connectivity = true;
1283
1284 // The cluster_size in make_partitioning defines that the no. of cells
1285 // in each partition should be a multiple of cluster_size.
1286 unsigned int cluster_size = 1;
1287
1288 // Make the partitioning of the first layer of the blocks of cells.
1289 make_partitioning(connectivity,
1290 cluster_size,
1291 cell_partition,
1292 partition_list,
1293 partition_size,
1294 partition);
1295
1296 // Color the cells within each partition
1297 make_coloring_within_partitions_pre_blocked(connectivity,
1298 partition,
1299 cell_partition,
1300 partition_list,
1301 partition_size,
1302 partition_color_list);
1303
1304 partition_list = renumbering;
1305
1306#ifdef DEBUG
1307 // in debug mode, check that the partition color list is one-to-one
1308 {
1309 std::vector<unsigned int> sorted_pc_list(partition_color_list);
1310 std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
1311 for (unsigned int i = 0; i < sorted_pc_list.size(); ++i)
1312 Assert(sorted_pc_list[i] == i, ExcInternalError());
1313 }
1314#endif
1315
1316 // set the start list for each block and compute the renumbering of
1317 // cells
1318 std::vector<unsigned int> block_start(n_cell_batches + 1);
1319 std::vector<unsigned char> irregular(n_cell_batches);
1320
1321 unsigned int mcell_start = 0;
1322 block_start[0] = 0;
1323 for (unsigned int block = 0; block < n_blocks; ++block)
1324 {
1325 block_start[block + 1] = block_start[block];
1326 for (unsigned int mcell = mcell_start;
1327 mcell < std::min(mcell_start + block_size, n_cell_batches);
1328 ++mcell)
1329 {
1330 unsigned int n_comp = (irregular_cells[mcell] > 0) ?
1331 irregular_cells[mcell] :
1332 vectorization_length;
1333 block_start[block + 1] += n_comp;
1334 ++counter;
1335 }
1336 mcell_start += block_size;
1337 }
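 // Now run through the blocks in their new (colored) order, write the
 // cell renumbering accordingly, and rewrite cell_partition_data in
 // terms of cell batches rather than blocks.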
1338 counter = 0;
1339 unsigned int counter_macro = 0;
1340 unsigned int block_size_last =
1341 n_cell_batches - block_size * (n_blocks - 1);
1342 if (block_size_last == 0)
1343 block_size_last = block_size;
1344
1345 unsigned int tick = 0;
1346 for (unsigned int block = 0; block < n_blocks; ++block)
1347 {
1348 unsigned int present_block = partition_color_list[block];
1349 for (unsigned int cell = block_start[present_block];
1350 cell < block_start[present_block + 1];
1351 ++cell)
1352 renumbering[counter++] = partition_list[cell];
1353 unsigned int this_block_size =
1354 (present_block == n_blocks - 1) ? block_size_last : block_size;
1355
1356 // Also re-compute the content of cell_partition_data to
1357 // contain the numbers of cells, not blocks
1358 if (cell_partition_data[tick] == block)
1359 cell_partition_data[tick++] = counter_macro;
1360
1361 for (unsigned int j = 0; j < this_block_size; ++j)
1362 irregular[counter_macro++] =
1363 irregular_cells[present_block * block_size + j];
1364 }
1365 AssertDimension(tick + 1, cell_partition_data.size());
1366 cell_partition_data.back() = counter_macro;
1367
1368 irregular_cells.swap(irregular);
1369 AssertDimension(counter, n_active_cells);
1370 AssertDimension(counter_macro, n_cell_batches);
1371
1372 // check that the renumbering is one-to-one
1373#ifdef DEBUG
1374 {
1375 std::vector<unsigned int> sorted_renumbering(renumbering);
1376 std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
1377 for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
1378 Assert(sorted_renumbering[i] == i, ExcInternalError());
1379 }
1380#endif
1381
1382
1383 update_task_info(
1384 partition); // Actually sets too much for partition color case
1385
1386 AssertDimension(cell_partition_data.back(), n_cell_batches);
1387 }
1388
1389
1390
1391 void
1392 TaskInfo::make_thread_graph(
1393 const std::vector<unsigned int> &cell_active_fe_index,
1394 DynamicSparsityPattern & connectivity,
1395 std::vector<unsigned int> & renumbering,
1396 std::vector<unsigned char> & irregular_cells,
1397 const bool hp_bool)
1398 {
1399 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
1400 if (n_cell_batches == 0)
1401 return;
1402
1403 Assert(vectorization_length > 0, ExcInternalError());
1404
1405 // if we want to block before partitioning, create connectivity graph
1406 // for blocks based on connectivity graph for cells.
1407 DynamicSparsityPattern connectivity_blocks(n_blocks, n_blocks);
1408 make_connectivity_cells_to_blocks(irregular_cells,
1409 connectivity,
1410 connectivity_blocks);
1411
1412 unsigned int n_blocks = 0;
1413 if (scheme == partition_color ||
1414 scheme == color) // blocking_connectivity == true
1415 n_blocks = this->n_blocks;
1416 else
1417 n_blocks = n_active_cells;
1418
1419 // For each block of cells, this variable saves to which partitions the
1420 // block belongs. Initialize all to -1 to mark them as not yet assigned
1421 // a partition.
1422 std::vector<unsigned int> cell_partition(n_blocks,
1423 numbers::invalid_unsigned_int);
1424
1425 // In element j of this variable, one puts the old number (but after
1426 // renumbering according to the input renumbering) of the block that
1427 // should be the jth block in the new numeration.
1428 std::vector<unsigned int> partition_list(n_blocks, 0);
1429 std::vector<unsigned int> partition_2layers_list(n_blocks, 0);
1430
1431 // This vector points to the start of each partition.
1432 std::vector<unsigned int> partition_size(2, 0);
1433
1434 unsigned int partition = 0;
1435
1436 // Within the partitions we want to be able to block for the case that
1437 // we do not block already in the connectivity. The cluster_size in
1438 // make_partitioning defines that the no. of cells in each partition
1439 // should be a multiple of cluster_size.
1440 unsigned int cluster_size = 1;
1441 if (scheme == partition_partition)
1442 cluster_size = block_size * vectorization_length;
1443
1444 // Make the partitioning of the first layer of the blocks of cells.
1445 if (scheme == partition_color || scheme == color)
1446 make_partitioning(connectivity_blocks,
1447 cluster_size,
1448 cell_partition,
1449 partition_list,
1450 partition_size,
1451 partition);
1452 else
1453 make_partitioning(connectivity,
1454 cluster_size,
1455 cell_partition,
1456 partition_list,
1457 partition_size,
1458 partition);
1459
1460 // Partition or color second layer
1461 if (scheme == partition_partition)
1462
1463 {
1464 // Partition within partitions.
1465 make_partitioning_within_partitions_post_blocked(
1466 connectivity,
1467 cell_active_fe_index,
1468 partition,
1469 cluster_size,
1470 hp_bool,
1471 cell_partition,
1472 partition_list,
1473 partition_size,
1474 partition_2layers_list,
1475 irregular_cells);
1476 }
1477 else if (scheme == partition_color || scheme == color)
1478 {
1479 make_coloring_within_partitions_pre_blocked(connectivity_blocks,
1480 partition,
1481 cell_partition,
1482 partition_list,
1483 partition_size,
1484 partition_2layers_list);
1485 }
1486
1487 // in debug mode, check that the partition_2layers_list is one-to-one
1488#ifdef DEBUG
1489 {
1490 std::vector<unsigned int> sorted_pc_list(partition_2layers_list);
1491 std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
1492 for (unsigned int i = 0; i < sorted_pc_list.size(); ++i)
1493 Assert(sorted_pc_list[i] == i, ExcInternalError());
1494 }
1495#endif
1496
1497 // Set the new renumbering
1498 std::vector<unsigned int> renumbering_in(n_active_cells, 0);
1499 renumbering_in.swap(renumbering);
1500 if (scheme == partition_partition) // blocking_connectivity == false
1501 {
1502 // This is the simple case. The renumbering is just a combination of
1503 // the renumbering that we were given as an input and the
1504 // renumbering of partition/coloring given in partition_2layers_list
1505 for (unsigned int j = 0; j < renumbering.size(); ++j)
1506 renumbering[j] = renumbering_in[partition_2layers_list[j]];
1507 // Account for the ghost cells, finally.
1508 for (unsigned int i = 0; i < n_ghost_cells; ++i)
1509 renumbering.push_back(i + n_active_cells);
1510 }
1511 else
1512 {
1513 // set the start list for each block and compute the renumbering of
1514 // cells
1515 std::vector<unsigned int> block_start(n_cell_batches + 1);
1516 std::vector<unsigned char> irregular(n_cell_batches);
1517
1518 unsigned int counter = 0;
1519 unsigned int mcell_start = 0;
1520 block_start[0] = 0;
1521 for (unsigned int block = 0; block < n_blocks; ++block)
1522 {
1523 block_start[block + 1] = block_start[block];
1524 for (unsigned int mcell = mcell_start;
1525 mcell < std::min(mcell_start + block_size, n_cell_batches);
1526 ++mcell)
1527 {
1528 unsigned int n_comp = (irregular_cells[mcell] > 0) ?
1529 irregular_cells[mcell] :
1530 vectorization_length;
1531 block_start[block + 1] += n_comp;
1532 ++counter;
1533 }
1534 mcell_start += block_size;
1535 }
1536 counter = 0;
1537 unsigned int counter_macro = 0;
1538 unsigned int block_size_last =
1539 n_cell_batches - block_size * (n_blocks - 1);
1540 if (block_size_last == 0)
1541 block_size_last = block_size;
1542
1543 unsigned int tick = 0;
1544 for (unsigned int block = 0; block < n_blocks; ++block)
1545 {
1546 unsigned int present_block = partition_2layers_list[block];
1547 for (unsigned int cell = block_start[present_block];
1548 cell < block_start[present_block + 1];
1549 ++cell)
1550 renumbering[counter++] = renumbering_in[cell];
1551 unsigned int this_block_size =
1552 (present_block == n_blocks - 1) ? block_size_last : block_size;
1553
1554 // Also re-compute the content of cell_partition_data to
1555 // contain the numbers of cells, not blocks
1556 if (cell_partition_data[tick] == block)
1557 cell_partition_data[tick++] = counter_macro;
1558
1559 for (unsigned int j = 0; j < this_block_size; ++j)
1560 irregular[counter_macro++] =
1561 irregular_cells[present_block * block_size + j];
1562 }
1563 AssertDimension(tick + 1, cell_partition_data.size());
1564 cell_partition_data.back() = counter_macro;
1565
1566 irregular_cells.swap(irregular);
1567 AssertDimension(counter, n_active_cells);
1568 AssertDimension(counter_macro, n_cell_batches);
1569 // check that the renumbering is one-to-one
1570#ifdef DEBUG
1571 {
1572 std::vector<unsigned int> sorted_renumbering(renumbering);
1573 std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
1574 for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
1575 Assert(sorted_renumbering[i] == i, ExcInternalError());
1576 }
1577#endif
1578 }
1579
1580 // Update the task_info with the more information for the thread graph.
1581 update_task_info(partition);
1582 }
1583
1584
1585
1586 void
1587 TaskInfo::make_thread_graph_partition_partition(
1588 const std::vector<unsigned int> &cell_active_fe_index,
1589 DynamicSparsityPattern & connectivity,
1590 std::vector<unsigned int> & renumbering,
1591 std::vector<unsigned char> & irregular_cells,
1592 const bool hp_bool)
1593 {
1594 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
1595 if (n_cell_batches == 0)
1596 return;
1597
1598 const unsigned int cluster_size = block_size * vectorization_length;
1599
1600 // Create cell-block partitioning.
1601
1602 // For each block of cells, this variable saves to which partitions the
1603 // block belongs. Initialize all to numbers::invalid_unsigned_int to
1604 // mark them as not yet assigned a partition.
1605 std::vector<unsigned int> cell_partition(n_active_cells,
1606 numbers::invalid_unsigned_int);
1607
1608
1609 // In element j of this variable, one puts the old number of the block
1610 // that should be the jth block in the new numeration.
1611 std::vector<unsigned int> partition_list(n_active_cells, 0);
1612 std::vector<unsigned int> partition_partition_list(n_active_cells, 0);
1613
1614 // This vector points to the start of each partition.
1615 std::vector<unsigned int> partition_size(2, 0);
1616
1617 unsigned int partition = 0;
1618 // Here, we do not block inside the connectivity graph
1619 // blocking_connectivity = false;
1620
1621 // Make the partitioning of the first layer of the blocks of cells.
1622 make_partitioning(connectivity,
1623 cluster_size,
1624 cell_partition,
1625 partition_list,
1626 partition_size,
1627 partition);
1628
1629 // Partition within partitions.
1630 make_partitioning_within_partitions_post_blocked(connectivity,
1631 cell_active_fe_index,
1632 partition,
1633 cluster_size,
1634 hp_bool,
1635 cell_partition,
1636 partition_list,
1637 partition_size,
1638 partition_partition_list,
1639 irregular_cells);
1640
1641 partition_list.swap(renumbering);
1642
1643 for (unsigned int j = 0; j < renumbering.size(); ++j)
1644 renumbering[j] = partition_list[partition_partition_list[j]];
1645
1646 for (unsigned int i = 0; i < n_ghost_cells; ++i)
1647 renumbering.push_back(i + n_active_cells);
1648
1649 update_task_info(partition);
1650 }
1651
1652
1653
1654 void
1655 TaskInfo::make_connectivity_cells_to_blocks(
1656 const std::vector<unsigned char> &irregular_cells,
1657 const DynamicSparsityPattern & connectivity_cells,
1658 DynamicSparsityPattern & connectivity_blocks) const
1659 {
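 // Record for every block which cells it contains and, inversely, for
 // every cell the block it belongs to; a block-to-block entry is then
 // added whenever two connected cells belong to different blocks.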
1660 std::vector<std::vector<unsigned int>> cell_blocks(n_blocks);
1661 std::vector<unsigned int> touched_cells(n_active_cells);
1662 unsigned int cell = 0;
1663 for (unsigned int i = 0, mcell = 0; i < n_blocks; ++i)
1664 {
1665 for (unsigned int c = 0;
1666 c < block_size && mcell < *(cell_partition_data.end() - 2);
1667 ++c, ++mcell)
1668 {
1669 unsigned int ncomp = (irregular_cells[mcell] > 0) ?
1670 irregular_cells[mcell] :
1671 vectorization_length;
1672 for (unsigned int c = 0; c < ncomp; ++c, ++cell)
1673 {
1674 cell_blocks[i].push_back(cell);
1675 touched_cells[cell] = i;
1676 }
1677 }
1678 }
1679 AssertDimension(cell, n_active_cells);
1680 for (unsigned int i = 0; i < cell_blocks.size(); ++i)
1681 for (unsigned int col = 0; col < cell_blocks[i].size(); ++col)
1682 {
1683 for (DynamicSparsityPattern::iterator it =
1684 connectivity_cells.begin(cell_blocks[i][col]);
1685 it != connectivity_cells.end(cell_blocks[i][col]);
1686 ++it)
1687 {
1688 if (touched_cells[it->column()] != i)
1689 connectivity_blocks.add(i, touched_cells[it->column()]);
1690 }
1691 }
1692 }
1693
1694
1695
1696 // Function to create partitioning on the second layer within each
1697 // partition. Version without preblocking.
1698 void
1699 TaskInfo::make_partitioning_within_partitions_post_blocked(
1700 const DynamicSparsityPattern & connectivity,
1701 const std::vector<unsigned int> &cell_active_fe_index,
1702 const unsigned int partition,
1703 const unsigned int cluster_size,
1704 const bool hp_bool,
1705 const std::vector<unsigned int> &cell_partition,
1706 const std::vector<unsigned int> &partition_list,
1707 const std::vector<unsigned int> &partition_size,
1708 std::vector<unsigned int> & partition_partition_list,
1709 std::vector<unsigned char> & irregular_cells)
1710 {
1711 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
1712 const unsigned int n_ghost_slots =
1713 *(cell_partition_data.end() - 1) - n_cell_batches;
1714
1715 // List of cells in previous partition
1716 std::vector<unsigned int> neighbor_list;
1717 // List of cells in current partition for use as neighbors in next
1718 // partition
1719 std::vector<unsigned int> neighbor_neighbor_list;
1720
1721 std::vector<unsigned int> renumbering(n_active_cells);
1722
1723 irregular_cells.back() = 0;
1724 irregular_cells.resize(n_active_cells + n_ghost_slots);
1725
1726 unsigned int max_fe_index = 0;
1727 for (const unsigned int fe_index : cell_active_fe_index)
1728 max_fe_index = std::max(fe_index, max_fe_index);
1729
1730 Assert(!hp_bool || cell_active_fe_index.size() == n_active_cells,
1731 ExcInternalError());
1732
1733 {
1734 unsigned int n_cell_batches_before = 0;
1735 // Create partitioning within partitions.
1736
1737 // For each block of cells, this variable saves to which partitions
1738 // the block belongs. Initialize all to numbers::invalid_unsigned_int
1739 // to mark them as not yet assigned a partition.
1740 std::vector<unsigned int> cell_partition_l2(
1741 n_active_cells, numbers::invalid_unsigned_int);
1742 partition_row_index.clear();
1743 partition_row_index.resize(partition + 1, 0);
1744 cell_partition_data.resize(1, 0);
1745
1746 unsigned int counter = 0;
1747 unsigned int missing_macros;
1748 for (unsigned int part = 0; part < partition; ++part)
1749 {
1750 neighbor_neighbor_list.resize(0);
1751 neighbor_list.resize(0);
1752 bool work = true;
1753 unsigned int partition_l2 = 0;
1754 unsigned int start_up = partition_size[part];
1755 unsigned int partition_counter = 0;
1756 while (work)
1757 {
1758 if (neighbor_list.size() == 0)
1759 {
1760 work = false;
1761 partition_counter = 0;
1762 for (unsigned int j = start_up;
1763 j < partition_size[part + 1];
1764 ++j)
1765 if (cell_partition[partition_list[j]] == part &&
1766 cell_partition_l2[partition_list[j]] ==
1767 numbers::invalid_unsigned_int)
1768 {
1769 start_up = j;
1770 work = true;
1771 partition_counter = 1;
1772 // To start up, set the start_up cell to partition
1773 // and list all its neighbors.
1774 AssertIndexRange(start_up, partition_size[part + 1]);
1775 cell_partition_l2[partition_list[start_up]] =
1776 partition_l2;
1777 neighbor_neighbor_list.push_back(
1778 partition_list[start_up]);
1779 partition_partition_list[counter++] =
1780 partition_list[start_up];
1781 start_up++;
1782 break;
1783 }
1784 }
1785 else
1786 {
1787 partition_counter = 0;
1788 for (const unsigned int neighbor : neighbor_list)
1789 {
1790 Assert(cell_partition[neighbor] == part,
1791 ExcInternalError());
1792 Assert(cell_partition_l2[neighbor] == partition_l2 - 1,
1793 ExcInternalError());
1794 auto neighbor_it = connectivity.begin(neighbor);
1795 const auto end_it = connectivity.end(neighbor);
1796 for (; neighbor_it != end_it; ++neighbor_it)
1797 {
1798 if (cell_partition[neighbor_it->column()] == part &&
1799 cell_partition_l2[neighbor_it->column()] ==
1800 numbers::invalid_unsigned_int)
1801 {
1802 cell_partition_l2[neighbor_it->column()] =
1803 partition_l2;
1804 neighbor_neighbor_list.push_back(
1805 neighbor_it->column());
1806 partition_partition_list[counter++] =
1807 neighbor_it->column();
1808 partition_counter++;
1809 }
1810 }
1811 }
1812 }
1813 if (partition_counter > 0)
1814 {
1815 int index_before = neighbor_neighbor_list.size(),
1816 index = index_before;
1817 {
1818 // put the cells into separate lists for each FE index
1819 // within one partition-partition
1820 missing_macros = 0;
1821 std::vector<unsigned int> remaining_per_cell_batch(
1822 max_fe_index + 1);
1823 std::vector<std::vector<unsigned int>>
1824 renumbering_fe_index;
1825 unsigned int cell;
1826 bool filled = true;
1827 if (hp_bool == true)
1828 {
1829 renumbering_fe_index.resize(max_fe_index + 1);
1830 for (cell = counter - partition_counter;
1831 cell < counter;
1832 ++cell)
1833 {
1834 renumbering_fe_index
1835 [cell_active_fe_index.empty() ?
1836 0 :
1837 cell_active_fe_index
1838 [partition_partition_list[cell]]]
1839 .push_back(partition_partition_list[cell]);
1840 }
1841 // check how many more cells are needed in the lists
1842 for (unsigned int j = 0; j < max_fe_index + 1; ++j)
1843 {
1844 remaining_per_cell_batch[j] =
1845 renumbering_fe_index[j].size() %
1846 vectorization_length;
1847 if (remaining_per_cell_batch[j] != 0)
1848 filled = false;
1849 missing_macros +=
1850 ((renumbering_fe_index[j].size() +
1851 vectorization_length - 1) /
1852 vectorization_length);
1853 }
1854 }
1855 else
1856 {
1857 remaining_per_cell_batch.resize(1);
1858 remaining_per_cell_batch[0] =
1859 partition_counter % vectorization_length;
1860 missing_macros =
1861 partition_counter / vectorization_length;
1862 if (remaining_per_cell_batch[0] != 0)
1863 {
1864 filled = false;
1865 missing_macros++;
1866 }
1867 }
1868 missing_macros =
1869 cluster_size - (missing_macros % cluster_size);
1870
1871 // Now some cells are still missing; fill up the batches from neighbors.
1872 while (missing_macros > 0 || filled == false)
1873 {
1874 if (index == 0)
1875 {
1876 index = neighbor_neighbor_list.size();
1877 if (index == index_before)
1878 {
1879 if (missing_macros != 0)
1880 {
1881 neighbor_neighbor_list.resize(0);
1882 }
1883 start_up--;
1884 break; // not connected - start again
1885 }
1886 index_before = index;
1887 }
1888 index--;
1889 unsigned int additional =
1890 neighbor_neighbor_list[index];
1891
1892 // go through the neighbors of the last cell in the
1893 // current partition and check if we find some to
1894 // fill up with.
1895 DynamicSparsityPattern::iterator neighbor =
1896 connectivity.begin(
1897 additional),
1898 end =
1899 connectivity.end(
1900 additional);
1901 for (; neighbor != end; ++neighbor)
1902 {
1903 if (cell_partition[neighbor->column()] == part &&
1904 cell_partition_l2[neighbor->column()] ==
1905 numbers::invalid_unsigned_int)
1906 {
1907 unsigned int this_index = 0;
1908 if (hp_bool == true)
1909 this_index =
1910 cell_active_fe_index.empty() ?
1911 0 :
1912 cell_active_fe_index[neighbor
1913 ->column()];
1914
1915 // Only add this cell if we need more macro
1916 // cells in the current block or if there is
1917 // a macro cell with the FE index that is
1918 // not yet fully populated
1919 if (missing_macros > 0 ||
1920 remaining_per_cell_batch[this_index] > 0)
1921 {
1922 cell_partition_l2[neighbor->column()] =
1923 partition_l2;
1924 neighbor_neighbor_list.push_back(
1925 neighbor->column());
1926 if (hp_bool == true)
1927 renumbering_fe_index[this_index]
1928 .push_back(neighbor->column());
1929 partition_partition_list[counter] =
1930 neighbor->column();
1931 counter++;
1932 partition_counter++;
1933 if (remaining_per_cell_batch
1934 [this_index] == 0 &&
1935 missing_macros > 0)
1936 missing_macros--;
1937 remaining_per_cell_batch[this_index]++;
1938 if (remaining_per_cell_batch
1939 [this_index] ==
1940 vectorization_length)
1941 {
1942 remaining_per_cell_batch[this_index] =
1943 0;
1944 }
1945 if (missing_macros == 0)
1946 {
1947 filled = true;
1948 for (unsigned int fe_ind = 0;
1949 fe_ind < max_fe_index + 1;
1950 ++fe_ind)
1951 if (remaining_per_cell_batch
1952 [fe_ind] != 0)
1953 filled = false;
1954 }
1955 if (filled == true)
1956 break;
1957 }
1958 }
1959 }
1960 }
1961 if (hp_bool == true)
1962 {
1963 // set the renumbering according to their active FE
1964 // index within one partition-partition which was
1965 // implicitly assumed above
1966 cell = counter - partition_counter;
1967 for (unsigned int j = 0; j < max_fe_index + 1; ++j)
1968 {
1969 for (const unsigned int jj :
1970 renumbering_fe_index[j])
1971 renumbering[cell++] = jj;
1972 if (renumbering_fe_index[j].size() %
1973 vectorization_length !=
1974 0)
1975 irregular_cells[renumbering_fe_index[j].size() /
1976 vectorization_length +
1977 n_cell_batches_before] =
1978 renumbering_fe_index[j].size() %
1979 vectorization_length;
1980 n_cell_batches_before +=
1981 (renumbering_fe_index[j].size() +
1982 vectorization_length - 1) /
1983 vectorization_length;
1984 renumbering_fe_index[j].resize(0);
1985 }
1986 }
1987 else
1988 {
1989 n_cell_batches_before +=
1990 partition_counter / vectorization_length;
1991 if (partition_counter % vectorization_length != 0)
1992 {
1993 irregular_cells[n_cell_batches_before] =
1994 partition_counter % vectorization_length;
1995 n_cell_batches_before++;
1996 }
1997 }
1998 }
1999 cell_partition_data.push_back(n_cell_batches_before);
2000 partition_l2++;
2001 }
2002 neighbor_list = neighbor_neighbor_list;
2003 neighbor_neighbor_list.resize(0);
2004 }
2005 partition_row_index[part + 1] =
2006 partition_row_index[part] + partition_l2;
2007 }
2008 }
2009 if (hp_bool == true)
2010 {
2011 partition_partition_list.swap(renumbering);
2012 }
2013 }
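// [Editor's illustrative sketch, not part of task_info.cc] The loop above
// distributes the cells of one partition into batches of
// `vectorization_length` lanes and records incompletely filled batches in
// `irregular_cells`. The following self-contained snippet shows that
// bookkeeping in isolation; the names `BatchLayout` and `group_into_batches`
// are made up for this sketch.
#include <vector>

struct BatchLayout
{
  unsigned int               n_batches; // number of cell batches
  std::vector<unsigned char> irregular; // 0: full batch, else lanes used
};

BatchLayout
group_into_batches(const unsigned int n_cells,
                   const unsigned int vectorization_length)
{
  BatchLayout layout;
  layout.n_batches =
    (n_cells + vectorization_length - 1) / vectorization_length;
  layout.irregular.assign(layout.n_batches, 0);

  // If the cell count is not a multiple of the vectorization length, the
  // last batch is only partially filled, analogous to irregular_cells above.
  const unsigned int remainder = n_cells % vectorization_length;
  if (remainder != 0)
    layout.irregular.back() = static_cast<unsigned char>(remainder);
  return layout;
}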
2014
2015
2016
2017 // Function to create coloring on the second layer within each partition.
2018 // Version assumes preblocking.
2019 void
2020 TaskInfo::make_coloring_within_partitions_pre_blocked(
2021 const DynamicSparsityPattern & connectivity,
2022 const unsigned int partition,
2023 const std::vector<unsigned int> &cell_partition,
2024 const std::vector<unsigned int> &partition_list,
2025 const std::vector<unsigned int> &partition_size,
2026 std::vector<unsigned int> & partition_color_list)
2027 {
2028 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
2029 std::vector<unsigned int> cell_color(n_blocks, n_cell_batches);
2030 std::vector<bool> color_finder;
2031
2032 partition_row_index.resize(partition + 1);
2033 cell_partition_data.clear();
2034 unsigned int color_counter = 0, index_counter = 0;
2035 for (unsigned int part = 0; part < partition; ++part)
2036 {
2037 partition_row_index[part] = index_counter;
2038 unsigned int max_color = 0;
2039 for (unsigned int k = partition_size[part];
2040 k < partition_size[part + 1];
2041 k++)
2042 {
2043 unsigned int cell = partition_list[k];
2044 unsigned int n_neighbors = connectivity.row_length(cell);
2045
2046 // In the worst case, each neighbor has a different color. So we
2047 // find at least one available color between 0 and n_neighbors.
2048 color_finder.resize(n_neighbors + 1);
2049 for (unsigned int j = 0; j <= n_neighbors; ++j)
2050 color_finder[j] = true;
2051 DynamicSparsityPattern::iterator neighbor =
2052 connectivity.begin(cell),
2053 end = connectivity.end(cell);
2054 for (; neighbor != end; ++neighbor)
2055 {
2056 // Mark the color that a neighbor within the partition has
2057 // as taken
2058 if (cell_partition[neighbor->column()] == part &&
2059 cell_color[neighbor->column()] <= n_neighbors)
2060 color_finder[cell_color[neighbor->column()]] = false;
2061 }
2062 // Choose the smallest color that is not taken for the block
2063 cell_color[cell] = 0;
2064 while (color_finder[cell_color[cell]] == false)
2065 cell_color[cell]++;
2066 if (cell_color[cell] > max_color)
2067 max_color = cell_color[cell];
2068 }
2069 // Reorder within the partition: first all blocks with color 0, then
2070 // those with color 1, and so on up to max_color (note that the smaller
2071 // the color number, the larger the group of blocks tends to be).
2072 for (unsigned int color = 0; color <= max_color; ++color)
2073 {
2074 cell_partition_data.push_back(color_counter);
2075 index_counter++;
2076 for (unsigned int k = partition_size[part];
2077 k < partition_size[part + 1];
2078 k++)
2079 {
2080 unsigned int cell = partition_list[k];
2081 if (cell_color[cell] == color)
2082 {
2083 partition_color_list[color_counter++] = cell;
2084 }
2085 }
2086 }
2087 }
2088 cell_partition_data.push_back(n_blocks);
2089 partition_row_index[partition] = index_counter;
2090 AssertDimension(color_counter, n_blocks);
2091 }
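// [Editor's illustrative sketch, not part of task_info.cc] The coloring in
// make_coloring_within_partitions_pre_blocked is a greedy graph coloring:
// each block receives the smallest color not yet taken by an already colored
// neighbor, so at most (maximal number of neighbors + 1) colors are needed.
// Below, the same idea on a plain adjacency list instead of a
// DynamicSparsityPattern; `greedy_coloring` is an invented name.
#include <limits>
#include <vector>

std::vector<unsigned int>
greedy_coloring(const std::vector<std::vector<unsigned int>> &adjacency)
{
  const unsigned int        invalid = std::numeric_limits<unsigned int>::max();
  std::vector<unsigned int> color(adjacency.size(), invalid);
  for (unsigned int block = 0; block < adjacency.size(); ++block)
    {
      // mark colors already used by neighbors of this block
      std::vector<bool> taken(adjacency[block].size() + 1, false);
      for (const unsigned int neighbor : adjacency[block])
        if (color[neighbor] != invalid && color[neighbor] < taken.size())
          taken[color[neighbor]] = true;

      // choose the smallest color that is still free
      unsigned int c = 0;
      while (taken[c])
        ++c;
      color[block] = c;
    }
  return color;
}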
2092
2093
2094 // Function to create partitioning on the first layer.
2095 void
2096 TaskInfo::make_partitioning(const DynamicSparsityPattern &connectivity,
2097 const unsigned int cluster_size,
2098 std::vector<unsigned int> & cell_partition,
2099 std::vector<unsigned int> & partition_list,
2100 std::vector<unsigned int> & partition_size,
2101 unsigned int & partition) const
2102
2103 {
2104 // For each block of cells, this variable saves to which partition the
2105 // block belongs. Entries are initialized to numbers::invalid_unsigned_int
2106 // to mark blocks that have not yet been assigned a partition.
2107 // std::vector<unsigned int> cell_partition (n_active_cells,
2108 // numbers::invalid_unsigned_int);
2109 // List of cells in previous partition
2110 std::vector<unsigned int> neighbor_list;
2111 // List of cells in current partition for use as neighbors in next
2112 // partition
2113 std::vector<unsigned int> neighbor_neighbor_list;
2114
2115 // In element j of this variable, one puts the old number of the block
2116 // that should be the jth block in the new numeration.
2117 // std::vector<unsigned int> partition_list(n_active_cells,0);
2118
2119 // This vector points to the start of each partition.
2120 // std::vector<unsigned int> partition_size(2,0);
2121
2122 partition = 0;
2123 unsigned int counter = 0;
2124 unsigned int start_nonboundary =
2125 cell_partition_data.size() == 5 ?
2126 vectorization_length *
2127 (cell_partition_data[2] - cell_partition_data[1]) :
2128 0;
2129
2130 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
2131 if (n_cell_batches == 0)
2132 return;
2133 if (scheme == color)
2134 start_nonboundary = n_cell_batches;
2135 if (scheme == partition_color ||
2136 scheme == color) // blocking_connectivity == true
2137 start_nonboundary = ((start_nonboundary + block_size - 1) / block_size);
2138 unsigned int n_blocks;
2139 if (scheme == partition_color ||
2140 scheme == color) // blocking_connectivity == true
2141 n_blocks = this->n_blocks;
2142 else
2143 n_blocks = n_active_cells;
2144
2145 if (start_nonboundary > n_blocks)
2146 start_nonboundary = n_blocks;
2147
2148
2149 unsigned int start_up = 0;
2150 bool work = true;
2151 unsigned int remainder = cluster_size;
2152
2153 // this performs a classical breadth-first search in the connectivity
2154 // graph of the cells under the restriction that the size of the
2155 // partitions should be a multiple of the given block size
2156 while (work)
2157 {
2158 // put the cells with neighbors on remote MPI processes up front
2159 if (start_nonboundary > 0)
2160 {
2161 for (unsigned int cell = 0; cell < start_nonboundary; ++cell)
2162 {
2163 const unsigned int cell_nn = cell;
2164 cell_partition[cell_nn] = partition;
2165 neighbor_list.push_back(cell_nn);
2166 partition_list[counter++] = cell_nn;
2167 partition_size.back()++;
2168 }
2169 start_nonboundary = 0;
2170 remainder -= (start_nonboundary % cluster_size);
2171 if (remainder == cluster_size)
2172 remainder = 0;
2173 }
2174 else
2175 {
2176 // To start up, set the start_up cell to partition and list all
2177 // its neighbors.
2178 cell_partition[start_up] = partition;
2179 neighbor_list.push_back(start_up);
2180 partition_list[counter++] = start_up;
2181 partition_size.back()++;
2182 start_up++;
2183 remainder--;
2184 if (remainder == cluster_size)
2185 remainder = 0;
2186 }
2187 int index_before = neighbor_list.size(), index = index_before,
2188 index_stop = 0;
2189 while (remainder > 0)
2190 {
2191 if (index == index_stop)
2192 {
2193 index = neighbor_list.size();
2194 if (index == index_before)
2195 {
2196 neighbor_list.resize(0);
2197 goto not_connect;
2198 }
2199 index_stop = index_before;
2200 index_before = index;
2201 }
2202 index--;
2203 unsigned int additional = neighbor_list[index];
2204 DynamicSparsityPattern::iterator neighbor =
2205 connectivity.begin(additional),
2206 end =
2207 connectivity.end(additional);
2208 for (; neighbor != end; ++neighbor)
2209 {
2210 if (cell_partition[neighbor->column()] ==
2211 numbers::invalid_unsigned_int)
2212 {
2213 partition_size.back()++;
2214 cell_partition[neighbor->column()] = partition;
2215 neighbor_list.push_back(neighbor->column());
2216 partition_list[counter++] = neighbor->column();
2217 remainder--;
2218 if (remainder == 0)
2219 break;
2220 }
2221 }
2222 }
2223
2224 while (neighbor_list.size() > 0)
2225 {
2226 partition++;
2227
2228 // counter for number of cells so far in current partition
2229 unsigned int partition_counter = 0;
2230
2231 // Mark the start of the new partition
2232 partition_size.push_back(partition_size.back());
2233
2234 // Loop through the list of cells in previous partition and put
2235 // all their neighbors in current partition
2236 for (const unsigned int cell : neighbor_list)
2237 {
2238 Assert(cell_partition[cell] == partition - 1,
2239 ExcInternalError());
2240 auto neighbor = connectivity.begin(cell);
2241 const auto end = connectivity.end(cell);
2242 for (; neighbor != end; ++neighbor)
2243 {
2244 if (cell_partition[neighbor->column()] ==
2245 numbers::invalid_unsigned_int)
2246 {
2247 partition_size.back()++;
2248 cell_partition[neighbor->column()] = partition;
2249
2250 // collect the cells of the current partition for
2251 // use as neighbors in next partition
2252 neighbor_neighbor_list.push_back(neighbor->column());
2253 partition_list[counter++] = neighbor->column();
2254 partition_counter++;
2255 }
2256 }
2257 }
2258 remainder = cluster_size - (partition_counter % cluster_size);
2259 if (remainder == cluster_size)
2260 remainder = 0;
2261 int index_stop = 0;
2262 int index_before = neighbor_neighbor_list.size(),
2263 index = index_before;
2264 while (remainder > 0)
2265 {
2266 if (index == index_stop)
2267 {
2268 index = neighbor_neighbor_list.size();
2269 if (index == index_before)
2270 {
2271 neighbor_neighbor_list.resize(0);
2272 break;
2273 }
2274 index_stop = index_before;
2275 index_before = index;
2276 }
2277 index--;
2278 unsigned int additional = neighbor_neighbor_list[index];
2279 DynamicSparsityPattern::iterator neighbor =
2280 connectivity.begin(
2281 additional),
2282 end = connectivity.end(
2283 additional);
2284 for (; neighbor != end; ++neighbor)
2285 {
2286 if (cell_partition[neighbor->column()] ==
2287 numbers::invalid_unsigned_int)
2288 {
2289 partition_size.back()++;
2290 cell_partition[neighbor->column()] = partition;
2291 neighbor_neighbor_list.push_back(neighbor->column());
2292 partition_list[counter++] = neighbor->column();
2293 remainder--;
2294 if (remainder == 0)
2295 break;
2296 }
2297 }
2298 }
2299
2300 neighbor_list = neighbor_neighbor_list;
2301 neighbor_neighbor_list.resize(0);
2302 }
2303 not_connect:
2304 // Check whether the graph is disconnected; in that case we have to
2305 // start another partition from a cell that has not been assigned yet.
2306 work = false;
2307 for (unsigned int j = start_up; j < n_blocks; ++j)
2308 if (cell_partition[j] == numbers::invalid_unsigned_int)
2309 {
2310 start_up = j;
2311 work = true;
2312 if (remainder == 0)
2313 remainder = cluster_size;
2314 break;
2315 }
2316 }
2317 if (remainder != 0)
2318 partition++;
2319
2320 AssertDimension(partition_size[partition], n_blocks);
2321 }
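// [Editor's illustrative sketch, not part of task_info.cc]
// make_partitioning above is essentially a breadth-first layering of the
// cell connectivity graph: partition k+1 collects the unassigned neighbors
// of partition k, and a new seed is picked whenever the graph turns out to
// be disconnected. The sketch below shows that core idea on a plain
// adjacency list; the rounding of every partition to a multiple of
// cluster_size and the special treatment of cells at MPI boundaries are
// omitted. `bfs_layer_partitioning` is an invented name.
#include <vector>

std::vector<unsigned int>
bfs_layer_partitioning(const std::vector<std::vector<unsigned int>> &adjacency)
{
  const unsigned int        invalid = static_cast<unsigned int>(-1);
  std::vector<unsigned int> cell_partition(adjacency.size(), invalid);
  unsigned int              partition = 0;
  for (unsigned int seed = 0; seed < adjacency.size(); ++seed)
    {
      if (cell_partition[seed] != invalid)
        continue; // already reached from an earlier seed

      std::vector<unsigned int> current_layer(1, seed);
      cell_partition[seed] = partition;
      while (!current_layer.empty())
        {
          ++partition;
          std::vector<unsigned int> next_layer;
          for (const unsigned int cell : current_layer)
            for (const unsigned int neighbor : adjacency[cell])
              if (cell_partition[neighbor] == invalid)
                {
                  cell_partition[neighbor] = partition;
                  next_layer.push_back(neighbor);
                }
          current_layer.swap(next_layer);
        }
    }
  return cell_partition;
}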
2322
2323
2324 void
2325 TaskInfo::update_task_info(const unsigned int partition)
2326 {
2327 evens = (partition + 1) / 2;
2328 odds = partition / 2;
2329 n_blocked_workers = odds - (odds + evens + 1) % 2;
2330 n_workers = evens + odds - n_blocked_workers;
2331 // From here only used for partition partition option.
2332 partition_evens.resize(partition);
2333 partition_odds.resize(partition);
2334 partition_n_blocked_workers.resize(partition);
2335 partition_n_workers.resize(partition);
2336 for (unsigned int part = 0; part < partition; ++part)
2337 {
2338 partition_evens[part] =
2339 (partition_row_index[part + 1] - partition_row_index[part] + 1) / 2;
2340 partition_odds[part] =
2341 (partition_row_index[part + 1] - partition_row_index[part]) / 2;
2342 partition_n_blocked_workers[part] =
2343 partition_odds[part] -
2344 (partition_odds[part] + partition_evens[part] + 1) % 2;
2345 partition_n_workers[part] = partition_evens[part] +
2346 partition_odds[part] -
2347 partition_n_blocked_workers[part];
2348 }
2349 }
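// [Editor's illustrative sketch, not part of task_info.cc] The even/odd
// bookkeeping of update_task_info, written as a free function together with
// two worked values: for partition == 5 one obtains evens = 3, odds = 2,
// n_blocked_workers = 2, n_workers = 3; for partition == 4 one obtains
// evens = 2, odds = 2, n_blocked_workers = 1, n_workers = 3. The names
// `WorkerCounts` and `compute_worker_counts` are made up for this sketch.
struct WorkerCounts
{
  unsigned int evens;
  unsigned int odds;
  unsigned int n_blocked_workers;
  unsigned int n_workers;
};

WorkerCounts
compute_worker_counts(const unsigned int partition)
{
  WorkerCounts c;
  c.evens             = (partition + 1) / 2;
  c.odds              = partition / 2;
  c.n_blocked_workers = c.odds - (c.odds + c.evens + 1) % 2;
  c.n_workers         = c.evens + c.odds - c.n_blocked_workers;
  return c;
}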
2350 } // namespace MatrixFreeFunctions
2351} // namespace internal
2352
2353
2354
2355// explicit instantiations of template functions
2356template void
2357internal::MatrixFreeFunctions::TaskInfo::print_memory_statistics<std::ostream>(
2358 std::ostream &,
2359 const std::size_t) const;
2360template void
2361 internal::MatrixFreeFunctions::TaskInfo::print_memory_statistics<
2362 ConditionalOStream>(ConditionalOStream &, const std::size_t) const;
2363
2364